query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Whether the process is running as the main process | def is_main_process(args: dict):
    return not is_distributed(args) or args.local_rank == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_main_process() -> bool:\n return multiprocessing.current_process().name == 'MainProcess' and os.environ['main_process_pid'] == str(os.getpid())",
"def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False",
"def is_running(program):\n return program in get_running()",
"def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False",
"def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False",
"def running(self):\n return self.sub_process and self.sub_process.is_alive()",
"def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True",
"def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False",
"def is_running(self) -> bool:\n return False",
"def is_started(self):\n return bool(self._processes)",
"def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )",
"def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False",
"def running(self):\n return bool(self.proc and self._running())",
"def _is_running_from_main_thread():\n return tornado.ioloop.IOLoop.current(instance=False)",
"def is_main_thread():\n if not _dispatcher:\n return True\n else:\n return _dispatcher.is_main_thread()",
"def _is_running(self):\n try:\n # Process is not killed, os.kill(pid, 0) does nothing but raise if process does not\n # exist.\n os.kill(self.pid, 0)\n except ProcessLookupError:\n return False\n else:\n return True",
"def is_running(self):\n\t\treturn self in _running",
"def isprogram(self):\n return True",
"def can_run(self):\n\t\treturn self._start is None",
"def is_vega_process(pid):\n try:\n p = psutil.Process(pid)\n if p.name().startswith(\"vega-main\"):\n return True\n except Exception:\n return False\n return False",
"def get_prog_runatstart(self):\n #en = self._get_prop(\"runAtStartup\")\n #return bool( en == \"true\" )\n return bool(self._mydict['runAtStartup'] == \"true\")",
"def _is_running(self):\n return self._run_state.is_running()",
"def running(self):\n\t\treturn self._start is not None",
"def isprogram(self):\n return False",
"def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)",
"def is_running(self) -> bool:\n return self.executor.is_alive() if self.executor else False",
"def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))",
"def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"",
"def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False",
"def is_running(self):\n\t\treturn self._running"
] | [
"0.8799151",
"0.7497712",
"0.7343165",
"0.7201382",
"0.7150669",
"0.7103503",
"0.7073254",
"0.7026223",
"0.70111364",
"0.6951164",
"0.6927277",
"0.6923171",
"0.68789256",
"0.68608314",
"0.68460107",
"0.68151265",
"0.6789779",
"0.67896146",
"0.67687416",
"0.6764666",
"0.6742409",
"0.6740245",
"0.6712004",
"0.6707239",
"0.6678798",
"0.6677478",
"0.66727585",
"0.66594183",
"0.6645946",
"0.6638398"
] | 0.7922667 | 1 |
Set parameters in the parameter tree. This method simply wraps the underlying ParameterTree method so that any exceptions can be re-raised with an appropriate FileInterfaceError. | def set(self, path, data):
    try:
        self.param_tree.set(path, data)
    except ParameterTreeError as e:
        raise FileInterfaceError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setParameterNode(self, parameterNode):\r\n # framework\r\n profbox()\r\n self.parameterNode = parameterNode",
"def setParameterNode(self, parameterNode):\n #framework\n profbox()\n self.parameterNode = parameterNode",
"def setParams(self, paramSet):\r\n pass",
"def set_params(self):\n raise NotImplementedError",
"def set_parameter(self, params, name, val):\n raise NotImplementedError()",
"def setParameter(self, name, value):",
"def setParameters(self, izParameters): #$NON-NLS-1$\r",
"def _set_parameters(self, override_previous=True, validate_legality=False,\n **parameters):\n # The 'mode' parameter is only relevant to the current hierarchy\n self.mode = parameters.pop('mode', self.mode)\n\n for name, value in iteritems(parameters):\n if isinstance(value, Pipe):\n if override_previous or (name not in self.__dict__ and\n name not in self._pipes):\n\n self._pipes[name] = value\n\n else:\n if override_previous or (name not in self.__dict__ and\n name not in self._pipes):\n\n if isinstance(value, BaseResource):\n self.add_resources({name: value})\n\n else:\n setattr(self, name, value)\n\n if validate_legality and not self._is_valid_input(name):\n raise AttributeError(\"Unrecognized parameter %r passed to %r\" %\n (name, self.data.name))",
"def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise EstimatorParameterError(\n 'Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' % (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self",
"def set_params(self, params):",
"def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()",
"def _set_params(self, *args, **kwargs):\n\n params = args[0]\n\n # check for attempt to set readonly parameters (read-only or immutable set outside startup)\n self._verify_not_readonly(*args, **kwargs)\n old_config = self._param_dict.get_config()\n\n for (key, val) in params.iteritems():\n log.debug(\"KEY = \" + str(key) + \" VALUE = \" + str(val))\n self._param_dict.set_value(key, val)\n\n new_config = self._param_dict.get_config()\n # check for parameter change\n if not dict_equal(old_config, new_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)",
"def set_parameters(self, *args, **kwargs):\n if len(args) > 0:\n if hasattr(args[0], '__iter__'):\n self._parameters = self._Parameters(*args[0])\n elif args[0] is None:\n self._parameters = self._Parameters()\n else:\n self._parameters = self._Parameters(*args)\n else:\n self._parameters = self._Parameters(**kwargs)",
"def set_parameter_value(self, parameter, value):\n pass",
"def set_params(self):\r\n pass",
"def set_params(self, **kwargs):\n ...",
"def _set_params(self, *args, **kwargs):\n startup = False\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n try:\n startup = args[1]\n except IndexError:\n pass\n\n # Only check for readonly parameters if we are not setting them from startup\n if not startup:\n readonly = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)\n\n log.debug(\"set param, but check visibility first\")\n log.debug(\"Read only keys: %s\", readonly)\n\n for (key, val) in params.iteritems():\n if key in readonly:\n raise InstrumentParameterException(\"Attempt to set read only parameter (%s)\" % key)\n\n # Make sure this method is overloaded because this just verifies, but doesn't\n # set a damn thing.",
"def set_parameters(self, params):\n self.kp = params.pgain",
"def set(self, **parameters):\r\n for name in parameters:\r\n if name in self.prm:\r\n self.prm[name] = parameters[name]\r\n else:\r\n self._illegal_parameter(name)",
"def set_params(self, *arg):\n pass",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)",
"def set_params(self, **kwargs):\n\t\tself._treeType = kwargs.get('treeType', self._treeType)\n\t\tfor key, value in kwargs.items():\n\t\t\tif key in self._model_complexity_args:\n\t\t\t\tself._model_complexity_args[key] = value",
"def setParameterNode(self, inputParameterNode):\n\n\t\tif inputParameterNode:\n\t\t\tself.logic.setDefaultParameters(inputParameterNode)\n\n\t\t# Unobserve previously selected parameter node and add an observer to the newly selected.\n\t\t# Changes of parameter node are observed so that whenever parameters are changed by a script or any other module\n\t\t# those are reflected immediately in the GUI.\n\t\tif self._parameterNode is not None:\n\t\t\tself.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)\n\t\tself._parameterNode = inputParameterNode\n\t\tif self._parameterNode is not None:\n\t\t\tself.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)\n\n\t\t# Initial GUI update\n\t\tself.updateGUIFromParameterNode()",
"def set_params(self, **kwargs) -> NoReturn:\n pass",
"def set_params(self, *argv, **kwargs):\n pass",
"def set_params(self, *argv, **kwargs):\n pass",
"def set_params(self, *argv, **kwargs):\n pass"
] | [
"0.6985717",
"0.67060566",
"0.6683662",
"0.66480035",
"0.6634291",
"0.66082174",
"0.65245366",
"0.6508125",
"0.64559436",
"0.6373968",
"0.6365402",
"0.63510525",
"0.63371843",
"0.62663895",
"0.6263053",
"0.6206298",
"0.6145241",
"0.6132779",
"0.609262",
"0.6084063",
"0.60808367",
"0.60808367",
"0.60808367",
"0.6054713",
"0.60349625",
"0.6025687",
"0.6014199",
"0.6014075",
"0.6014075",
"0.6014075"
] | 0.7035647 | 0 |
Retrieve all of the txt configuration files in the absolute directory path. Clears the internal lists first to prevent circular appending at every "GET". | def get_config_files(self):
    self.clear_lists()
    print self.abs_directory
    for file in os.listdir(self.abs_directory):
        print file
        if file.endswith('.json') and "qemii" in file:
            self.txt_files.append(file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_path(self, conf):\n\t\tpass",
"def get_cfg_files(self):\n\t\tcfg_files = []\n\t\tfor config_object, config_value in self.maincfg_values:\n\t\t\t\n\t\t\t## Add cfg_file objects to cfg file list\n\t\t\tif config_object == \"cfg_file\" and os.path.isfile(config_value):\n\t\t\t\t\tcfg_files.append(config_value)\n\n\t\t\t## Parse all files in a cfg directory\n\t\t\tif config_object == \"cfg_dir\":\n\t\t\t\tdirectories = []\n\t\t\t\traw_file_list = []\n\t\t\t\tdirectories.append( config_value )\n\t\t\t\t# Walk through every subdirectory and add to our list\n\t\t\t\twhile len(directories) > 0:\n\t\t\t\t\tcurrent_directory = directories.pop(0)\n\t\t\t\t\t# Nagios doesnt care if cfg_dir exists or not, so why should we ?\n\t\t\t\t\tif not os.path.isdir( current_directory ): continue\n\t\t\t\t\tlist = os.listdir(current_directory)\n\t\t\t\t\tfor item in list:\n\t\t\t\t\t\t# Append full path to file\n\t\t\t\t\t\titem = \"%s\" % (os.path.join(current_directory, item.strip() ) )\n\t\t\t\t\t\tif os.path.islink( item ):\n\t\t\t\t\t\t\titem = os.readlink( item )\n\t\t\t\t\t\tif os.path.isdir(item):\n\t\t\t\t\t\t\tdirectories.append( item )\n\t\t\t\t\t\tif raw_file_list.count( item ) < 1:\n\t\t\t\t\t\t\traw_file_list.append( item )\n\t\t\t\tfor raw_file in raw_file_list:\n\t\t\t\t\tif raw_file.endswith('.cfg'):\n\t\t\t\t\t\tif os.path.exists(raw_file):\n\t\t\t\t\t\t\t'Nagios doesnt care if cfg_file exists or not, so we will not throws errors'\n\t\t\t\t\t\t\tcfg_files.append(raw_file)\n\n\t\treturn cfg_files",
"def action_listall():\n\n def parse_file(filename):\n config = {}\n\n # get all content\n with open(filename, 'r') as f:\n lines = f.read().split('\\n')\n\n # parse the content\n for l_nb in range(len(lines)):\n items = [_.strip() for _ in lines[l_nb].split('#')[0].split('=')]\n if len(items) > 1:\n v = '='.join(items[1:]).strip()\n # handle [...] for param value\n if '[' in v and ']' not in v:\n l_nb += 1\n # get the next line until the array in not closed by ']'\n while ']' not in v:\n v += lines[l_nb].split('#')[0].strip()\n l_nb += 1\n # remove '' around param value\n if v[0] == \"'\" and v[-1:] == \"'\":\n v = v[1:len(v)]\n config[items[0]] = v\n return config\n\n out = []\n for root, dirs, files in os.walk('/etc/xen'):\n for cfgfile in files:\n if cfgfile.endswith('.cfg') and not cfgfile.startswith('.'):\n out.append(parse_file(os.path.join(root, cfgfile)))\n return out",
"def get_fr_config_files(self):\n self.get_config_files()\n for file in self.txt_files:\n if \"fr\" in file:\n self.fr_config_files.append(file)\n return self.fr_config_files",
"def get_list():\n\n print(f\"Корневой каталог: {config_tools.NAME_PATH}\")\n for dirpath, dirnames, filenames in os.walk(config_tools.NAME_PATH):\n # перебрать каталоги\n for dirname in dirnames:\n print(\"Каталог:\", os.path.join(dirpath, dirname))\n # перебрать файлы\n for filename in filenames:\n print(\"Файл:\", os.path.join(dirpath, filename))",
"def get_fp_config_files(self):\n self.get_config_files()\n for file in self.txt_files: \n if \"fp\" in file:\n self.fp_config_files.append(file)\n return self.fp_config_files",
"def get_configfiles():\r\n configArray=''\r\n try:\r\n #print(len(configArray))\r\n while len(configArray) == 0:\r\n configFiles = input(\"List of Configuration and Files sepearated by commas (vhosts.conf,sslhosts.conf) \\n\") # takes the whole line of n numbers\r\n configArray = list(map(str,configFiles.split(',')))\r\n ### DEBUGGING\r\n # print(\"config array 0\" + configArray[0])\r\n # print(\"config array 1\" + configArray[1])\r\n #print(\"config array 0\" + configArray[0])\r\n ### /DEBUGGING ###\r\n if configArray[0] == '':\r\n print(\"please enter configuration files \")\r\n del configArray[:]\r\n #print(len(configArray))\r\n #print(configArray[0])\r\n return configArray[0], configArray[1]\r\n except:\r\n print(\"something went wrong with getting the config files\")",
"def ini_get_all():\n raise NotImplementedError()",
"def __get_url(self, conf):\n url_file = conf[self.conf_item.get_url_list_file()]\n url_list = list()\n map((lambda url: url_list.append(url.strip())), open(url_file))\n return url_list",
"def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()",
"def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return",
"def ReadEntries(self):\n entries = []\n config = wx.Config.Get()\n config.SetPath(DEPS_CONFIG)\n step = config.GetFirstEntry()\n while (step[0]):\n entries.append(config.Read(step[1]))\n step = config.GetNextEntry(step[2])\n config.SetPath('..')\n return entries",
"def fetch_config_files(ACS=False):\n cwd = os.getcwd()\n \n print('Config directory: {0}/CONF'.format(os.getenv('GRIZLI')))\n \n os.chdir(os.path.join(os.getenv('GRIZLI'), 'CONF'))\n \n ftpdir = 'ftp://ftp.stsci.edu/cdbs/wfc3_aux/'\n tarfiles = ['{0}/WFC3.IR.G102.cal.V4.32.tar.gz'.format(ftpdir),\n '{0}/WFC3.IR.G141.cal.V4.32.tar.gz'.format(ftpdir),\n '{0}/grism_master_sky_v0.5.tar.gz'.format(ftpdir)]\n \n gURL = 'http://www.stsci.edu/~brammer/Grizli/Files'\n tarfiles.append('{0}/WFC3IR_extended_PSF.v1.tar.gz'.format(gURL))\n \n if ACS:\n tarfiles.append('{0}/ACS.WFC.sky.tar.gz'.format(gURL))\n\n tarfiles.append('{0}/ACS_CONFIG.tar.gz'.format(gURL))\n \n for url in tarfiles:\n file=os.path.basename(url)\n if not os.path.exists(file):\n print('Get {0}'.format(file))\n os.system('curl -o {0} {1}'.format(file, url))\n \n os.system('tar xzvf {0}'.format(file))\n \n # ePSF files for fitting point sources\n psf_path = 'http://www.stsci.edu/hst/wfc3/analysis/PSF/psf_downloads/wfc3_ir/'\n files = ['{0}/PSFSTD_WFC3IR_{1}.fits'.format(psf_path, filt) \n for filt in ['F105W', 'F125W', 'F140W', 'F160W']]\n \n for url in files:\n file=os.path.basename(url)\n if not os.path.exists(file):\n print('Get {0}'.format(file))\n os.system('curl -o {0} {1}'.format(file, url))\n else:\n print('File {0} exists'.format(file))\n \n # Stellar templates\n print('Templates directory: {0}/templates'.format(os.getenv('GRIZLI')))\n os.chdir('{0}/templates'.format(os.getenv('GRIZLI')))\n \n files = ['http://www.stsci.edu/~brammer/Grizli/Files/stars_pickles.npy',\n 'http://www.stsci.edu/~brammer/Grizli/Files/stars_bpgs.npy']\n \n for url in files:\n file=os.path.basename(url)\n if not os.path.exists(file):\n print('Get {0}'.format(file))\n os.system('curl -o {0} {1}'.format(file, url))\n else:\n print('File {0} exists'.format(file))\n \n print('ln -s stars_pickles.npy stars.npy')\n os.system('ln -s stars_pickles.npy stars.npy')\n \n os.chdir(cwd)",
"def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent",
"def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []",
"def filepaths(self):\n pass",
"def get_listfile(self, datadir):\n return []",
"def listFiles(self):\n pass",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def find_config_files(create: bool = False) -> List[str]:\n files = [\".wpwatcher/wpwatcher.conf\", \"wpwatcher.conf\"]\n env = [\"HOME\", \"XDG_CONFIG_HOME\", \"APPDATA\", \"PWD\"]\n\n return Config.find_files(\n env, files, Config.TEMPLATE_FILE, create=create\n )",
"def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files",
"def list_configurations(path):\n configurations = []\n\n for afile in os.listdir(path):\n afile = os.path.join(path, afile)\n if os.path.isfile(afile) and afile.endswith('.py'):\n configurations.append(afile)\n\n return configurations",
"def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)",
"def parseInputFileList (self) :\n filelist = []\n try :\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"#\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: cfg file \" , self.cfgName , \" not found\"\n return\n\n #return filelist",
"def list_dir(self, path):",
"def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e",
"def __load_config_files(self):\n # Parsed configuration files\n # ==========================\n T_dict = ParsedParameterFile(self.config_path('0/T'))\n fv_solutions = ParsedParameterFile(self.config_path('system/fvSolution'))\n fv_schemes = ParsedParameterFile(self.config_path('system/fvSchemes'))\n control_dict = ParsedParameterFile(self.config_path('system/controlDict'))\n transport_props = ParsedParameterFile(self.config_path('constant/transportProperties'))\n\n # Registered files\n # ================\n self.foam_file('0/T', T_dict)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('system/fvSolution', fv_solutions)\n self.foam_file('system/fvSchemes', fv_schemes)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('constant/transportProperties', transport_props)",
"def cb_filelist(args):\n req = args[\"request\"]\n\n pyhttp = req.getHttp()\n config = req.getConfiguration()\n pathinfo = pyhttp[\"PATH_INFO\"]\n\n if not pathinfo.startswith(\"/\" + TRIGGER):\n return\n\n logger = tools.getLogger()\n\n data = req.getData()\n data[INIT_KEY] = 1\n datadir = config[\"datadir\"]\n data['root_datadir'] = config['datadir']\n wikidir = config.get(\"wikidir\", config['datadir'])\n\n # convert the / to os.sep so that we can use os.path stuff.\n wikidir = wikidir.replace(\"/\", os.sep)\n if not wikidir.endswith(os.sep):\n wikidir = wikidir + os.sep\n\n page_name = pathinfo[len(\"/\" + TRIGGER)+1:]\n\n if not page_name:\n return\n\n page_name = page_name.replace(\"/\", os.sep)\n\n if not page_name:\n return\n\n if page_name.endswith(os.sep):\n page_name = page_name[:-1]\n\n # if the page has a flavour, we use that. otherwise\n # we default to the wiki flavour\n page_name, flavour = os.path.splitext(page_name)\n if flavour:\n data[\"flavour\"] = flavour[1:]\n\n # wikifile should hold the absolute path on the file system to\n # the wiki file we're looking at. if it's in a parent directory\n # of wikidir, then we abort. \n wikifile = os.path.normpath(os.path.join(wikidir, page_name))\n if not wikifile.startswith(wikidir):\n logger.info(\"wiki file requested '%s' is not in wikidir.\" % wikifile)\n return []\n\n # we build our own config dict for the fileentry to kind of\n # fake it into loading this file correctly rather than\n # one of the entries.\n newdatadir = wikidir\n\n ext = tools.what_ext(data[\"extensions\"].keys(), wikifile)\n\n if not ext:\n logger.info(\"wiki file '%s' does not exist.\" % wikifile)\n return []\n\n data['root_datadir'] = page_name + '.' + ext\n data['bl_type'] = 'file'\n wikifile = wikifile + \".\" + ext\n\n if not os.path.isfile(wikifile):\n return []\n\n fe = FileEntry(req, wikifile, wikidir)\n\n # now we evaluate python code blocks\n body = fe.getData()\n body = eval_python_blocks(req, body)\n body = \"<!-- STATIC PAGE START -->\\n\\n%s\\n<!-- STATIC PAGE END -->\\n\" % body\n\n # now we evaluate for wikilinks\n body = connect_links(config[\"base_url\"],\n data[\"extensions\"].keys(),\n wikidir,\n body)\n\n fe.setData(body)\n\n fe[\"absolute_path\"] = TRIGGER\n fe[\"fn\"] = page_name\n fe[\"file_path\"] = TRIGGER + \"/\" + page_name\n fe[\"template_name\"] = \"wiki\"\n\n data['blog_title_with_path'] = \"%s : %s\" % \\\n (config.get(\"blog_title\", \"\"), fe.get(\"title_escaped\", \"\"))\n\n # set the datadir back\n config[\"datadir\"] = datadir\n\n return [fe]",
"def __loadListOfDocuments(self):\n\t\tfor value in default_paths():\n\t\t\titem = addNewListItemCalled([os.path.normpath(value)], self.ui.listWidget)\n\t\t\titem.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n\t\t\tt = item.font()\n\t\t\tt.setItalic(True)\n\t\t\titem.setFont(t)\n\n\t\t# load up state from storage...\n\t\tpaths = QSettings().value(\"paths\").toList()\n\t\tfor value in paths:\n\t\t\tstr = os.path.normpath(value.toString())\n\t\t\tif str not in default_paths():\n\t\t\t\taddNewListItemCalled([str], self.ui.listWidget, mutable=True)",
"def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])"
] | [
"0.67695117",
"0.6177163",
"0.617072",
"0.59774256",
"0.59301084",
"0.5873713",
"0.58602625",
"0.58550894",
"0.58476",
"0.58326143",
"0.57421196",
"0.5723155",
"0.57174045",
"0.5698926",
"0.56854516",
"0.56496686",
"0.5622283",
"0.56196785",
"0.5619165",
"0.55947644",
"0.5593981",
"0.5585803",
"0.5584352",
"0.5578536",
"0.5558146",
"0.5557192",
"0.5551609",
"0.55420125",
"0.55193764",
"0.55054903"
] | 0.7291427 | 0 |
gets the frame processor config files from the list of text files found | def get_fp_config_files(self):
    self.get_config_files()
    for file in self.txt_files:
        if "fp" in file:
            self.fp_config_files.append(file)
    return self.fp_config_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_fr_config_files(self):\n self.get_config_files()\n for file in self.txt_files:\n if \"fr\" in file:\n self.fr_config_files.append(file)\n return self.fr_config_files",
"def get_config_files(self):\n self.clear_lists()\n print self.abs_directory\n for file in os.listdir(self.abs_directory):\n print file\n if file.endswith('.json') and \"qemii\" in file:\n self.txt_files.append(file)",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def parseInputFileList (self) :\n filelist = []\n try :\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"#\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: cfg file \" , self.cfgName , \" not found\"\n return\n\n #return filelist",
"def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return",
"def get_files_for_processing():\n all_files = os.listdir(read_path)\n txt_files = list(filter(lambda filename: fnmatch.fnmatch(filename, '*.txt'), all_files))\n return txt_files",
"def __load_config_files(self):\n # Parsed configuration files\n # ==========================\n T_dict = ParsedParameterFile(self.config_path('0/T'))\n fv_solutions = ParsedParameterFile(self.config_path('system/fvSolution'))\n fv_schemes = ParsedParameterFile(self.config_path('system/fvSchemes'))\n control_dict = ParsedParameterFile(self.config_path('system/controlDict'))\n transport_props = ParsedParameterFile(self.config_path('constant/transportProperties'))\n\n # Registered files\n # ================\n self.foam_file('0/T', T_dict)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('system/fvSolution', fv_solutions)\n self.foam_file('system/fvSchemes', fv_schemes)\n self.foam_file('system/controlDict', control_dict)\n self.foam_file('constant/transportProperties', transport_props)",
"def _iter_configurations() -> Iterable[pathlib.Path]:\n for ext in CONFIGURATION_FILE_FORMATS:\n yield from HERE.rglob(f\"*{ext}\")",
"def _config_files():\n from .plugin import plugins\n return [p for p in (p.config_file() for p in plugins()) if p is not None]",
"def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')",
"def read_list_file(path_file):\n with open(path_file,'r') as f_in:\n lines = f_in.readlines()\n lines = [x for x in lines if not (x.strip() == '' or x.strip()[0] == '#')]\n left_file_list = []\n right_file_list = []\n gt_file_list = []\n conf_file_list = []\n for l in lines:\n to_load = re.split(',|;',l.strip())\n left_file_list.append(to_load[0])\n right_file_list.append(to_load[1])\n if len(to_load)>2:\n gt_file_list.append(to_load[2])\n if len(to_load)>3:\n conf_file_list.append(to_load[3])\n return left_file_list,right_file_list,gt_file_list,conf_file_list",
"def get_cfg_files(self):\n\t\tcfg_files = []\n\t\tfor config_object, config_value in self.maincfg_values:\n\t\t\t\n\t\t\t## Add cfg_file objects to cfg file list\n\t\t\tif config_object == \"cfg_file\" and os.path.isfile(config_value):\n\t\t\t\t\tcfg_files.append(config_value)\n\n\t\t\t## Parse all files in a cfg directory\n\t\t\tif config_object == \"cfg_dir\":\n\t\t\t\tdirectories = []\n\t\t\t\traw_file_list = []\n\t\t\t\tdirectories.append( config_value )\n\t\t\t\t# Walk through every subdirectory and add to our list\n\t\t\t\twhile len(directories) > 0:\n\t\t\t\t\tcurrent_directory = directories.pop(0)\n\t\t\t\t\t# Nagios doesnt care if cfg_dir exists or not, so why should we ?\n\t\t\t\t\tif not os.path.isdir( current_directory ): continue\n\t\t\t\t\tlist = os.listdir(current_directory)\n\t\t\t\t\tfor item in list:\n\t\t\t\t\t\t# Append full path to file\n\t\t\t\t\t\titem = \"%s\" % (os.path.join(current_directory, item.strip() ) )\n\t\t\t\t\t\tif os.path.islink( item ):\n\t\t\t\t\t\t\titem = os.readlink( item )\n\t\t\t\t\t\tif os.path.isdir(item):\n\t\t\t\t\t\t\tdirectories.append( item )\n\t\t\t\t\t\tif raw_file_list.count( item ) < 1:\n\t\t\t\t\t\t\traw_file_list.append( item )\n\t\t\t\tfor raw_file in raw_file_list:\n\t\t\t\t\tif raw_file.endswith('.cfg'):\n\t\t\t\t\t\tif os.path.exists(raw_file):\n\t\t\t\t\t\t\t'Nagios doesnt care if cfg_file exists or not, so we will not throws errors'\n\t\t\t\t\t\t\tcfg_files.append(raw_file)\n\n\t\treturn cfg_files",
"def search(self):\n files = os.listdir(self.filePath)\n txt_file = []\n for f in files:\n f_ext = f.split('.')[-1]\n if f_ext == self.flag:\n if self.flag == 'txt':\n txt_file.append(FileTxt(os.sep.join([self.filePath, f])))\n\n if self.flag == 'csv':\n txt_file.append(FileCsv(os.sep.join([self.filePath, f])))\n\n return txt_file",
"def get_configs_from_multiple_files():\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(FLAGS.train_config_path, 'r') as f:\n text_format.Merge(f.read(), train_config)\n\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f:\n text_format.Merge(f.read(), model_config)\n\n input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f:\n text_format.Merge(f.read(), input_config)\n\n return model_config, train_config, input_config",
"def extract_program_text(filename: str) -> List[str]:\n with open(filename) as conf:\n if filename.endswith('.tfstate'):\n configs = json.loads(conf.read())\n program_text = []\n resources = configs['modules'][0]['resources']\n for resource in resources:\n pattern = re.compile(\"signalform_detector.*\")\n if pattern.match(resource) is not None:\n program_text.append(\n re.sub(r'\\n +', '\\n', resources[resource]['primary']['attributes']['program_text']),\n )\n return program_text\n else:\n configs = conf.read()\n pattern = re.compile(r'program_text:.+(?:=>)?\\s+\\\"(.+)\\\"')\n return [re.sub(r'\\\\n +', '\\n', pattern_match) for pattern_match in re.findall(pattern, configs)]",
"def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return",
"def get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')",
"def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files",
"def get_already_processed_files(config: Config) -> list[str]:\n already_processed_files = []\n if os.path.exists(config.already_processed):\n with open(config.already_processed, 'r') as f:\n already_processed_files = f.read().splitlines()\n\n return already_processed_files",
"def _getDefaultConfigFiles(self, _os = os, _sys = sys):\n argv0 = util.filename.fromLocale(\n _sys.argv[0], self.runtime.path_encoding\n )\n if isinstance(argv0, unicode):\n candidates = [util.filename.toLocale(\n name, locale_enc = self.runtime.path_encoding\n ) for name in [\n _os.path.join(\n self.runtime.repository, u'conf', u'mailer.conf'\n ),\n _os.path.join(_os.path.dirname(argv0), u'mailer.conf'),\n u'/etc/svn-mailer.conf',\n ]\n ]\n else:\n # --path-encoding=none\n candidates = [\n _os.path.join(self.runtime.repository, 'conf', 'mailer.conf'),\n _os.path.join(_os.path.dirname(argv0), 'mailer.conf'),\n _os.path.join(_os.path.sep, \"etc\", \"svn-mailer.conf\"),\n ]\n\n return candidates",
"def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks",
"def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del 
capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def extract_files(self) -> list:\n pass",
"def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]",
"def get_detector_configurations(self):\n print('Reading detector configurations ...')\n list_of_detector_configurations = list()\n configuration_filepath = path.join(self.experiment_path, self.CONFIGURATIONS_INPUT_FILE)\n configuration_reader = csv.DictReader(open(configuration_filepath, 'r'))\n for detector_configuration in configuration_reader:\n for key in detector_configuration.keys():\n detector_configuration[key] = eval(detector_configuration[key])\n list_of_detector_configurations.append(detector_configuration)\n print('Finished reading configurations!')\n return list_of_detector_configurations",
"def get_file_list(start):\n valid_files = []\n for root, dirs, files in os.walk(start):\n for name in files:\n if name[-5:] == \".conf\":\n valid_files.append(os.path.join(root,name))\n return valid_files",
"def recognize_files(list_of_filenames):\n reg_exp = define_regex()\n pattern = re.compile(reg_exp) \n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched",
"def buildfilelist():\r\n for files in filelist:\r\n if os.path.splitext(files)[1]=='.dxf': #查找目录下的dxf文件,加入到readfilelist文件列表中 \r\n readfilelist.append(files)\r\n #feilin=file('feilin(ph).dxf','w') #新建一个文件,名字先占位用,后续改成由配置文件中读入名称。 \r",
"def _populate_params(self):\n self.params = []\n for root, dirs, files in os.walk(os.curdir):\n for file in files:\n fullfile = str(os.path.join(root, file))\n if self.config.regex_find_params.match(fullfile):\n self.params.append(fullfile)",
"def _findFiles(self, inputfolder):\n protofile, caffemodel = None, None\n files = os.listdir(inputfolder)\n for f in files:\n name, ext = splitext(f)\n if ext == '.caffemodel':\n caffemodel = join(inputfolder, f)\n elif f == 'deploy.prototxt':\n protofile = join(inputfolder, f)\n return protofile, caffemodel"
] | [
"0.6520556",
"0.63628197",
"0.63615507",
"0.6221763",
"0.5962912",
"0.5892015",
"0.5830065",
"0.5739405",
"0.57229286",
"0.56986266",
"0.5679086",
"0.5662258",
"0.5656805",
"0.5652418",
"0.5599099",
"0.5556959",
"0.5527652",
"0.5507178",
"0.5476617",
"0.5443827",
"0.5414869",
"0.53968036",
"0.5382331",
"0.5381769",
"0.5372437",
"0.5358118",
"0.53476703",
"0.53460705",
"0.5334813",
"0.53312916"
] | 0.68645674 | 0 |
clears the text file, fr and fp config file lists | def clear_lists(self):
    self.fp_config_files = []
    self.txt_files = []
    self.fr_config_files = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear():",
"def clean_files(self):\n self.filenames.clear()",
"def clear_specific(self):\n self.specific_file = None\n self.specific_parser = None\n\n self.specific_box.delete(0, END)",
"def _clear_variables( self ):\r\n self.navigation = None\r\n self.resPath = None\r\n self.resolutions = None\r\n self.currentResolution = None\r\n self.resolution = None\r\n for doc in self.include_doc:\r\n try: doc.unlink()\r\n except: pass",
"def clear():\n inputText.delete(\"1.0\", END)\n outputText.config(state = NORMAL)\n outputText.delete(\"1.0\", END)\n outputText.config(state = DISABLED)\n periodText.config(state = NORMAL)\n periodText.delete(\"1.0\", END)\n periodText.config(state = DISABLED)\n frequencyText.config(state = NORMAL)\n frequencyText.delete(\"1.0\", END)\n frequencyText.config(state = DISABLED)\n execText.config(state = NORMAL)\n execText.delete(\"1.0\", END)\n execText.config(state = DISABLED)\n registerList = []\n counterList = [0, 0, 0]",
"def clean():\n clean_files()",
"def clear(self):\n\n for a in self.formats + self.other_clear:\n setattr(self, a, None)\n self.filename = None\n self.timestamp = None\n self.lastfail = None",
"def clearRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n self.setSection(CFG_RECENT, str(n), None)",
"def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)",
"def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()",
"def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"",
"def reset(self):\n self.files = []\n self.regions = []\n self.headers = {}\n self.radial_data = []\n self.histogram_data = []\n self.p2p_data = []\n self.ptable = None",
"def clear_all(self):\n\n self.general_file = None\n self.general_parser = None\n\n self.specific_file = None\n self.specific_parser = None\n\n self.audio_file = None\n self.audio_parser = None\n\n self.video_file = None\n self.video_parser = None\n\n\n self.top_unique_num = None\n\n self.general_box.delete(0, END)\n self.specific_box.delete(0, END)\n self.audio_box.delete(0, END)\n self.video_box.delete(0, END)\n self.top_unique_audio_box.delete(0, END)\n self.top_unique_video_box.delete(0, END)\n\n self.top_unique_audio_entry.delete(0, END)\n self.top_unique_video_entry.delete(0, END)\n\n if self.missing_files_label is not None:\n self.missing_files_label.grid_remove()\n if self.no_month_selected_label is not None:\n self.no_month_selected_label.grid_remove()\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n if self.cant_export_label is not None:\n self.cant_export_label.grid_remove()",
"def clear(self):\r\n if self.fs_type == 'FAT':\r\n for file_entry in self.metadata.get_files():\r\n file_metadata = file_entry['metadata']\r\n file_metadata = FATAllocatorMeta(file_metadata)\r\n self.fs.clear(file_metadata)\r\n elif self.fs_type == 'NTFS':\r\n for file_entry in self.metadata.get_files():\r\n file_metadata = file_entry['metadata']\r\n file_metadata = NTFSAllocatorMeta(file_metadata)\r\n self.fs.clear(file_metadata)\r\n else:\r\n raise NotImplementedError()",
"def clear_form(self):\n self.lst_state_item = None\n self.lst_file_item = None\n self.txt_state.setText(\"\")\n self.txt_file.setText(\"\")\n self.lbl_image.setText(\"\")\n self.frm_edit.setEnabled(False)\n self.tbl_symbols.clearSelection()\n self.preview = False",
"def clear(self):\n self.raster_path_line.clear()\n self.labels_path.clear()\n self.shapefile_path.clear()\n self.costumelabels.clear()\n self.layer_name.clear()\n self.class_name.clear()\n self.idfield.clear()",
"def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))",
"def clear(self):\n with open(self.config_file, mode='w') as f:\n data = {}\n d = json.dumps(data)\n f.write(d)",
"def clear_files_paths(self):\n del self.__files_paths[:]",
"def clear_previous_selections(self):\n self.headers = []\n self.filename = ''\n self.x_axis = ''\n self.y_axis = ''\n self.delim = ''\n self.non_numeric_x_axis = False\n self.count_desired = False",
"def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass",
"def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)",
"def clear_config():\n check_config()\n fs.truncate(PYWS_DIR_BIN)",
"def clean_up(self):\n self.fname = None\n self.failed_files = []\n self.custom_failed = []\n self.results = None",
"def clear(self):\r\n\t\tself.tokens = []\r\n\t\tself.source = \"\"\r\n\t\tself.index = 0",
"def clearAllSettings(self) -> None:\n ...",
"def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])",
"def clear(self):",
"def clear(self):",
"def clear(self):"
] | [
"0.6580808",
"0.656783",
"0.6490422",
"0.64671326",
"0.6438465",
"0.64325815",
"0.6405157",
"0.63748187",
"0.6326406",
"0.63253486",
"0.6313617",
"0.6293512",
"0.6253082",
"0.6251729",
"0.62491286",
"0.6137878",
"0.61237454",
"0.6122098",
"0.6118917",
"0.6106766",
"0.61034346",
"0.6071016",
"0.60666364",
"0.6038928",
"0.6037392",
"0.6032959",
"0.6030835",
"0.60051334",
"0.60051334",
"0.60051334"
] | 0.8425523 | 0 |
Find the overlap areas between each cartesian bin and each polar bin. | def get_overlap_values(self, cbins, rbins, thbins):
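    # Radial and angular bin widths; only the first ceil(thbins / 2) angular
    # bins are processed in the sweep below.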
    dr = (cbins - 0.5) / rbins
    dth = (pi / 2) / thbins
    thbins_reduced = int(ceil(thbins / 2))
    def overlap_value(x, y, r, th):
        """
        Find the overlap area between a cartesian and a polar bin.
        """
        thmin = max(th - dth/2, atan2(y - 0.5, x + 0.5))
        thmax = min(th + dth/2, atan2(y + 0.5, x - 0.5))
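        # Radial limits at angle theta: the polar bin's edges clipped to the
        # cell's sides; integrating rout**2 - rin**2 over theta gives twice the
        # overlap area.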
        rin = lambda theta: maximum(r - dr/2, maximum((x - 0.5) / npcos(theta), (y - 0.5) / npsin(theta)))
        rout = lambda theta: minimum(r + dr/2, minimum((x + 0.5) / npcos(theta), (y + 0.5) / npsin(theta)))
        integrand = lambda theta: maximum(rout(theta)**2 - rin(theta)**2, 0)
        return 0.5 * quad(integrand, thmin, thmax)[0]
    expected = int(pi*rbins**2)
    rs = empty(expected, dtype=int)
    ths = empty(expected, dtype=int)
    xs = empty(expected, dtype=int)
    ys = empty(expected, dtype=int)
    vals = empty(expected, dtype=float)
    found = 0
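    # Sweep each polar bin and, for it, only the cartesian cells its annular
    # sector can reach; the x/y ranges are bounded by the sector's extents.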
    for thi in arange(thbins_reduced):
        th = (thi + 0.5) * dth
        for ri in arange(rbins):
            r = (ri + 0.5) * dr
            for x in arange(round((r - dr/2) * cos(th + dth/2)), min(cbins, round((r + dr/2) * cos(th - dth/2)) + 1)):
                for y in arange(round((r - dr/2) * sin(th - dth/2)), min(cbins, round((r + dr/2) * sin(th + dth/2)) + 1)):
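                    # Keep only cells whose corners can actually intersect this
                    # annular sector (radial and angular extents must overlap).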
                    if ((x - 0.5)**2 + (y - 0.5)**2 < (r + dr/2)**2) and \
                       ((x + 0.5)**2 + (y + 0.5)**2 > (r - dr/2)**2) and \
                       (atan2(y + 0.5, x - 0.5) > th - dth/2) and \
                       (atan2(y - 0.5, x + 0.5) < th + dth/2):
                        area = overlap_value(x, y, r, th)
                        if area > 0:
                            rs[found] = ri
                            ths[found] = thi
                            xs[found] = x
                            ys[found] = y
                            vals[found] = area
                            found += 1
    return rs[:found], ths[:found], xs[:found], ys[:found], vals[:found] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def face_area(lon_b, lat_b, r_sphere = 6.375e6):\n \n # Convert inputs to radians\n lon_b_rad = lon_b * np.pi / 180.0\n lat_b_rad = lat_b * np.pi / 180.0\n \n r_sq = r_sphere * r_sphere\n n_cs = lon_b.shape[1] - 1\n \n # Allocate output array\n cs_area = np.zeros((n_cs,n_cs))\n \n # Ordering\n valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1\n \n for i_lon in range(n_cs):\n for i_lat in range(n_cs):\n lon_corner = np.zeros(4)\n lat_corner = np.zeros(4)\n xyz_corner = np.zeros((4,3))\n for i_vert in range(4):\n x_lon = i_lon + (i_vert > 1)\n x_lat = i_lat + (i_vert == 0 or i_vert == 3)\n lon_corner[i_vert] = lon_b_rad[x_lon,x_lat]\n lat_corner[i_vert] = lat_b_rad[x_lon,x_lat]\n for i_vert in range(4):\n xyz_corner[i_vert,:] = ll2xyz(lon_corner[i_vert],lat_corner[i_vert])\n tot_ang = 0.0\n for i_corner in range(4):\n curr_combo = valid_combo[i_corner,:]\n xyz_mini = np.zeros((3,3))\n for i_mini in range(3):\n xyz_mini[i_mini,:] = xyz_corner[curr_combo[i_mini],:]\n curr_ang = sphere_angle(xyz_mini[0,:],xyz_mini[1,:],xyz_mini[2,:])\n tot_ang += curr_ang\n cs_area[i_lon,i_lat] = r_sq * (tot_ang - (2.0*np.pi))\n \n return cs_area",
"def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1",
"def bins_crossed(self, position_in_grid, endpoint_in_grid):\n bins_crossed = Set()\n if position_in_grid[0] == endpoint_in_grid[0]:\n # movement is in y direction\n for y_coord in self.get_range(\n position_in_grid[1],\n endpoint_in_grid[1],\n ):\n bins_crossed.add((position_in_grid[0], y_coord))\n elif position_in_grid[1] == endpoint_in_grid[1]:\n # movement is in x direction\n for x_coord in self.get_range(\n position_in_grid[0],\n endpoint_in_grid[0],\n ):\n bins_crossed.add((x_coord, position_in_grid[1]))\n\n else:\n raise ValueError(\"Diagonal movement\")\n\n return bins_crossed",
"def overlap_borders(self, chunk):\n # determine the common intersect slices within the chunk\n borders = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n borders.append((idx, -1))\n elif s.stop == b.stop:\n borders.append((idx, 1))\n return borders",
"def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)",
"def get_overlap_blocks(self):\n bv = self.base_g.new_vertex_property(\"vector<int>\")\n bc_in = self.base_g.new_vertex_property(\"vector<int>\")\n bc_out = self.base_g.new_vertex_property(\"vector<int>\")\n bc_total = self.base_g.new_vertex_property(\"vector<int>\")\n self._state.get_bv_overlap(self.base_g._Graph__graph,\n _prop(\"v\", self.base_g, bv),\n _prop(\"v\", self.base_g, bc_in),\n _prop(\"v\", self.base_g, bc_out),\n _prop(\"v\", self.base_g, bc_total))\n return bv, bc_in, bc_out, bc_total",
"def getArea(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n area = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n p0 = latlon2ecef(self._toplats[ind],\n self._toplons[ind],\n self._topdeps[ind])\n p1 = latlon2ecef(self._toplats[ind + 1],\n self._toplons[ind + 1],\n self._topdeps[ind + 1])\n p2 = latlon2ecef(self._botlats[ind + 1],\n self._botlons[ind + 1],\n self._botdeps[ind + 1])\n p3 = latlon2ecef(self._botlats[ind],\n self._botlons[ind],\n self._botdeps[ind])\n a = np.sqrt((p1[0] - p0[0])**2 +\n (p1[1] - p0[1])**2 +\n (p1[2] - p0[2])**2)\n b = np.sqrt((p2[0] - p0[0])**2 +\n (p2[1] - p0[1])**2 +\n (p2[2] - p0[2])**2)\n c = np.sqrt((p2[0] - p1[0])**2 +\n (p2[1] - p1[1])**2 +\n (p2[2] - p1[2])**2)\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n a = np.sqrt((p0[0] - p3[0])**2 +\n (p0[1] - p3[1])**2 +\n (p0[2] - p3[2])**2)\n b = np.sqrt((p2[0] - p3[0])**2 +\n (p2[1] - p3[1])**2 +\n (p2[2] - p3[2])**2)\n c = np.sqrt((p0[0] - p2[0])**2 +\n (p0[1] - p2[1])**2 +\n (p0[2] - p2[2])**2)\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n area = area + (A1 + A2) / 1000 / 1000\n return area",
"def computeArea(self, A, B, C, D, E, F, G, H):\n R1 = [(A, B), (C, D), (A, D), (C, B)]\n R2 = [(E, F), (G, H), (E, H), (G, F)]\n R1_left, R1_top, R1_bot, R1_right = A, D, B, C\n R2_left, R2_top, R2_bot, R2_right = E, H, F, G\n\n A1 = abs(D - B) * (C - A)\n A2 = abs(H - F) * (G - E)\n\n R1_in = []\n for i in R1:\n if self.isIn(i, R2):\n R1_in.append(i)\n R1_in = sorted(R1_in)\n\n R2_in = []\n for i in R2:\n if self.isIn(i, R1):\n R2_in.append(i)\n R2_in = sorted(R2_in)\n\n def overlap(R1_in, R2_in):\n if len(R1_in) == 0 and len(R2_in) == 0:\n if R2_top > R1_top and R2_bot < R1_bot and R1_left < R2_left and R1_right > R2_right:\n return (R2_right - R2_left) * (R1_top - R1_bot)\n elif R1_top > R2_top and R1_bot < R2_bot and R2_left < R1_left and R2_right > R1_right:\n return (R1_right - R1_left) * (R2_top - R2_bot)\n else:\n return 0\n\n elif len(R1_in) == 1 and len(R2_in) == 1:\n C1, C2 = R1_in[0], R2_in[0]\n overlap = abs(C1[0] - C2[0]) * abs(C1[1] - C2[1])\n return overlap\n\n elif len(R1_in) == 2:\n R1a, R1b = R1_in[0], R1_in[1]\n xa, ya, xb, yb = R1a[0], R1a[1], R1b[0], R1b[1]\n if xa == xb:\n h = abs(ya - yb)\n if R2_left < R1_right < R2_right:\n w = R1_right - R2_left\n elif R2_left < R1_left < R2_right:\n w = R2_right - R1_left\n elif R1_left < R2_left < R1_right:\n w = R1_right - R2_left\n elif R1_left < R2_right < R1_right:\n w = R2_right - R1_left\n else:\n w = 0\n return h * w\n if ya == yb:\n w = abs(xa - xb)\n if R2_bot < R1_bot < R2_top:\n h = R2_top - R1_bot\n elif R2_bot < R1_top < R2_top:\n h = R1_top - R2_bot\n elif R1_bot < R2_bot < R1_top:\n h = R1_top - R2_bot\n elif R1_bot < R2_top < R1_top:\n h = R2_top - R1_bot\n else:\n h = 0\n return h * w\n\n elif len(R2_in) == 2:\n R2a, R2b = R2_in[0], R2_in[1]\n xa, ya, xb, yb = R2a[0], R2a[1], R2b[0], R2b[1]\n if xa == xb:\n h = abs(ya - yb)\n if R1_left < R2_left < R1_right:\n w = R1_right - R2_left\n elif R1_left < R2_right < R1_right:\n w = R2_right - R1_left\n elif R2_left < R1_left < R2_right:\n w = R2_right - R1_left\n elif R2_left < R1_right < R2_right:\n w = R1_right - R2_left\n else:\n w = 0\n return h * w\n if ya == yb:\n w = abs(xa - xb)\n if R1_bot < R2_top < R1_top:\n h = R2_top - R1_bot\n elif R1_bot < R2_bot < R1_top:\n h = R1_top - R2_bot\n elif R2_bot < R1_top < R2_top:\n h = R1_top - R2_bot\n elif R2_bot < R1_bot < R2_top:\n h = R2_top - R1_bot\n else:\n h = 0\n return h * w\n\n elif len(R1_in) == 4:\n return A1\n elif len(R2_in) == 4:\n return A2\n\n return A1 + A2 - overlap(R1_in, R2_in)",
"def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)",
"def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps",
"def _count_overlap(*rectangles): # Expanded args to help memoization\n\n raise Exception(\"I don't work\")\n\n if len(rectangles) <= 1:\n return 0\n #print(rectangles)\n\n overlap_sum = 0\n rectangles_ = list(rectangles) # Make a copy to destructively iterate on\n while rectangles_: # Iterate\n new_overlap = []\n (ixmin, ixmax), (iymin, iymax) = map(sorted, zip(*rectangles_.pop(0))) # Destructively\n for rectangle in rectangles_:\n (jxmin, jxmax), (jymin, jymax) = map(sorted, zip(*rectangle))\n min_xmax, min_ymax = min(ixmax, jxmax), min(iymax, jymax)\n max_xmin, max_ymin = max(ixmin, jxmin), max(iymin, jymin)\n if min_xmax > max_xmin and min_ymax > max_ymin: # Rectangles overlap\n if (ixmax, iymax) == (jxmax, jymax) and \\\n (ixmin, iymin) == (jxmin, jymin): # Identical rectangles\n overlap_sum += _count_rects(((ixmax, iymax), (ixmin, iymin)))\n else:\n new_overlap.append(((min_xmax, min_ymax), (max_xmin, max_ymin)))\n if new_overlap:\n overlap_sum += sum(map(_count_rects, new_overlap)) - _count_overlap(*new_overlap)\n return overlap_sum",
"def calculate_genomic_area(self, counts, intervals):\n # compute area of each cell in the interval grid\n intervals = np.array([0] + intervals, dtype=np.float32) / 1.0e6\n areas = np.zeros([len(intervals) - 1, len(intervals) - 1], dtype=np.float32)\n for row in xrange(1, len(intervals)):\n for col in xrange(row, len(intervals)):\n areas[row - 1, col - 1] = (intervals[row] - intervals[row - 1]) * (intervals[col] - intervals[col - 1])\n if col > row:\n areas[col - 1, row - 1] = areas[row - 1, col - 1]\n\n areas_masked = OrderedDict()\n denom = np.sum(np.array(self.sizes) / 1.0e6) ** 2\n for combo, vals in enumerate(counts):\n factor = 1\n areas_masked.update({str(subspecies.to_string(combo, True)): np.sum((vals > 0) * areas * factor) / denom})\n return areas_masked",
"def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)",
"def bin_centers(radial_bins):\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)",
"def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1",
"def patch_areas(patch_ids):\n\n return np.bincount(patch_ids.reshape((-1,)))[1:]",
"def iou_bbox(bboxes1, bboxes2):\n bboxes1 = np.array(bboxes1, np.float32)\n bboxes2 = np.array(bboxes2, np.float32)\n \n intersection_min_y = np.maximum(bboxes1[:, 0], bboxes2[:, 0])\n intersection_max_y = np.minimum(bboxes1[:, 0] + bboxes1[:, 2] - 1, bboxes2[:, 0] + bboxes2[:, 2] - 1)\n intersection_height = np.maximum(intersection_max_y - intersection_min_y + 1, np.zeros_like(bboxes1[:, 0]))\n\n intersection_min_x = np.maximum(bboxes1[:, 1], bboxes2[:, 1])\n intersection_max_x = np.minimum(bboxes1[:, 1] + bboxes1[:, 3] - 1, bboxes2[:, 1] + bboxes2[:, 3] - 1)\n intersection_width = np.maximum(intersection_max_x - intersection_min_x + 1, np.zeros_like(bboxes1[:, 1]))\n\n area_intersection = intersection_height * intersection_width\n area_first = bboxes1[:, 2] * bboxes1[:, 3]\n area_second = bboxes2[:, 2] * bboxes2[:, 3]\n area_union = area_first + area_second - area_intersection\n \n iou = area_intersection * 1.0 / area_union\n iof = area_intersection * 1.0 / area_first\n ios = area_intersection * 1.0 / area_second\n\n return iou, iof, ios",
"def bbox_overlaps(bboxes1, bboxes2, mode='iou'):\n\n from icv.data.core.bbox import BBox\n assert mode in ['iou', 'iof']\n\n bboxes1 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes1])\n bboxes2 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes2])\n\n bboxes1 = bboxes1.astype(np.float32)\n bboxes2 = bboxes2.astype(np.float32)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n ious = np.zeros((rows, cols), dtype=np.float32)\n if rows * cols == 0:\n return ious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n ious = np.zeros((cols, rows), dtype=np.float32)\n exchange = True\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\n for i in range(bboxes1.shape[0]):\n x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(\n y_end - y_start + 1, 0)\n if mode == 'iou':\n union = area1[i] + area2 - overlap\n else:\n union = area1[i] if not exchange else area2\n ious[i, :] = overlap / union\n if exchange:\n ious = ious.T\n return ious",
"def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;",
"def areas(self):\n\n height_delta = (np.cos(self.polar_corners[:-1, :-1]) - np.cos(self.polar_corners[:-1, 1:]))\n azimuth_delta = (self.azimuthal_corners[1:, 1:] - self.azimuthal_corners[:-1, 1:])\n\n return height_delta * azimuth_delta",
"def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]",
"def overlap(cir1x, cir1y, rad1, cir2x, cir2y, rad2):\n radius = rad1 + rad2\n compare = ((cir2y - cir1y)**2 + (cir2x - cir1x)**2)**0.5\n if compare > radius:\n print \"no overlapping\"\n else:\n print \"overlapping\"",
"def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges",
"def fit_galaxy(self, ypos, xpos, r_in, r_out = 0):\r\n count_out = []\r\n count_in = []\r\n for j, i in product(np.arange(ypos - (r_out + r_in), ypos + r_out + r_in + 1),np.arange(xpos - (r_out + r_in), xpos + 1 + r_out + r_in)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r_in ** 2 and 0<= j <= self.shapes[0] - 1 and 0<= i <= self.shapes[1] - 1: # make sure points are in a circle\r\n j = int(j)\r\n i = int(i)\r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]:\r\n count_in.append(self.raw_image_data[j,i])\r\n self.masked[j,i] = 0 # self.mask_region runs the for loop again\r\n if r_in ** 2 < (j - ypos) ** 2 + (i - xpos) ** 2 <= (r_in + r_out)**2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1: # in the outer ring\r\n j = int(j)\r\n i = int(i)\r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]: \r\n count_out.append(self.raw_image_data[j][i]) \r\n self.masked[j,i]\r\n return count_in, count_out",
"def getAreas(self, RA, Dec, minRA, maxRA, minDec, maxDec):\n # Create area from these\n # RA is in [0.,360.]\n # special treatement near RA~0\n ax = None\n\n areaList = []\n if ax is not None:\n areapList = []\n\n if maxRA >= 360.:\n # in that case two areas necessary\n areaList.append(area(minRA, 360., minDec, maxDec))\n areaList.append(area(0.0, maxRA-360., minDec, maxDec))\n else:\n if minRA < 0.:\n # in that case two areas necessary\n areaList.append(area(minRA+360., 360., minDec, maxDec))\n areaList.append(area(-1.e-8, maxRA, minDec, maxDec))\n else:\n areaList.append(area(minRA, maxRA, minDec, maxDec))\n\n return areaList",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def bin_volumes(radial_bins):\n\n single_vol = lambda x: (4.0 / 3.0) * np.pi * x ** 3\n outer = single_vol(radial_bins[1:])\n inner = single_vol(radial_bins[:-1])\n return outer - inner",
"def calc_overlap(self, start, stop):\n\n overlaps = []\n for s in self.map:\n e = self.map[s]\n if s >= start or s <= stop:\n # We found an overlap\n if e <= stop:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": s, \"stop\": stop})\n elif e >= start or e <= stop:\n if s >= start:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": start, \"stop\": e})\n return overlaps"
] | [
"0.6044802",
"0.5895329",
"0.58889174",
"0.58684486",
"0.5833127",
"0.58256036",
"0.5822762",
"0.58167046",
"0.5789582",
"0.5789443",
"0.57723266",
"0.5764114",
"0.5745368",
"0.57442003",
"0.57348084",
"0.57329583",
"0.56990653",
"0.56981635",
"0.5677911",
"0.5641722",
"0.5640506",
"0.5637746",
"0.56142473",
"0.55897677",
"0.5584177",
"0.5578119",
"0.5578119",
"0.5578119",
"0.55677044",
"0.55640036"
] | 0.72021544 | 0 |
Simply a setter for the switch_state variable | def switch_to_state(self, state):
self.switch_state = state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_state(self, state: int):",
"def set_state(self,s):\n self.state = s",
"def set_state(self, value):\n self.state = value",
"def set_state( self ):",
"def __change_state(self, state):\n self.state = state",
"def _set_state(self, state):\n #print(\"** set state from %d to %d\" % (self.state, state))\n self.state = state",
"def set_state(self, state):\n self.state = state",
"def set_switch(self, value):\n act = SwitchAction(self, value)\n return act.invoke()",
"def set_state(self,state):\n self.__state = state",
"def _set_switch(self, switch, state):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = str(id[0]) + str(id[1]) + str(state)\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.WRITE_DELAY)\n # read switch after setting it, to confirm switching\n try:\n self._get_switch(switch)\n except SwitchError:\n raise SwitchError(\"Reading switch after switching was \"\n \"unsuccessful: Indicators of the switch show \"\n f\"{switch.indicators}.\")\n # raise error, if the switching was not successful\n if switch.state != state:\n raise SwitchError(\"Setting the switch was unsuccessful. The \"\n f\"switch should be in state {state}, but \"\n f\"the indicators show state {switch.state}.\")",
"def set_state(self, state: int):\n self.state = state",
"def bcp_switch(self, name, state, **kwargs):\n if int(state):\n self.events.post('switch_' + name + '_active')\n else:\n self.events.post('switch_' + name + '_inactive')",
"def setState(self, state):\n self.state = state",
"def __setstate__(self, state):\n return None",
"def set_state(self, new_state):\n self.state = new_state",
"def SetState(self, new_state):\r\n\r\n self.state = new_state",
"def change_state(self):\n new_state = 0 if self.state.state == 1 else 1\n answer = UsbHost.send_query(self.state.ser, \"SetState\", str(self.state.device_id), new_state)\n if answer in wrong_answers:\n error_message(\"Не удалось сменить состояние\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.clearMessage()\n self.state.state = new_state\n if new_state == 1:\n self.set_auto_active()\n if new_state == 0:\n self.set_hand_active()",
"def assign_state(self, state):\n raise NotImplementedError()",
"def set_state(self, state):\n self.state = state\n self.config(fill=self.state)",
"def manualState(self, tfid, state):\n self.trafficLights.get(int(tfid)).setState(state)\n self.trafficLights.get(int(tfid)).updateState()",
"def __setstate__(self, state):\n\n self.set(DER = state)",
"def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)",
"def state(self, state):\n self._state = state",
"def on_state_change(self, new_state):\n self.state = new_state",
"def state(self, state: str) -> None:",
"def state(self, state: str) -> None:\n self._state = state",
"def set_state(self, state: Any) -> None:\n raise NotImplementedError(\n 'This environment has not implemented `set_state()`.'\n )",
"def change_state(self,state):\n if self.__currentState:\n self.__currentState.stop()\n \n try:\n idler=self[state]\n except KeyError:\n raise \"%s is not a state of %s\" % (state,self)\n \n self.__currentState=idler()\n self.__currentState.idle()\n self.__currentState=None",
"def setstate(self,name,state):\n if (name not in KFNode.names):\n print ' state name ',name,' not in KNode!'\n self.states[name]=state.copy()\n self.status = name\n return",
"def set_state(self, uState):\n self.strategy['state_handler'].set_state(self.state, uState)"
] | [
"0.7938014",
"0.77415305",
"0.77284354",
"0.7591089",
"0.7483043",
"0.74070084",
"0.74032426",
"0.73946637",
"0.7236219",
"0.722214",
"0.72149825",
"0.7191996",
"0.71756476",
"0.7167509",
"0.7059652",
"0.7028504",
"0.70257914",
"0.69464743",
"0.6940021",
"0.69133204",
"0.68645734",
"0.685395",
"0.6823454",
"0.6810233",
"0.6785652",
"0.6754591",
"0.6753355",
"0.6740043",
"0.66116273",
"0.6582"
] | 0.7992266 | 0 |
uses the USA Today API to extract the list of sub-APIs for music reviews | def obtain_USAToday_APIs(api="http://api.usatoday.com/open/reviews/music?count=1000&api_key=mhph6f4afgvetbqtex4rs22a"):
import requests
r = requests.get(api)
jsonfile = r.json()
    key = list(jsonfile.keys())
    data = jsonfile[key[0]]
    keys = data.keys()
api_dict = {}
nData = '?count=1000&'
for i in keys:
ls_str = data[i].split('?')
new_one = ls_str[0]+nData+ls_str[1]
        if i in api_dict:
continue
else:
api_dict[i] = new_one
return api_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, request, format=None):\n albums = MusicAlbum.objects.all()\n # trending = \n # popular = albums[6:]\n recommendations = serializers.ReadMusicAlbumSerializer(\n albums[:2], many=True, context={'request': request}\n )\n trending = serializers.ReadMusicAlbumSerializer(\n albums[2:6], many=True, context={'request': request}\n )\n popular = serializers.ReadMusicAlbumSerializer(\n albums[6:26], many=True, context={'request': request}\n )\n return Response({\n 'recommendations': recommendations.data,\n 'trending': trending.data,\n 'popular': popular.data\n })",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj",
"def newsapi(country):\n\turlnews=urlcountry\n\turl=urlnews+country\n\turlapi=url+'&'+'apiKey='\n\turlcoun=urlapi+apikey\n\tresponse=requests.get(urlcoun)\n\tdata=response.json()\n\treturn data",
"def get_tv_listings():\n\n #get user email from session\n email = session.get(\"current_user\")\n\n if email: \n #get user_id to get access to favorites table and users table\n user = User.get_user_with_email(email)\n\n #use the backref relationship to find the titles of the user's favorite shows and save in a list\n favorite_titles = []\n for favorite in user.favorites:\n favorite_titles.append(favorite.show.title)\n\n #create list that will contain dictionaries with show title and a list of dictionaries regarding tv listings\n listings = []\n\n for title in favorite_titles:\n show = {}\n #convert title from unicode to string to run API call\n title_str = str(title)\n series_id = onconnect_search_series_id(title_str)\n print \"\\n\\n\", series_id, \"\\n\\n\"\n airings = onconnect_search_airings(series_id)\n #add show title to dictionary, add airings object to dictionary\n show[\"title\"] = title_str\n if airings:\n show[\"listings\"] = airings\n else:\n show[\"listings\"] = [\"empty\"]\n\n #add dictionary to the listings list\n listings.append(show)\n time.sleep(1)\n\n \n listings = jsonify(listings)\n\n return listings\n\n else:\n flash(\"Please login first!\")\n return redirect('/login')",
"def getRecommendationSonglist(self, limit = 20, offset = 0, total = True):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n 'limit' : limit,\n 'offset': offset,\n 'total' : total\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]",
"def get_show_list():\n\n url = 'https://api.transistor.fm/v1/shows'\n r = httpx.get(url, headers=header)\n typer.echo([(x['id'], x['attributes']['title']) for x in r.json()['data']])",
"def getSongs(self, songIDList):\n for i, songID in enumerate(songIDList):\n if not isinstance(songID, str):\n songIDList[i] = str(songID)\n\n currAPIVersion = self.config['apiVersion']\n #currAPIVersion = 0\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n if currAPIVersion == 0:\n currDict = {\n 'ids' : repr(songIDList).replace(\" \", \"\").replace(\"'\", \"\").replace(\"\\\"\", \"\"),\n }\n if currAPIVersion == 1:\n currDict = {\n #'c' : json.dumps([{ \"ids\" : songIDList}]).replace(\" \", \"\"),\n 'ids' : repr(songIDList).replace(\" \", \"\").replace(\"'\", \"\").replace(\"\\\"\", \"\"),\n #'c' : json.dumps([{ \"id\" : [int(x) for x in songIDList]}]).replace(\" \", \"\"),\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n #print currR\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n#modify\n sortedData = range(len(songIDList))\n for song in currR['songs']:\n sortedData[songIDList.index(str(song['id']))] = song\n\n for i, song in enumerate(sortedData):\n if isinstance(song, int):\n sortedData[i] = {}\n #raise NEApiError, \"not all songdetails are responsed back here.\"\n\n currR['songs'] = sortedData\n return currR, currAPIURL[2]",
"def apiExample():\n Client_ID = '5b42d8679569faa6e359a3f562e8cd76'\n api_endpoint = \"http://api.soundcloud.com/tracks/13158665?client_id=%s\" \\\n % Client_ID\n request = urllib2.urlopen(api_endpoint)\n data = json.load(request)\n return data",
"def test_discover(self):\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)",
"def __init__(self):\r\n self.apiroot = 'http://ws.spotify.com/'\r\n self.add_filter(self.use_json)",
"def search_adapter(json_response):\n\n def get_tracks():\n ret = {\"result\": []}\n for item in json_response['tracks']['items']:\n ret[\"result\"].append(json_to_track_info(item))\n return ret\n\n def get_albums():\n ret = {\"result\": []}\n for item in json_response['albums']['items']:\n album = item['name']\n artist = item['artists'][0]['name']\n album_id = item['uri']\n ret[\"result\"].append(\n {\"album\": album, \"artist\": artist, \"album_id\": album_id})\n return ret\n\n def get_artists():\n ret = {\"result\": []}\n for item in json_response['artists']['items']:\n artist = item['name']\n artist_id = item['uri']\n ret[\"result\"].append({\"artist\": artist, \"id\": artist_id})\n return ret\n\n if json_response.get('tracks', None):\n return get_tracks()\n\n if json_response.get('albums', None):\n return get_albums()\n\n if json_response.get('artists', None):\n return get_artists()\n\n return json_response",
"def main():\n\n rapidapi_key = os.getenv(\"RAPIDAPIKEY\")\n geniuslyrics_key = os.getenv(\"GENIUSLYRICSKEY\")\n spotify_client_id = os.getenv(\"SPOTIFYCLIENTID\")\n spotify_secret_key = os.getenv(\"SPOTIFYSECRETKEY\")\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Fetching songs and artist\")\n\n years = [year for year in range(1960, 2016)]\n urls = create_urls(years)\n songs_df = fetch_and_parse(urls, years)\n\n logger.info(\"Adding 2013 info from data/raw/ (nasty format in the website)\")\n\n # have to use ; for separator as song names contain commas\n songs_df_2013 = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"2013_top_100.csv\"), sep=\";\"\n )\n songs_df = pd.concat([songs_df, songs_df_2013], ignore_index=True)\n\n songs_df[\"lyrics\"] = \"Not searched\"\n songs_df[\"lyrics_source\"] = None\n\n logger.info(\"Saving song and artist data to disk\")\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), index=False, sep=\";\"\n )\n\n logger.info(\"Fetching song lyrics\")\n\n songs_df = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\"\n )\n\n songs_amount = len(songs_df)\n fetched_songs = 0\n\n for row_index, row in songs_df.iterrows():\n logger.info(f\"Song {row_index + 1} / {songs_amount}\")\n\n if row[\"lyrics\"] == \"Not searched\" or row[\"lyrics\"] == \"Not found\":\n\n # slowing down requests so that we cause no trouble\n time.sleep(0.5)\n\n lyric, source = get_lyric_from_apis(\n artist=row[\"artist\"],\n song_title=row[\"song\"],\n rapidapi_key=rapidapi_key,\n geniuslyrics_key=geniuslyrics_key,\n )\n songs_df.iloc[row_index, songs_df.columns.get_loc(\"lyrics\")] = lyric\n songs_df.iloc[row_index, songs_df.columns.get_loc(\"lyrics_source\")] = source\n\n fetched_songs += 1\n print(lyric)\n\n # saving every after every 100 fetched lyrics\n if fetched_songs > 0 and fetched_songs % 100 == 0:\n print(\"Saving progress\")\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"),\n sep=\";\",\n index=False,\n )\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\", index=False\n )\n\n songs_df = pd.read_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\"\n )\n\n logger.info(\"Fetching audio features from Spotify API\")\n\n audio_features_df = get_spotify_audiofeatures(\n artists=songs_df[\"artist\"],\n song_titles=songs_df[\"song\"],\n spotify_client_id=spotify_client_id,\n spotify_secret_key=spotify_secret_key,\n )\n songs_df = pd.concat([songs_df, audio_features_df], axis=\"columns\")\n\n logger.info(\"Saving final dataset to disk\")\n\n songs_df.to_csv(\n os.path.join(\"data\", \"raw\", \"billboard100_1960-2015.csv\"), sep=\";\", index=False\n )",
"def get_categories():\n bu = 'http://www.watchonlinemovies.com.pk'\n r = requests.get(bu, headers=mozhdr)\n if r.url != bu:\n bu = r.url\n items = {'ARecently Uploaded Movies': bu,\n 'B2018 Movies': bu + 'category/indian-movies/2018-full-movies/',\n 'C2018 English Movies': bu + 'category/hollywood-movies/2018-movies-hollywood/',\n 'D[COLOR yellow]** Search **[/COLOR]': bu + '?s=',\n 'Z[COLOR red]Note: This addon is no longer supported, please install WatchOnlineMovies-New from ReasonsRepository [/COLOR]': 'book'}\n \n return items",
"def fetchAlbumInfo(album_id):\n url = 'https://api.spotify.com/v1/albums/' + album_id\n req = requests.get(url)\n\n data = req.json() \n\n if not req.ok:\n print \"error : \" + data['error']['message']\n return {}\n\n\n #create a new dictionary\n album_info_dict = {}\n #keys for the dictionary\n album_info_dict['artist_id'] = data['artists'][0]['id']\n album_info_dict['album_id'] = album_id\n album_info_dict['name'] = data['name']\n album_info_dict['year'] = data['release_date'][0:4]\n album_info_dict['popularity'] = int(data['popularity']) #Spotify's popularity-meter, an integer\n\n return album_info_dict",
"def gets_artist_and_song_json_from_en_api(artist_and_song):\n\n en_key = os.environ['ECHO_NEST_API_KEY']\n\n print \"seed gets json from en api \", artist_and_song\n\n en_payload = {'title': artist_and_song[1], 'artist': artist_and_song[0]}\n\n r = requests.get(\"http://developer.echonest.com/api/v4/song/search?api_key=%(en_key)s&format=json&results=1&\" % locals(), params=en_payload)\n \n # Debugging print statement\n print (r.url)\n\n # binds dictionary from get request to variable\n dict_from_en_api = r.json()\n\n # Debugging print statement\n pprint(dict_from_en_api)\n\n return dict_from_en_api",
"def getTrends(): \n api = authentication()\n names = [i.name for i in api.GetTrendsCurrent()]\n stringTrends = [i.strip('#') for i in names and ]\n trends = [i for i in stringTrends if i != \"\"]\n return trends",
"def test_search(self):\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)",
"def get_requests():\n global response\n\n #Set the parameters fot the request\n url = \"https://api.nasa.gov/planetary/apod\"\n api_key = \"DEMO_KEY\" #Use your own key\n date = calender.get_date()\n\n querystring = {'api_key':api_key, 'date':date}\n\n #Call the request and turn it into a python usable format\n response = requests.request(\"GET\", url, params=querystring)\n response = response.json()\n\n #Update output label\n set_info()",
"def top_artists_from_API(api_results):\r\n df = pd.DataFrame(api_results[\"items\"])\r\n cols = [\"name\",\"id\",\"genres\",\"popularity\",\"uri\"]\r\n return df[cols]",
"def get(self):\n mb = MusicbrainzClient()\n query = self.get_argument('q')\n artists, tracks = yield [mb.search_artists(query),\n mb.search_tracks(query)]\n data = {\n 'artists': [\n {\n 'id': artist['id'],\n 'artist': artist['name'],\n 'note': artist.get('disambiguation', '')\n }\n for artist in artists['artist-list']\n ],\n 'tracks': [\n {\n 'id': track['id'],\n 'title': track['title'],\n 'artist': track['artist-credit-phrase']\n }\n for track in tracks['recording-list']\n ]\n }\n self.finish(data)",
"def main():\n\n print(\"Retreiving BBC playlists for dates between {} and {}\".\n format(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\")))\n\n # Get daily schedule URLs within date range\n radio6_schedule_list = helpers.bbc_daily_schedule_urls(bbc_radio6_url, helpers.get_date_list(start_date, end_date))\n\n # Get all show URLS\n all_program_urls = []\n for url in radio6_schedule_list:\n all_program_urls += helpers.bbc_program_urls(url)\n\n # Get all track playlists from program URLs\n track_lists = []\n for url in all_program_urls:\n program_playlist = helpers.get_playlist(url)\n track_lists.append(program_playlist)\n\n print(track_lists)\n return track_lists",
"def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None",
"def crawl_meta_data(api, limit=None):\n app_lists = _discover_apps(api)\n for app_list in app_lists:\n sub_category = app_list.subcategory.proto\n for app in app_list:\n app.proto.category.CopyFrom(sub_category)\n LOGGER.info(app)\n app.write_to_file()\n LOGGER.info(sub_category)",
"def __get_movies(title):\n params = {\n 's': title,\n 'type': 'movie'\n }\n\n response = requests.get(API_URL + API_KEY, params=params).json()\n return response",
"def get_tracks_from_json(jsons):\n\n items = jsons.get(\"items\")\n if not items:\n dbg(\"got unexpected data or no search results\")\n return False\n\n # fetch detailed information about items from videos API\n qs = {'part':'contentDetails,statistics,snippet',\n 'id': ','.join([get_track_id_from_json(i) for i in items])}\n\n wdata = call_gdata('videos', qs)\n\n items_vidinfo = wdata.get('items', [])\n # enhance search results by adding information from videos API response\n for searchresult, vidinfoitem in zip(items, items_vidinfo):\n searchresult.update(vidinfoitem)\n\n # populate list of video objects\n songs = []\n for item in items:\n\n try:\n\n ytid = get_track_id_from_json(item)\n duration = item.get('contentDetails', {}).get('duration')\n\n if duration:\n duration = ISO8601_TIMEDUR_EX.findall(duration)\n if len(duration) > 0:\n _, hours, _, minutes, _, seconds = duration[0]\n duration = [seconds, minutes, hours]\n duration = [int(v) if len(v) > 0 else 0 for v in duration]\n duration = sum([60**p*v for p, v in enumerate(duration)])\n else:\n duration = 30\n else:\n duration = 30\n\n stats = item.get('statistics', {})\n snippet = item.get('snippet', {})\n title = snippet.get('title', '').strip()\n # instantiate video representation in local model\n cursong = Video(ytid=ytid, title=title, length=duration)\n likes = int(stats.get('likeCount', 0))\n dislikes = int(stats.get('dislikeCount', 0))\n #XXX this is a very poor attempt to calculate a rating value\n rating = 5.*likes/(likes+dislikes) if (likes+dislikes) > 0 else 0\n category = snippet.get('categoryId')\n\n # cache video information in custom global variable store\n g.meta[ytid] = dict(\n # tries to get localized title first, fallback to normal title\n title=snippet.get('localized',\n {'title':snippet.get('title',\n '[!!!]')}).get('title',\n '[!]'),\n length=str(fmt_time(cursong.length)),\n rating=str('{}'.format(rating))[:4].ljust(4, \"0\"),\n uploader=snippet.get('channelId'),\n uploaderName=snippet.get('channelTitle'),\n category=category,\n aspect=\"custom\", #XXX\n uploaded=yt_datetime(snippet.get('publishedAt', ''))[1],\n likes=str(num_repr(likes)),\n dislikes=str(num_repr(dislikes)),\n commentCount=str(num_repr(int(stats.get('commentCount', 0)))),\n viewCount=str(num_repr(int(stats.get('viewCount', 0)))))\n\n except Exception as e:\n\n dbg(json.dumps(item, indent=2))\n dbg('Error during metadata extraction/instantiation of search ' +\n 'result {}\\n{}'.format(ytid, e))\n\n songs.append(cursong)\n\n get_page_info_from_json(jsons, len(songs))\n\n # return video objects\n return songs",
"def gettrackinfo(accesstoken, playlist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n offset = 0\n\n needattributes = [track.trackid for track in playlist.tracks]\n\n while offset < len(needattributes):\n params = {'ids': ','.join(needattributes[offset:100+offset])}\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n\n response = r.json()\n\n if \"audio_features\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait correct amount\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n needinfo = True\n while needinfo:\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n response = r.json()\n if \"audio_features\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print('no error response')\n return(None)\n\n for i in range(len(response[\"audio_features\"])):\n try:\n playlist.tracks[i+offset].danceability = response[\"audio_features\"][i][\"danceability\"]\n playlist.tracks[i+offset].energy = response[\"audio_features\"][i][\"energy\"]\n playlist.tracks[i+offset].key = response[\"audio_features\"][i][\"key\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].mode = response[\"audio_features\"][i][\"mode\"]\n playlist.tracks[i+offset].speechiness = response[\"audio_features\"][i][\"speechiness\"]\n playlist.tracks[i+offset].acousticness = response[\"audio_features\"][i][\"acousticness\"]\n playlist.tracks[i+offset].instrumentalness = response[\"audio_features\"][i][\"instrumentalness\"]\n playlist.tracks[i+offset].liveness = response[\"audio_features\"][i][\"liveness\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].valence = response[\"audio_features\"][i][\"valence\"]\n playlist.tracks[i+offset].tempo = response[\"audio_features\"][i][\"tempo\"]\n playlist.tracks[i+offset].duration_ms = response[\"audio_features\"][i][\"duration_ms\"]\n playlist.tracks[i+offset].time_signature = response[\"audio_features\"][i][\"time_signature\"]\n except Exception as e:\n print('error: error getting attributes from returned JSON')\n print('this piece of json looks like:\\n{}'.format(response[\"audiofeatures\"][i]))\n\n offset = offset + len(response[\"audio_features\"])\n\n\n # t.printattributes()",
"def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)",
"def test_event_track_searchbytrack(self):\n\n flag = \"user\"\n api = \"event.track.searchbytrack\"\n sale_chance_id = 1\n\n result = self.access_api(flag = flag, api = api, sale_chance_id = sale_chance_id)\n self.assertTrue('data_list' in result)\n print(result[\"data_list\"])",
"def get_tracks(self):\n artist = self.get_request_arg(\"artist\")\n album = self.get_request_arg(\"album\")\n if not (album and artist):\n return self.resp_from_data(\n {\"message\": \"Please specify a valid artist and album\"}, 403)\n else:\n tracks = self.ctrl.library.get_tracks(artist, album)\n return self.resp_from_data(tracks)",
"def test_get_all_reviews_user_2_reviews(self):\n # Get the User's Auth Token.\n url = '/api-token-auth/'\n data = {'username': 'carlos', 'password': '123'}\n response = Client().post(url, data)\n content = json.loads(response.content)\n user_token = content['token']\n\n # Prepare the header with the client's token.\n http_authorization = 'Token %s' % user_token\n client = Client(HTTP_AUTHORIZATION=http_authorization)\n\n # GET the Reviews.\n response = client.get('/reviews/')\n self.assertEqual(response.status_code, 200)\n\n # Check if only reviews related to the user were retrieved.\n content = json.loads(response.content)\n expected = {\n 'count': 2,\n 'next': None,\n 'previous': None,\n 'results': [\n {\n 'id': 2,\n 'rating': 3,\n 'title': 'Could be better',\n 'summary': 'I am a little disappointed',\n 'submission_date': '2020-10-12',\n 'ip_address': '127.0.0.1',\n 'reviewer': 2,\n 'company': 1\n },\n {\n 'id': 3,\n 'rating': 2,\n \"title\": \"Not good\",\n \"summary\": \"I won't buy again!\",\n 'submission_date': '2020-10-12',\n 'ip_address': '127.0.0.1',\n 'reviewer': 2,\n 'company': 2\n }\n ]\n }\n self.assertDictEqual(content, expected)"
] | [
"0.5990308",
"0.5970837",
"0.576632",
"0.5740902",
"0.5699372",
"0.5680496",
"0.5655822",
"0.55319315",
"0.5443329",
"0.54422873",
"0.5438654",
"0.5432013",
"0.5431028",
"0.5429935",
"0.54219544",
"0.5420579",
"0.54025465",
"0.5399872",
"0.5391939",
"0.5383238",
"0.5366309",
"0.53573817",
"0.5354289",
"0.5335172",
"0.53306586",
"0.53240496",
"0.53235865",
"0.5318475",
"0.53176093",
"0.529176"
] | 0.66950834 | 0 |
convert any item to a numpy ndarray | def to_ndarray(item):
return type(item), sp.array(item, sp.float64, ndmin=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))",
"def convert_to_ndarray(entity):\n if isinstance(entity, np.ndarray) and entity.dtype.kind in set('biufc'):\n # entity is numerical ndarray already\n return entity\n if isinstance(entity, np.ndarray) and isinstance(entity.flat[0], qt.Qobj):\n # entity is output from qt.eigenstates\n return convert_esys_to_ndarray(entity)\n if isinstance(entity, list) and isinstance(entity[0], np.ndarray) and isinstance(entity[0].flat[0], qt.Qobj):\n # entity is a list of qt.eigenstates\n return np.asarray([convert_esys_to_ndarray(entry) for entry in entity])\n # possibly we have a list of numerical values or a list of ndarrays\n converted_entity = np.asarray(entity)\n if converted_entity.dtype.kind not in set('biufc'):\n raise TypeError('Unable to convert data to numerical numpy array: ', entity)\n return converted_entity",
"def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr",
"def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data",
"def as_numpy(a):\n if isinstance(a, mx.nd.NDArray):\n a = a.asnumpy()\n return a",
"def to_numpy(x):\n if isinstance(x, list):\n return [to_numpy(e) for e in x]\n elif isinstance(x, np.ndarray):\n return x\n elif isinstance(x, (pd.DataFrame, pd.Series)):\n return x.values\n elif get_backend() == 'pytorch':\n return x.detach().numpy()\n else:\n return x.numpy()",
"def _asarray(v):\n try:\n return np.asarray(v)\n except ValueError:\n return np.asarray(v, dtype=object)",
"def arrayobj1d(inp: Iterable, copy=False) -> np.ndarray:\n return np.array([None] + list(inp), dtype=object, copy=copy)[1:]",
"def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x",
"def _as_numpy(y):\n if y is None:\n return None\n elif isinstance(y, np.ndarray):\n return np.copy(y)\n elif hasattr(y, 'as_matrix'):\n return y.as_matrix()\n elif hasattr(y, 'tolist'):\n return y.tolist()\n elif is_iterable(y):\n return np.asarray([i for i in y]) # might accidentally force object type in 3\n raise TypeError('cannot convert type %s to numpy ndarray' % type(y))",
"def to_numpy(x):\r\n return x.squeeze().detach().cpu().numpy()",
"def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:\n assert a is not None, \"Empty result cannot be converted to numpy\"\n return [x.numpy() for x in a]",
"def asarray(self):\n from numpy import asarray\n return asarray(self)",
"def to_numpy(x):\n if isinstance(x, np.ndarray): \n return x\n if isinstance(x, Variable):\n x = x.data\n return x.cpu().numpy()",
"def to_numpy(x: Union[torch.Tensor, np.ndarray, Any, None]) -> Union[np.ndarray, None]:\n if x is None:\n return None\n elif torch.is_tensor(x):\n return x.data.cpu().numpy()\n elif isinstance(x, np.ndarray):\n return x\n elif isinstance(x, (Iterable, int, float)):\n return np.array(x)\n else:\n raise ValueError(\"Unsupported type\")",
"def scalararray(inp) -> np.ndarray:\n return np.array([None, inp], dtype=object)[[1]].reshape([])",
"def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs",
"def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs",
"def make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:\n if isinstance(x, np.ndarray):\n return x\n if np.isscalar(x):\n return np.array([x])\n if isinstance(x, Tensor):\n return x.detach().cpu().numpy()\n raise NotImplementedError(\n \"Got {}, but numpy array, scalar, or torch tensor are expected.\".format(type(x))\n )",
"def toarray(x):\n if is_SparseDataFrame(x):\n x = x.to_coo().toarray()\n elif is_SparseSeries(x):\n x = x.to_dense().to_numpy()\n elif isinstance(x, (pd.DataFrame, pd.Series, pd.Index)):\n x = x.to_numpy()\n elif isinstance(x, sparse.spmatrix):\n x = x.toarray()\n elif isinstance(x, np.matrix):\n x = x.A\n elif isinstance(x, list):\n x_out = []\n for xi in x:\n try:\n xi = toarray(xi)\n except TypeError:\n # recursed too far\n pass\n x_out.append(xi)\n # convert x_out from list to array\n x = np.array(x_out, dtype=_check_numpy_dtype(x_out))\n elif isinstance(x, (np.ndarray, numbers.Number)):\n pass\n else:\n raise TypeError(\"Expected array-like. Got {}\".format(type(x)))\n return x",
"def arg2array(arg):\n if isinstance(arg, (matrix, ndarray)):\n s = arg.shape\n if len(s) == 1:\n return array(arg)\n if min(s) == 1:\n return array(arg).flatten()\n \n elif isinstance(arg, list):\n return array(arg)\n \n elif isinstance(arg, (int, float, float32, float64)):\n return array([arg])\n \n raise ValueError",
"def to_numpy(self, **kwargs):\n pass",
"def __array__(self):\n return np.asarray(self.data)",
"def _asarray(self, vec):\n shape = self.domain[0][0].shape + self.pshape\n arr = np.empty(shape, dtype=self.domain.dtype)\n for i, xi in enumerate(vec):\n for j, xij in enumerate(xi):\n arr[..., i, j] = xij.asarray()\n\n return arr",
"def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()",
"def array(self):\n return np.asarray(self)",
"def cell2array(v: Union[nptyp.ArrayLike, Sequence]) -> np.ndarray:\n if not isinstance(v, np.ndarray):\n v = np.array(v)\n shape = v.shape + v.flatten()[0].shape\n v = v.flatten()\n return np.stack([v1.astype(v[0].dtype) for v1 in v]).reshape(shape)",
"def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)",
"def __array__(self):\n return self.to_array()",
"def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:\n assert a is not None, \"Empty result cannot be converted to TVM NDArray\"\n return [tvm.nd.array(x) for x in a]"
] | [
"0.7383202",
"0.712074",
"0.70498645",
"0.70083153",
"0.69416726",
"0.69294995",
"0.6854728",
"0.6834907",
"0.6817344",
"0.6804671",
"0.6770331",
"0.66576916",
"0.6650874",
"0.6647591",
"0.6646724",
"0.6613207",
"0.6582173",
"0.6582173",
"0.65404516",
"0.6507793",
"0.64623237",
"0.64524543",
"0.6418887",
"0.64044476",
"0.63660145",
"0.6355584",
"0.6334472",
"0.6331703",
"0.6313372",
"0.63070637"
] | 0.7740508 | 0 |
Compute the ROUGE-N score of a peer with respect to one or more models, for a given value of `n`. | def rouge_n(peer, models, n, alpha=1):
matches = 0
recall_total = 0
peer_counter = _ngram_counts(peer, n)
for model in models:
model_counter = _ngram_counts(model, n)
matches += _counter_overlap(peer_counter, model_counter)
recall_total += _ngram_count(model, n)
precision_total = len(models) * _ngram_count(peer, n)
return _safe_f1(matches, recall_total, precision_total, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_score(self, n_episodes=5):\n\n # Score is computed via aggregate over multiple episodes\n score = 0\n\n for _ in range(n_episodes):\n score += play_episode(self.model, self.env)\n\n return score / n_episodes",
"def compute_rouge_n(output, reference, n=1, mode='f'):\n assert mode in list('fpr') # F-1, precision, recall\n match = _n_gram_match(reference, output, n)\n if match == 0:\n score = 0.0\n else:\n precision = match / len(list(make_n_grams(output, n)))\n recall = match / len(list(make_n_grams(reference, n)))\n f_score = 2 * (precision * recall) / (precision + recall)\n if mode == 'p':\n score = precision\n elif mode == 'r':\n score = recall\n else:\n score = f_score\n return score",
"def rouge_n(eval_sentences, ref_sentences, n=2):\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n eval_ngrams = _get_ngrams(n, eval_sentence)\n ref_ngrams = _get_ngrams(n, ref_sentence)\n ref_count = len(ref_ngrams)\n eval_count = len(eval_ngrams)\n\n # Count the overlapping ngrams between evaluated and reference\n overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)\n overlapping_count = len(overlapping_ngrams)\n\n # Handle edge case. This isn't mathematically correct, but it's good enough\n if eval_count == 0:\n precision = 0.0\n else:\n precision = float(overlapping_count) / eval_count\n if ref_count == 0:\n recall = 0.0\n else:\n recall = float(overlapping_count) / ref_count\n f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))\n\n # return overlapping_count / reference_count\n return np.mean(f1_scores, dtype=np.float32)",
"def rouge_2(peer, models, alpha=1):\n return rouge_n(peer, models, 2, alpha)",
"def get_score(self, n: int) -> float:\n # _logger.info(f'AutoMLPredictResponse function called with {n}')\n return dotty(self.json)[f'predictions.0.detection_scores.{n}']",
"def rouge_3(peer, models, alpha=1):\n return rouge_n(peer, models, 3, alpha)",
"def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None",
"def rouge_1(peer, models, alpha=1):\n return rouge_n(peer, models, 1, alpha)",
"def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result",
"def get_n_best(self):\n pass",
"def run(self, n: int, verbose: bool = False):\n self.__start_generation()\n self.__calculate_fitness()\n for i in range(n):\n self.__next_generation()\n self.__calculate_fitness()\n if verbose:\n self.__show(i)\n return self.population.fittest_two_individual(self.population.individuals)",
"def rouge_n(reference_sentences, evaluated_sentences, n=2):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)\n reference_ngrams = _get_word_ngrams(n, reference_sentences)\n reference_count = len(reference_ngrams)\n evaluated_count = len(evaluated_ngrams)\n\n # Gets the overlapping ngrams between evaluated and reference\n overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)\n overlapping_count = len(overlapping_ngrams)\n\n # Handle edge case. This isn't mathematically correct, but it's good enough\n if evaluated_count == 0:\n precision = 0.0\n else:\n precision = overlapping_count / evaluated_count\n\n if reference_count == 0:\n recall = 0.0\n else:\n recall = overlapping_count / reference_count\n\n f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))\n\n #just returning recall count in rouge, useful for our purpose\n return recall",
"def model(**params):\n N_frb = 0\n vs = []\n hs = []\n cs = []\n ncands = []\n\n for cand in candlist:\n c_res = calculate_metric_terms(\n cand, cluster_function=cluster_function, debug=False, plot=False, **params\n )\n t, frb_found, h, c, v = c_res\n vs.append(v)\n hs.append(h)\n cs.append(c)\n ncands.append(t)\n\n if frb_found:\n N_frb += 1\n\n vs = np.array(vs)\n hs = np.array(hs)\n cs = np.array(cs)\n c_avg = np.average(cs, axis=0, weights=ncands)\n h_avg = np.average(hs, axis=0, weights=ncands)\n v_avg = np.average(vs, axis=0, weights=ncands)\n recall = N_frb / len(vs)\n score = v_avg * recall\n\n return score",
"def rouge_n(evaluated_sentences, reference_sentences, n=2):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)\n reference_ngrams = _get_word_ngrams(n, reference_sentences)\n reference_count = len(reference_ngrams)\n evaluated_count = len(evaluated_ngrams)\n\n # Gets the overlapping ngrams between evaluated and reference\n overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)\n overlapping_count = len(overlapping_ngrams)\n\n # Handle edge case. This isn't mathematically correct, but it's good enough\n if evaluated_count == 0:\n precision = 0.0\n else:\n precision = overlapping_count / evaluated_count\n\n if reference_count == 0:\n recall = 0.0\n else:\n recall = overlapping_count / reference_count\n\n f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))\n\n # return overlapping_count / reference_count\n return f1_score, precision, recall",
"def rouge_n(evaluated_sentences, reference_sentences, n=2):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)\n reference_ngrams = _get_word_ngrams(n, reference_sentences)\n reference_count = len(reference_ngrams)\n evaluated_count = len(evaluated_ngrams)\n\n # Gets the overlapping ngrams between evaluated and reference\n overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)\n overlapping_count = len(overlapping_ngrams)\n\n # Handle edge case. This isn't mathematically correct, but it's good enough\n if evaluated_count == 0:\n precision = 0.0\n else:\n precision = overlapping_count / evaluated_count\n\n if reference_count == 0:\n recall = 0.0\n else:\n recall = overlapping_count / reference_count\n\n f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))\n\n # return overlapping_count / reference_count\n return f1_score, precision, recall",
"def computeRmse(model, data, n):\n print \"RESULT_data:%s \" % ((data.map(lambda x: (x[0], x[1]))).take(50))\n predictions1 = model.predictAll(data.map(lambda x: (x[0], x[1])))\n print \"RESULT1: %s\" % predictions1\n predictionsAndRatings = predictions1.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n #print \"RESULT2: %s\" % predictions1.take(11)\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))",
"def train(self, n):\n t = self.t\n\n parallel_sentences = list(zip(self.target,self.source))\n\n for i in range(n):\n\n count = defaultdict(lambda:defaultdict(int))\n s_total = dict()\n total = defaultdict(int)\n\n for E,F in parallel_sentences:\n # compute normalization\n for e in E:\n t_e = t[e]\n s_total[e] = 0\n for f in F:\n s_total[e] += t_e[f]\n\n # collect counts\n for e in E:\n count_e = count[e]\n t_e = t[e]\n s_total_e = s_total[e]\n for f in F:\n tmp = t_e[f] / s_total_e\n count_e[f] += tmp\n total[f] += tmp\n\n # estimate probabilities\n for e in self.t_words:\n t_e = t[e]\n count_e = count[e]\n #for f in self.s_words:\n for f in count_e:\n #if f not in count[e]: continue\n t_e[f] = count_e[f] / total[f]",
"def run_tournament_(genes):\n\n n_genes = len(genes)\n scores = np.zeros(n_genes, dtype=np.uint32)\n for i, j in itertools.combinations(range(n_genes), 2):\n s_i, s_j = run_duel(genes[i], genes[j])\n scores[i] += s_i\n scores[j] += s_j\n continue\n\n return scores / (n_genes - 1)",
"def report(results, n_top=1):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(f\"Model with rank: {i}\")\n print(f\"Mean validation score: {results['mean_test_score'][candidate]} (std: {results['std_test_score'][candidate]}\")\n print(f\"Parameters: {results['params'][candidate]}\")",
"def computeRmse(model, data, n):\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))",
"def calculate_rn_ratios(vn_event_arrays):\n vn_event_arrays = array(vn_event_arrays)\n rn_arrays = []\n for iorder in range(3, 6):\n # compute r2, r3, r4\n rn_array = []\n for itrig in range(3, len(vn_event_arrays[0, :, 0])):\n pT_trig = real(vn_event_arrays[0, itrig, 0])\n dN_trig = real(vn_event_arrays[:, itrig, 1])\n Qn_trig_array = dN_trig*vn_event_arrays[:, itrig, iorder]\n nev = len(Qn_trig_array)\n\n denorm2_dN = dN_trig*(dN_trig - 1.)\n denorm2_array = abs(Qn_trig_array)**2. - dN_trig\n\n for iasso in range(0, itrig+1):\n pT_asso = real(vn_event_arrays[0, iasso, 0])\n dN_asso = real(vn_event_arrays[:, iasso, 1])\n Qn_asso_array = dN_asso*vn_event_arrays[:, iasso, iorder]\n\n num_dN = dN_trig*dN_asso\n num_array = real(Qn_asso_array*conj(Qn_trig_array))\n if iasso == itrig:\n num_dN -= dN_asso\n num_array = (real(Qn_asso_array*conj(Qn_trig_array))\n - dN_asso)\n\n denorm1_dN = dN_asso*(dN_asso - 1.)\n denorm1_array = abs(Qn_asso_array)**2. - dN_asso\n\n rn_jackknife = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n\n num = mean(num_array[array_idx])/mean(num_dN[array_idx])\n denorm1 = (mean(denorm1_array[array_idx])\n /mean(denorm1_dN[array_idx]))\n denorm2 = (mean(denorm2_array[array_idx])\n /mean(denorm2_dN[array_idx]))\n\n if denorm1 > 0. and denorm2 > 0.:\n rn_jackknife[iev] = num/sqrt(denorm1*denorm2)\n\n rn_mean = mean(rn_jackknife)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_jackknife - rn_mean)**2.))\n rn_array.append([pT_trig - pT_asso, rn_mean, rn_err])\n rn_arrays.append(rn_array)\n rn_arrays = array(rn_arrays)\n return(rn_arrays)",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n",
"def network_relevance(valp, orig_res):\n assert isinstance(valp, MNM)\n comps = list(valp.components.keys())\n\n results = np.zeros((len(comps), 3))\n\n for i, n in enumerate(comps):\n\n ws, bs = valp.sess.run([valp.components[n].List_weights, valp.components[n].List_bias]) # Save trained weights\n rws = [np.random.normal(0, 0.1, x.shape) for x in ws] # Get random weights and biases to test importance of networks\n rbs = [np.random.normal(0, 0.1, x.shape) for x in bs]\n\n # Change to random values\n feed_dict_w = {p: v for (p, v) in zip(valp.components[n].w_phs, rws)}\n feed_dict_b = {p: v for (p, v) in zip(valp.components[n].b_phs, rbs)}\n valp.sess.run(valp.components[n].w_assigns, feed_dict_w)\n valp.sess.run(valp.components[n].b_assigns, feed_dict_b)\n\n # Evaluate\n results[i] = evaluate_model(valp)/orig_res\n # Restore original values\n feed_dict_w = {p: v for (p, v) in zip(valp.components[n].w_phs, ws)}\n feed_dict_b = {p: v for (p, v) in zip(valp.components[n].b_phs, bs)}\n valp.sess.run(valp.components[n].w_assigns, feed_dict_w)\n valp.sess.run(valp.components[n].b_assigns, feed_dict_b)\n print(results)\n rank = np.concatenate([ranking(results[:, i]) for i in range(results.shape[1])]).reshape(results.shape, order=\"F\")\n # From here on, the criterion is still raw\n print(rank)\n rank[rank <= lim] = 0\n rank[rank > lim] = 1\n rank[results < 1.03] = 0\n # [0.1, 1] normalization, avoid using [0, 1]\n results -= np.min(results, axis=0)\n results /= (np.max(results, axis=0)/0.9)\n results += 0.1\n\n results = 1/np.prod(results, axis=1)\n\n results = results/np.sum(results)\n return comps, results, rank",
"def test_network(n, test_data_list):\n\n print(\"Testing...\")\n\n # scorecard for how well the network performs, initially empty\n scorecard = []\n\n # go through all the records in the test data set\n for record in test_data_list:\n # split the record by the ',' commas\n all_values = record.split(',')\n # correct answer is first value\n correct_label = int(all_values[0])\n # scale and shift the inputs\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n # query the network\n outputs = n.query(inputs)\n # the index of the highest value corresponds to the label\n label = numpy.argmax(outputs)\n # append correct or incorrect to list\n if (label == correct_label):\n # network's answer matches correct answer, add 1 to scorecard\n scorecard.append(1)\n else:\n # network's answer doesn't match correct answer, add 0 to scorecard\n scorecard.append(0)\n\n return scorecard",
"def n_gram_rouge(self,RTSummary,SystemSummary, n = 1):\n rouge_scores = dict()\n RT_tokens = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(RTSummary))\n SS_tokens = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(SystemSummary))\n RTSummary= set(self.iter_ngrams(RT_tokens,n))\n SystemSummary= set(self.iter_ngrams(SS_tokens,n))\n complete_l = RTSummary.intersection(SystemSummary)\n rouge_score = self.get_f1((len(complete_l)),RTSummary,SystemSummary)\n\n return rouge_score",
"def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res",
"def plotScoreFromN(X_test, y_test, model, path=''):\n # list of accuracy scores\n scores = []\n tr_scores = []\n\n ns = [] # list of n values\n best_score = 0 # the best accuracy score\n best_score_n = 0 # the n value of the best accuracy score\n modelNew = clone(model) # cloned model\n\n # use for loop to find the best n (num of elements) and best score for the PCA\n for n in range(10, 40):\n model = make_pipeline(PCA(n_components=n), clone(modelNew))\n score = cross_val_score(model, X_test, y_test, cv=3).mean()\n\n # compare the current score and the best score to find the best cross validation score\n if score > best_score:\n best_score = score\n best_score_n = n\n ns.append(n)\n scores.append(score)\n\n # transform the testing dataset by using PCA\n transf = PCA(n_components=n).fit_transform(X_test)\n # train the model with the transformed testing dataset\n modelNew.fit(transf, y_test)\n # make predictions\n y_pred = modelNew.predict(transf)\n # calculate the accuracy score, and append the calculated score to the list\n tr_scores.append(accuracy_score(y_test, y_pred))\n\n\n # generate plot\n\n plt.plot(ns, scores, label=\"Testing score\")\n plt.plot(ns, tr_scores, label=\"Training score\")\n plt.title(\"Number of required components as a function of model score\")\n plt.xscale\n smallest = ns[0] # get the first element of the ns, which is the smallest integer in the ns\n largest = ns[(len(ns) - 1)] # get the last element of ns, which is the largest integer in the ns\n plt.xticks(range(smallest, largest, 5))\n plt.xlabel(\"Number of components\")\n plt.axvline(x= best_score_n, c='black', label=\"Best testing score (score=\" + str(round(best_score, 2)) + \", x=\" + str(best_score_n) + \")\")\n plt.ylabel(\"Cross-validation score\")\n plt.legend()\n\n # save figure as an image file.\n plt.savefig(path)\n\n return best_score, best_score_n",
"def nbest(self, score_fn, n):\n return [p for p, s in self.score_ngrams(score_fn)[:n]]",
"def report_grid_score(grid_scores, n_top=3):\n top_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)[:n_top]\n for i, score in enumerate(top_scores):\n print(\"Model with rank: {0}\".format(i + 1))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n score.mean_validation_score,\n np.std(score.cv_validation_scores)))\n print(\"Parameters: {0}\".format(score.parameters))\n print(\"\")"
] | [
"0.6221026",
"0.61857086",
"0.59200484",
"0.5913296",
"0.58413684",
"0.58119303",
"0.5691505",
"0.56906444",
"0.5665036",
"0.5646627",
"0.5615947",
"0.5566597",
"0.55650103",
"0.55646986",
"0.55566496",
"0.5514124",
"0.5488513",
"0.5460889",
"0.545565",
"0.54523516",
"0.5446015",
"0.5430289",
"0.5426672",
"0.5409011",
"0.5372712",
"0.53522575",
"0.5351159",
"0.53210306",
"0.53007716",
"0.52990955"
] | 0.7420437 | 0 |
Compute the ROUGE-1 (unigram) score of a peer with respect to one or more models. | def rouge_1(peer, models, alpha=1):
return rouge_n(peer, models, 1, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rouge_l(peer, models, alpha=1):\n matches = 0\n recall_total = 0\n for model in models:\n matches += lcs(model, peer)\n recall_total += len(model)\n precision_total = len(models) * len(peer)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_2(peer, models, alpha=1):\n return rouge_n(peer, models, 2, alpha)",
"def rouge_n(peer, models, n, alpha=1):\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_3(peer, models, alpha=1):\n return rouge_n(peer, models, 3, alpha)",
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def f1_score(model_id, test_set_id, rubric_id):\n result = {'true_positive': 0, 'false_positive': 0, 'true_negative': 0, 'false_negative': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_result(model_id, test_set_id, rubric_id)\n\n for key in rubrication_result:\n if rubrication_result[key] == answers[key]:\n if rubrication_result[key] == 1:\n result['true_positive'] += 1\n else:\n result['true_negative'] += 1\n else:\n if rubrication_result[key] == 1:\n result['false_positive'] += 1\n else:\n result['false_negative'] += 1\n if (result['true_positive'] + result['false_positive']) > 0:\n result['precision'] = result['true_positive'] / (result['true_positive'] + result['false_positive'])\n else:\n result['precision'] = 0\n if (result['true_positive'] + result['false_negative']) > 0:\n result['recall'] = result['true_positive'] / (result['true_positive'] + result['false_negative'])\n else:\n result['recall'] = 0\n if (result['precision'] + result['recall']) > 0:\n result['f1'] = 2 * result['precision'] * result['recall'] / (result['precision'] + result['recall'])\n else:\n result['f1'] = 0\n return result",
"def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def evaluate(poem):\r\n score = 0\r\n sentence_list = poem.split(\"\\n\")\r\n for d1 in sentence_list:\r\n d1 = d1.split()\r\n if (len(d1) > 2):\r\n if (len(wordnet.synsets(d1[-1])) > 1):\r\n w1 = wordnet.synsets(d1[-1])[0]\r\n w2 = wordnet.synsets(d1[-2])[0]\r\n if (w1.wup_similarity(w2)!= None):\r\n score += w1.wup_similarity(w2)\r\n else:\r\n # arbitrary default value\r\n score += .1\r\n return score",
"def F1_score(y, model):\n\tp = precision(y, model)\n\tr = recall(y, model)\n\tf = 2*((p*r)/(p+r))\n\treturn f",
"def f1_score_model(self, model, X, y):\n\n prediction = model.predict_classes(X)\n f1_macro = f1_score(y, prediction, average='macro')\n f1_micro = f1_score(y, prediction, average='macro')\n print(\"f1_macro: \", f1_score(y, prediction, average='macro'))\n print(\"f1_micro: \", f1_score(y, prediction, average=\"micro\"))\n print(\"f1_weighted: \", f1_score(y, prediction, average=\"weighted\"))\n return f1_macro, f1_micro",
"def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc",
"def F1_score(y_t, y_p, weights):\n\n P = Precision()\n R = Recall() #label per label evaluation\n F1_score_per_label = [] #store per label\n P_per_label = []\n R_per_label = []\n F1_tot = 0 #weighted sum\n\n for i in range(8):\n P.update_state( y_t[:,i], y_p[:,i] )\n R.update_state( y_t[:,i], y_p[:,i] )\n p = P.result().numpy()\n r = R.result().numpy()\n P.reset_states()\n R.reset_states()\n if p+r == 0:\n f1 = 0\n else:\n f1 = 2*p*r/ (p+r)\n F1_score_per_label.append(f1)\n P_per_label.append(p)\n R_per_label.append(r)\n\n F1_tot += f1*weights[i]\n\n return F1_score_per_label, P_per_label, R_per_label, F1_tot",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def evaluate_detections(self, ap, phrase_counts, top1acc, total_aug, top1acc_aug, top1acc_oracle, top1acc_aug_oracle):\n # organize mAP by the number of occurrences\n count_thresholds = cfg.TEST.PHRASE_COUNT_THRESHOLDS\n mAP = np.zeros(len(count_thresholds))\n occurrences = np.zeros_like(mAP)\n samples = np.zeros_like(mAP)\n samples_aug = np.zeros_like(mAP)\n acc = np.zeros_like(mAP)\n acc_aug = np.zeros_like(mAP)\n oracle = np.zeros_like(mAP)\n oracle_aug = np.zeros_like(mAP)\n for phrase, phrase_index in self._phrase_to_ind.iteritems():\n n_occurrences = phrase_counts[phrase_index]\n if n_occurrences < 1:\n continue\n\n train_count = 0\n if phrase in self._train_counts:\n train_count = self._train_counts[phrase]\n\n count_index = min(np.where(train_count <= count_thresholds)[0])\n mAP[count_index] += ap[phrase_index]\n occurrences[count_index] += 1\n samples[count_index] += n_occurrences\n acc[count_index] += top1acc[phrase_index]\n acc_aug[count_index] += top1acc_aug[phrase_index]\n samples_aug[count_index] += total_aug[phrase_index]\n oracle[count_index] += top1acc_oracle[phrase_index]\n oracle_aug[count_index] += top1acc_aug_oracle[phrase_index]\n\n mAP = mAP / occurrences\n thresh_string = '\\t'.join([str(thresh) for thresh in count_thresholds])\n print('\\nThresholds: \\t' + thresh_string + '\\tOverall')\n\n ap_string = '\\t'.join(['%.1f' % round(t * 100, 2) for t in mAP])\n print('AP: \\t' + ap_string + '\\t%.1f' % round(np.mean(mAP) * 100, 2))\n\n n_total = np.sum(samples)\n n_aug = np.sum(total_aug)\n loc_acc = np.sum(acc) / (n_total - n_aug)\n group_acc = acc / (samples - samples_aug)\n acc_string = '\\t'.join(['%.1f' % round(t * 100, 2) for t in group_acc])\n print('Loc Acc:\\t' + acc_string + '\\t%.1f' % round(loc_acc * 100, 2))\n \n loc_acc = np.sum(oracle) / (n_total - n_aug)\n group_acc = oracle / (samples - samples_aug)\n acc_string = '\\t'.join(['%.1f' % round(t * 100, 2) for t in group_acc])\n print('Oracle Acc:\\t' + acc_string + '\\t%.1f' % round(loc_acc * 100, 2))\n if cfg.AUGMENTED_POSITIVE_PHRASES:\n loc_acc = (np.sum(acc) + np.sum(acc_aug)) / n_total\n group_acc = (acc+acc_aug) / samples\n acc_string = '\\t'.join(['%.1f' % round(t * 100, 2) for t in group_acc])\n print('Aug Loc Acc:\\t' + acc_string + '\\t%.1f' % round(loc_acc * 100, 2))\n\n loc_acc = (np.sum(oracle) + np.sum(oracle_aug)) / n_total\n group_acc = (oracle+oracle_aug) / samples\n acc_string = '\\t'.join(['%.1f' % round(t * 100, 2) for t in group_acc])\n print('Oracle Aug Acc:\\t' + acc_string + '\\t%.1f' % round(loc_acc * 100, 2))\n\n occ_string = '\\t'.join(['%i' % occ for occ in occurrences])\n print('Per Thresh Cnt:\\t' + occ_string + '\\t%i' % np.sum(occurrences))\n\n sample_string = '\\t'.join(['%i' % item for item in samples])\n print('Instance Cnt: \\t' + sample_string + '\\t%i' % n_total)\n\n if cfg.TOP_K_PER_PHRASE > 1:\n n_correct = np.sum([np.sum(item) for item in gt_labels])\n acc = round((n_correct/n_total)*100, 2)\n print('Portion of phrases with good boxes: %.2f\\n' % acc)\n\n return np.mean(mAP)",
"def n_gram_rouge(self,RTSummary,SystemSummary, n = 1):\n rouge_scores = dict()\n RT_tokens = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(RTSummary))\n SS_tokens = self.nlpWrapper.stemmer(tokens=self.nlpWrapper.tokenize(SystemSummary))\n RTSummary= set(self.iter_ngrams(RT_tokens,n))\n SystemSummary= set(self.iter_ngrams(SS_tokens,n))\n complete_l = RTSummary.intersection(SystemSummary)\n rouge_score = self.get_f1((len(complete_l)),RTSummary,SystemSummary)\n\n return rouge_score",
"def get_similarity_score(self, reviewer1: Any, reviewer2: Any) -> float:\n v1 = self._vertices[reviewer1]\n v2 = self._vertices[reviewer2]\n return v1.reviewer_similarity_score(v2)",
"def get_f1score(conf_matrix1, conf_matrix2, conf_matrix3):\r\n p = get_precision(conf_matrix1, conf_matrix2, conf_matrix3)\r\n r = get_recall(conf_matrix1, conf_matrix2, conf_matrix3)\r\n\r\n if p + r > 0:\r\n return 2 * p * r / (p + r)\r\n else:\r\n return 0",
"def rouge_score(references, generated):\r\n score = rouge(generated, references)\r\n rouge_s = {k: (v * 100) for (k, v) in score.items()}\r\n '''\r\n \"rouge_1/f_score\": rouge_1_f,\r\n \"rouge_1/r_score\": rouge_1_r,\r\n \"rouge_1/p_score\": rouge_1_p,\r\n \"rouge_2/f_score\": rouge_2_f,\r\n \"rouge_2/r_score\": rouge_2_r,\r\n \"rouge_2/p_score\": rouge_2_p,\r\n \"rouge_l/f_score\": rouge_l_f,\r\n \"rouge_l/r_score\": rouge_l_r,\r\n \"rouge_l/p_score\": rouge_l_p,\r\n '''\r\n return rouge_s",
"def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec",
"def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union",
"def scoreR(self) :\n if self.leafR() :\n return self.leafScore(), self\n else :\n games = self.R()\n min_g = games[0]\n min_score = min_g.scoreL()\n for g in games[1:] :\n score = g.scoreL()\n if score[0] < min_score[0] :\n min_g = g\n min_score = score\n return (min_score+(min_g,))",
"def scoring(self):\n pass",
"def record_f1_score(record_examples: List[RecordNestedExample]):\n if not record_examples:\n return 0.\n f1_scores = []\n for example in record_examples:\n example_f1s = []\n for answer in example.answers:\n example_f1s.append(exact_match_score(example.prediction, answer))\n if example_f1s:\n f1_scores.append(max(example_f1s))\n return np.mean(f1_scores)",
"def score(self, model, context):\n pass",
"def score_intro_model():\n k = 100\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n mc = ModelChooser([model])\n dp = DataPrep(training=False)\n dp.prepare(n_components=k, use_cached_nmf='/home/ubuntu/ca_bills_project/data/extra/nmf_100_05-23-17-08-23.pkl',\n use_cached_tfidf=\"/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl\", cache_tfidf=True, test=True)\n X_test, y_test = dp.subset(features)\n\n\n mc.score(X_test, y_test)",
"def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result",
"def call(self, model):\n raise NotImplementedError('Define your score here')"
] | [
"0.6404512",
"0.6164474",
"0.61226386",
"0.57171863",
"0.56591094",
"0.56569135",
"0.5644448",
"0.5607102",
"0.5510303",
"0.5491259",
"0.5481637",
"0.547529",
"0.54537225",
"0.5388831",
"0.52865225",
"0.52846694",
"0.52558506",
"0.52415675",
"0.52409685",
"0.5237391",
"0.5225559",
"0.52012724",
"0.5195234",
"0.5175369",
"0.5174358",
"0.51662594",
"0.5162748",
"0.5154949",
"0.51531494",
"0.5150945"
] | 0.69222933 | 0 |
Compute the ROUGE-2 (bigram) score of a peer with respect to one or more models. | def rouge_2(peer, models, alpha=1):
return rouge_n(peer, models, 2, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rouge_n(peer, models, n, alpha=1):\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_l(peer, models, alpha=1):\n matches = 0\n recall_total = 0\n for model in models:\n matches += lcs(model, peer)\n recall_total += len(model)\n precision_total = len(models) * len(peer)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_1(peer, models, alpha=1):\n return rouge_n(peer, models, 1, alpha)",
"def rouge_3(peer, models, alpha=1):\n return rouge_n(peer, models, 3, alpha)",
"def score_sample_with_two_models(\n sample, model_1, model_2, score_seed, perturbations_seed, perturbations_inv_strength\n):\n\n score_seed = 1111\n\n blackbox_scorer_1 = PerturbedBlackBoxScorer(\n perturbations_inv_strength,\n W=model_1.W,\n n_Kstars=model_1.n_Kstars,\n score_seed=score_seed,\n perturbations_seed=perturbations_seed,\n )\n blackbox_scorer_2 = copy.deepcopy(blackbox_scorer_1)\n\n vocab = make_vocab(model_1.W)\n Y = [vocab.index(x) for x in sample] # rep the observations as numeric\n\n scores_1, _ = score_batch(Y, model_1, blackbox_scorer_1, override_pi=True)\n scores_2, _ = score_batch(Y, model_2, blackbox_scorer_2, override_pi=True)\n return scores_1, scores_2",
"def _bleu_score_compute(preds_len: Tensor, target_len: Tensor, numerator: Tensor, denominator: Tensor, n_gram: int, weights: Sequence[float], smooth: bool) ->Tensor:\n device = numerator.device\n if min(numerator) == 0.0:\n return tensor(0.0, device=device)\n if smooth:\n precision_scores = torch.div(torch.add(numerator, torch.ones(n_gram, device=device)), torch.add(denominator, torch.ones(n_gram, device=device)))\n precision_scores[0] = numerator[0] / denominator[0]\n else:\n precision_scores = numerator / denominator\n log_precision_scores = tensor(weights, device=device) * torch.log(precision_scores)\n geometric_mean = torch.exp(torch.sum(log_precision_scores))\n brevity_penalty = tensor(1.0, device=device) if preds_len > target_len else torch.exp(1 - target_len / preds_len)\n bleu = brevity_penalty * geometric_mean\n return bleu",
"def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)",
"def evaluate(poem):\r\n score = 0\r\n sentence_list = poem.split(\"\\n\")\r\n for d1 in sentence_list:\r\n d1 = d1.split()\r\n if (len(d1) > 2):\r\n if (len(wordnet.synsets(d1[-1])) > 1):\r\n w1 = wordnet.synsets(d1[-1])[0]\r\n w2 = wordnet.synsets(d1[-2])[0]\r\n if (w1.wup_similarity(w2)!= None):\r\n score += w1.wup_similarity(w2)\r\n else:\r\n # arbitrary default value\r\n score += .1\r\n return score",
"def b2_precision(system_output, gold_standard, sys_el2kbid, gold_el2kbid):\n el_pre_sums = 0.0\n num_elements = 0\n \n for kb_id in system_output.keys():\n mention_set = system_output[kb_id]\n\n num_elements += len(mention_set)\n \n for el_a in mention_set:\n num_correct = 0\n \n for el_b in mention_set:\n correct = b2_correctness(el_a, el_b, sys_el2kbid, gold_el2kbid)\n if(correct): num_correct +=1\n\n el_pre = num_correct / float(len(mention_set))\n \n el_pre_sums += el_pre\n \n #print \"\\t%s\\t%.2f\" % (el_a, el_pre)\n\n P = el_pre_sums / float(num_elements)\n \n return P",
"def score_po(emb_rel: tf.Tensor,\n all_emb_arg1: tf.Tensor,\n emb_arg2: tf.Tensor):\n # [B, E], [B, E] Tensors\n rel_real, rel_img = tf.split(emb_rel, 2, axis=1)\n # [N, E], [N, E] Tensors\n all_arg1_real, all_arg1_img = tf.split(all_emb_arg1, 2, axis=1)\n # [B, E], [B, E] Tensors\n arg2_real, arg2_img = tf.split(emb_arg2, 2, axis=1)\n\n # [B, N] Tensor\n score1 = tf.einsum(\"nj,ij->in\", all_arg1_real, arg2_real * rel_real)\n # [B, N] Tensor\n score2 = tf.einsum(\"nj,ij->in\", all_arg1_img, arg2_img * rel_real)\n # [B, N] Tensor\n score3 = tf.einsum(\"nj,ij->in\", all_arg1_real, arg2_img * rel_img)\n # [B, N] Tensor\n score4 = tf.einsum(\"nj,ij->in\", all_arg1_img, arg2_real * rel_img)\n\n # [B, N] Tensor\n return score1 + score2 + score3 - score4",
"def _bleu_score_compute(\n preds_len: Tensor,\n target_len: Tensor,\n numerator: Tensor,\n denominator: Tensor,\n n_gram: int,\n weights: Sequence[float],\n smooth: bool,\n) -> Tensor:\n device = numerator.device\n if min(numerator) == 0.0:\n return tensor(0.0, device=device)\n\n if smooth:\n precision_scores = torch.div(\n torch.add(numerator, torch.ones(n_gram, device=device)),\n torch.add(denominator, torch.ones(n_gram, device=device)),\n )\n precision_scores[0] = numerator[0] / denominator[0]\n else:\n precision_scores = numerator / denominator\n\n log_precision_scores = tensor(weights, device=device) * torch.log(precision_scores)\n geometric_mean = torch.exp(torch.sum(log_precision_scores))\n brevity_penalty = tensor(1.0, device=device) if preds_len > target_len else torch.exp(1 - (target_len / preds_len))\n return brevity_penalty * geometric_mean",
"def max_v_greedy():\n\n S1=Spectrum.Spectrum()\n S1.add_peak(50.4,16)\n S1.add_peak(50.7,36)\n S1.add_peak(74.8,25)\n S1.add_peak(96.2,23)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.6,49)\n S2.add_peak(50.9,25)\n S2.add_peak(74.6,9)\n S2.add_peak(102.4,17)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n g_score,g_peaks=similarity.cosine_score_greedy(S1,S2)\n\n assert score>=g_score, \"Maximum weighted method did not get higher score than greedy method\"\n assert peaks>=g_peaks, \"Maximum weighted method did not match more peaks than greedy method\"\n\n assert peaks==3, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,0.73), \"Incorrect score with greedy method\"\n\n assert g_peaks==2, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(g_score,0.57), \"Incorrect score with maximum weighted method\"",
"def get_r2_score(self):\n return self.r2_score",
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def get_model_scores(model_output):\n return [hsp.bitscore for query in SearchIO.parse(model_output, \"hmmer3-text\") \\\n for hit in query for hsp in hit]",
"def build_bigram_model(data):\n\n bigrams,unigrams = get_counts(data)\n model = {}\n for bigram in bigrams:\n #unigram count of first member of bigram\n uni_count=unigrams[bigram[0]]\n bi_count=bigrams[bigram]\n\n #probability is bigram count divided by unigram count\n model[bigram]=(bi_count/float(uni_count))\n return model",
"def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union",
"def r2_score(self, weights=None):\n\n if len(self.predicted) < 2:\n msg = \"R^2 score is not well-defined with less than two samples.\"\n warnings.warn(msg)\n return None\n\n if weights is None:\n weight = 1.\n else:\n weight = weights[:, np.newaxis]\n\n numerator = (weight * (self.true - self.predicted) ** 2).sum(axis=0,\n dtype=np.float64)\n denominator = (weight * (self.true - np.average(\n self.true, axis=0, weights=weights)) ** 2).sum(axis=0, dtype=np.float64)\n\n if numerator == 0.0:\n return None\n output_scores = _foo(denominator, numerator)\n\n return float(np.average(output_scores, weights=weights))",
"def _compute_score(self):\n\n sgml_path = str(self.sgml_file.name)\n text_path = sgml_path.replace('.sgm', '.txt')\n ref_path = 'testsets/wmt18.ende.ref.txt'\n\n from sacrebleu import process_to_text, corpus_bleu\n from pathlib import Path\n\n if not Path(text_path).exists():\n process_to_text(sgml_path, text_path)\n\n hyp_stream = [x for x in open(text_path, encoding='utf-8')]\n ref_stream = [r for r in open(ref_path, encoding='utf-8')]\n\n bleu = corpus_bleu(hyp_stream, [ref_stream])\n\n self.score = bleu.score\n self.save()",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)",
"def run(self, args):\n self.pen.score_for_matched_lexical = args[0]\n self.pen.score_for_matched_synonym = args[1]\n self.factor_word_offset_penalty = args[2]\n self.factor_sentence_length_mismatch = args[3]\n self.factor_name_mismatch = args[4]\n self.factor_fe_offset_penalty = args[5]\n self.weight_target_frame_element = args[6]\n self.weight_frame_elements = args[7]\n self.factor_frame_offset_penalty = args[8]\n misses = []\n for row in range(self.data.get_number_of_rows()):\n ref_sentence = self.data.get_row(row)[self.data.get_gold()]\n results = {}\n for team, team_sentence in self.data.get_row_for_teams(self.evaluator.get_teams(row), row).iteritems():\n results[team] = self.get_sentence_score(ref_sentence, team_sentence)\n misses.append(self.evaluator.compare_all(results, row))\n return np.mean(misses) / 5.0",
"def get_r2_score(ground_truth, predicted):\n residual = np.sum(np.square(np.subtract(ground_truth, predicted)))\n print(residual)\n total = np.sum(np.square(np.subtract(ground_truth, np.mean(ground_truth))))\n print(total)\n return np.subtract(1.0, np.divide(residual, (total + 0.00000000001)))",
"def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"",
"def recognize_ngram(models: dict, test_set: SinglesData,probs,BIC_guesses):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n model = arpa.loadf(\"devel-lm-M3.sri.lm\")\n lm = model[0] # ARPA files may contain several models.\n # TODO implement the recognizer\n # return probabilities, guesses\n test_sequences = list(test_set.get_all_Xlengths().values())\n word_keys = list(test_set.get_all_Xlengths().keys())\n i = -1 \n for sentence in test_set.sentences_index.values():\n f = {}\n maxs = float(\"-inf\")\n prob = []\n words = []\n\n sentenceLength = 0\n for word_index in sentence:\n i+=1\n word = test_set.wordlist[word_index]\n sentenceLength+=1\n try:\n f[word] = probs[word][i]\n except:\n f[word] = float(\"-inf\")\n prob.append(f[word]) ## These are Just the probabilities unchanged from the BIC recognizer.\n \n # Find Six most probable words and generate the possible permutations \n sixwords = sorted(f,key=f.get,reverse=True)[:6]\n for k in permutations(sixwords, r=sentenceLength):\n l = 0\n for j in range(len(k)):\n l += f[k[j]]\n try:\n sentenceLP = l + 13*lm.log_s(\" \".join(k)) ## According to one student in the forum 13 is the best hyperparameter\n if sentenceLP > maxs: ## https://discussions.udacity.com/t/slm-data-for-this-asl-dataset/230822/8?u=spiros\n sentence = \" \".join(k)\n maxs = sentenceLP\n words = list(k)\n except:\n pass\n\n if(words == []):\n words = BIC_guesses[len(guesses):len(guesses)+sentenceLength] ## Fall back to BIC guesses\n probabilities.append(prob) \n guesses += words\n return (probabilities,guesses)",
"def rouge_score(references, generated):\r\n score = rouge(generated, references)\r\n rouge_s = {k: (v * 100) for (k, v) in score.items()}\r\n '''\r\n \"rouge_1/f_score\": rouge_1_f,\r\n \"rouge_1/r_score\": rouge_1_r,\r\n \"rouge_1/p_score\": rouge_1_p,\r\n \"rouge_2/f_score\": rouge_2_f,\r\n \"rouge_2/r_score\": rouge_2_r,\r\n \"rouge_2/p_score\": rouge_2_p,\r\n \"rouge_l/f_score\": rouge_l_f,\r\n \"rouge_l/r_score\": rouge_l_r,\r\n \"rouge_l/p_score\": rouge_l_p,\r\n '''\r\n return rouge_s",
"def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str",
"def TM_score(peeled_pdb_path, ref_pdb_path, peel_longer):\n if peel_longer:\n cmdLine_TM = (\"bin/TMscore64 \" + peeled_pdb_path + \" \" + ref_pdb_path)\n else:\n cmdLine_TM = (\"bin/TMscore64 \" + ref_pdb_path + \" \" + peeled_pdb_path)\n\n out_TM = sub.Popen(cmdLine_TM.split(), stdout=sub.PIPE).communicate()[0]\n lines_TM = out_TM.decode()\n\n regex_TMscore = re.compile(\"(?:TM-score.+= )([0-9]\\.[0-9]*)[ $]\")\n searchObj = re.search(regex_TMscore, lines_TM)\n\n # It is possible to have a case where the TMscore does not find any\n # residues in common, so we return -1\n if searchObj:\n return float(searchObj.group(1))\n return -1",
"def bleu_score(references, generated, n_gram=4, smooth=False):\r\n formatted_ref = [[ref] for ref in references]\r\n bleu_s, _, _, _, _, _ = compute_bleu(formatted_ref, generated, n_gram, smooth)\r\n return bleu_s * 100"
] | [
"0.63647765",
"0.6264367",
"0.6048327",
"0.56891185",
"0.559623",
"0.5595172",
"0.55653435",
"0.5532778",
"0.5503071",
"0.5493627",
"0.54738057",
"0.54648376",
"0.5429727",
"0.54142624",
"0.5363083",
"0.53563017",
"0.53514856",
"0.53496987",
"0.5347989",
"0.53299314",
"0.5326835",
"0.52540416",
"0.52516764",
"0.5248471",
"0.5228937",
"0.52262926",
"0.5217586",
"0.52142555",
"0.5213674",
"0.52114457"
] | 0.68937504 | 0 |
Compute the ROUGE-3 (trigram) score of a peer with respect to one or more models. | def rouge_3(peer, models, alpha=1):
return rouge_n(peer, models, 3, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rouge_l(peer, models, alpha=1):\n matches = 0\n recall_total = 0\n for model in models:\n matches += lcs(model, peer)\n recall_total += len(model)\n precision_total = len(models) * len(peer)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_n(peer, models, n, alpha=1):\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def rouge_1(peer, models, alpha=1):\n return rouge_n(peer, models, 1, alpha)",
"def rouge_2(peer, models, alpha=1):\n return rouge_n(peer, models, 2, alpha)",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def rouge(hypotheses, references):\n\n # Filter out hyps that are of 0 length\n # hyps_and_refs = zip(hypotheses, references)\n # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]\n # hypotheses, references = zip(*hyps_and_refs)\n\n # Calculate ROUGE-1 F1, precision, recall scores\n rouge_1 = [\n rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))\n\n # Calculate ROUGE-2 F1, precision, recall scores\n rouge_2 = [\n rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)\n ]\n rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))\n\n # Calculate ROUGE-L F1, precision, recall scores\n rouge_l = [\n rouge_l_sentence_level([hyp], [ref])\n for hyp, ref in zip(hypotheses, references)\n ]\n rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))\n\n return {\n \"rouge_1/f_score\": rouge_1_f,\n \"rouge_1/r_score\": rouge_1_r,\n \"rouge_1/p_score\": rouge_1_p,\n \"rouge_2/f_score\": rouge_2_f,\n \"rouge_2/r_score\": rouge_2_r,\n \"rouge_2/p_score\": rouge_2_p,\n \"rouge_l/f_score\": rouge_l_f,\n \"rouge_l/r_score\": rouge_l_r,\n \"rouge_l/p_score\": rouge_l_p,\n }",
"def test_score_text3(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\t_, obj_ut = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, {'not good': [[2, -1, 0]],\n\t\t\t'not very good': [[4, -1, 0]]})",
"def score(self, text):\n logger.debug(\"score on an instance of len {0}\".format(len(text)))\n fv = self.instance2fv(text)\n fv /= np.sqrt((fv*fv).sum()) # normalize vector to len 1\n fdot = self.lprot.dot(fv) \n retval = dict(zip(self.langs, fdot))\n return retval",
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def get_model_scores(model_output):\n return [hsp.bitscore for query in SearchIO.parse(model_output, \"hmmer3-text\") \\\n for hit in query for hsp in hit]",
"def neural_rm3_prf(neu, ix, word_embs, doc_embs, corpus_name, corpus, doc_ids, queries, qfield, rank_path, run_name, es=None, fb_docs=10, fb_terms=10, qweight=0.5):\n\n\t# project queries within doc latent space\n\tproj_queries = list()\n\tquery_ids = list()\n\tfor qid, qbody in queries.items():\n\t\tproj_query = neu.query2emb(qbody[qfield], word_embs)\n\t\tif proj_query is not None: # keep query\n\t\t\tproj_queries.append(proj_query)\n\t\t\tquery_ids.append(qid)\n\n\t# compute the cosine similarity between docs and queries\n\tscores = cosine_similarity(doc_embs, proj_queries)\n\n\tif es is not None: # open ranking \n\t\tout = open(rank_path + '/' + run_name + '.txt', 'w')\n\n\t# loop over queries and perform RM3 expansion\n\texp_queries = list()\n\t# loop over the cosine similarity scores\n\tfor qix in tqdm(range(scores.shape[1])):\n\t\t# get the index of the top fb_docs documents from result list\n\t\tfirst_qres = np.argsort(-scores[:, qix])[:fb_docs]\n\t\t# store ids and scores of the retrieved feedback documents\n\t\tids_and_scores = dict()\n\t\tfor res in first_qres:\n\t\t\tif es is None: # perform RM3 using neural model after pseudo-relevance feedback\n\t\t\t\tids_and_scores[res] = scores[res][qix]\n\t\t\telse: # perform RM3 using lexical model after pseudo-relevance feedback\n\t\t\t\tids_and_scores[doc_ids[res]] = scores[res][qix]\n\n\t\tif es is None: # perform RM3 using neural model after pseudo-relevance feedback\n\t\t\t# get query feature vector and normalize to L1 norm\n\t\t\tqfv = create_query_vector(ix, word_embs.wv.vocab, ix.doc2bow(neu.tokenize_query(queries[query_ids[qix]][qfield])))\n\t\t\t# get relevance model feature vector (i.e., RM1)\n\t\t\trm1 = neural_relevance_model(ix, word_embs.wv.vocab, ids_and_scores, corpus, fb_terms)\n\t\t\t# interpolate qfv and rm1 (i.e., RM3)\n\t\t\trm3 = interpolate(qfv, rm1, qweight)\n\n\t\t\t# extract terms and scores from rm3\n\t\t\trm3_terms = list(rm3.keys())\n\t\t\trm3_scores = np.array(list(rm3.values()))\n\t\t\t# project expanded query into document latent space\n\t\t\tproj_query = np.sum(np.multiply(word_embs.wv[rm3_terms], rm3_scores[:, np.newaxis]), axis=0)\n\t\t\t# append projected query to exp_queries\n\t\t\texp_queries.append(proj_query)\n\t\telse: # perform RM3 using lexical model after pseudo-relevance feedback\n\t\t\t# get query feature vector and normalize to L1 norm\n\t\t\tqfv = es.scale_to_L1_norm(Counter(es.analyze_query(queries[query_ids[qix]][qfield])))\n\t\t\t# get relevance model feature vector (i.e., RM1)\n\t\t\trm1 = es.estimate_relevance_model(ids_and_scores, fb_terms)\n\t\t\t# interpolate qfv and rm1 (i.e., RM3)\n\t\t\trm3 = es.interpolate(qfv, rm1, qweight)\n\n\t\t\t# build boosted term queries\n\t\t\tterm_queries = [{'term': {es.field: {'value': term, 'boost': score}}} for term, score in rm3.items()]\n\t\t\t# combine term queries w/ SHOULD operator\n\t\t\texpanded_query = {'query': { 'bool': {'should': term_queries}}}\n\n\t\t\t# perform lexical search after pseudo-relevance feedback\n\t\t\tprf_qres = es.es.search(index=es.index, size=1000, body=expanded_query)\n\t\t\tfor idx, rank in enumerate(prf_qres['hits']['hits']):\n\t\t\t\tout.write('%s %s %s %d %f %s\\n' % (query_ids[qix], 'Q0', rank['_id'], idx, rank['_score'], run_name))\n\n\tif es is None: # perform neural search after pseudo-relevance feedback\n\t\tneu.semantic_search(doc_ids, doc_embs, query_ids, exp_queries, rank_path, run_name)\n\telse: # close ranking\n\t\tout.close()\n\treturn True",
"def getScores(self, w1, w2, w3):\r\n Fw = 2.26 * 3\r\n score = round((float(w1) * float(w2) * float(w3)) ** Fw, 6) # Keep six decimal places\r\n return score",
"def test_score_3():\n\n tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error')\n tpot_obj._pbar = tqdm(total=1, disable=True)\n known_score = 8.9673743407873712 # Assumes use of mse\n # Reify pipeline with known score\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('ExtraTreesRegressor(GradientBoostingRegressor(input_matrix, 100.0, 0.11), 0.17999999999999999)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features_r, training_classes_r)\n\n # Get score from TPOT\n score = tpot_obj.score(testing_features_r, testing_classes_r)\n\n # http://stackoverflow.com/questions/5595425/\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n assert isclose(known_score, score)",
"def bert_score(preds: Union[List[str], Dict[str, Tensor]], target: Union[List[str], Dict[str, Tensor]], model_name_or_path: Optional[str]=None, num_layers: Optional[int]=None, all_layers: bool=False, model: Optional[Module]=None, user_tokenizer: Any=None, user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor]=None, verbose: bool=False, idf: bool=False, device: Optional[Union[str, torch.device]]=None, max_length: int=512, batch_size: int=64, num_threads: int=4, return_hash: bool=False, lang: str='en', rescale_with_baseline: bool=False, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Dict[str, Union[List[float], str]]:\n if len(preds) != len(target):\n raise ValueError('Number of predicted and reference sententes must be the same!')\n if verbose and not _TQDM_AVAILABLE:\n raise ModuleNotFoundError('An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`.')\n if model is None:\n if not _TRANSFORMERS_AVAILABLE:\n raise ModuleNotFoundError('`bert_score` metric with default models requires `transformers` package be installed. Either install with `pip install transformers>=4.0` or `pip install torchmetrics[text]`.')\n if model_name_or_path is None:\n warn(f'The argument `model_name_or_path` was not specified while it is required when default `transformers` model are used.It is, therefore, used the default recommended model - {_DEFAULT_MODEL}.')\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n model = AutoModel.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n else:\n tokenizer = user_tokenizer\n model.eval()\n model\n try:\n if num_layers and num_layers > model.config.num_hidden_layers:\n raise ValueError(f'num_layers={num_layers} is forbidden for {model_name_or_path}. 
Please use num_layers <= {model.config.num_hidden_layers}')\n except AttributeError:\n warn('It was not possible to retrieve the parameter `num_layers` from the model specification.')\n _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (preds, target))\n _are_valid_lists = all(isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (preds, target))\n _are_valid_tensors = all(isinstance(text, dict) and isinstance(text['input_ids'], Tensor) for text in (preds, target))\n if _are_empty_lists:\n warn('Predictions and references are empty.')\n output_dict: Dict[str, Union[List[float], str]] = {'precision': [0.0], 'recall': [0.0], 'f1': [0.0]}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict\n baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None\n if _are_valid_lists:\n target_dataset = TextDataset(target, tokenizer, max_length, idf=idf)\n preds_dataset = TextDataset(preds, tokenizer, max_length, idf=idf, tokens_idf=target_dataset.tokens_idf)\n elif _are_valid_tensors:\n target_dataset = TokenizedDataset(**target, idf=idf)\n preds_dataset = TokenizedDataset(**preds, idf=idf, tokens_idf=target_dataset.tokens_idf)\n else:\n raise ValueError('Invalid input provided.')\n target_loader = DataLoader(target_dataset, batch_size=batch_size, num_workers=num_threads)\n preds_loader = DataLoader(preds_dataset, batch_size=batch_size, num_workers=num_threads)\n target_embeddings, target_idf_scale = _get_embeddings_and_idf_scale(target_loader, target_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n preds_embeddings, preds_idf_scale = _get_embeddings_and_idf_scale(preds_loader, preds_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n precision, recall, f1_score = _get_precision_recall_f1(preds_embeddings, target_embeddings, preds_idf_scale, target_idf_scale)\n if baseline is not None:\n precision, recall, f1_score = _rescale_metrics_with_baseline(precision, recall, f1_score, baseline, num_layers, all_layers)\n output_dict = {'precision': precision.tolist(), 'recall': recall.tolist(), 'f1': f1_score.tolist()}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict",
"def test_score_text4(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\t_, obj_ut = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, {'not good': [[2, -1, 0]], \n\t\t\t'not very good': [[4, -1.5, 0]]})",
"def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)",
"def b3_precision(system_output, gold_standard, sys_el2kbid, gold_el2kbid):\n el_pre_sums = 0.0\n num_elements = 0\n \n for kb_id in system_output.keys():\n mention_set = system_output[kb_id]\n\n num_elements += len(mention_set)\n \n for el_a in mention_set:\n num_correct = 0\n \n for el_b in mention_set:\n correct = b3_correctness(el_a, el_b, sys_el2kbid, gold_el2kbid)\n if(correct): num_correct +=1\n\n el_pre = num_correct / float(len(mention_set))\n \n el_pre_sums += el_pre\n \n #print \"\\t%s\\t%.2f\" % (el_a, el_pre)\n\n P = el_pre_sums / float(num_elements)\n \n return P",
"def task3(dataset,writepickle=False,pfilename=None,usepickle=True):\n model,bitext = task1(dataset,printoutput = False,writepickle=writepickle,pfile = pfilename,usepickle=usepickle)\n phrases = extract_phrases(bitext,model)\n scored_phrases = phrase_scoring_ranking(phrases,model,dataset,bitext)\n print_output_task3(scored_phrases,dataset)",
"def calculateScore(self, queue):\n for song in queue:\n if song['explicit']:\n song['score'] = 3 * song['age'] + 2 * song['upvotes'] - 2 * song['downvotes']\n else:\n song['score'] = -1 * song['downvotes']",
"def evaluate(poem):\r\n score = 0\r\n sentence_list = poem.split(\"\\n\")\r\n for d1 in sentence_list:\r\n d1 = d1.split()\r\n if (len(d1) > 2):\r\n if (len(wordnet.synsets(d1[-1])) > 1):\r\n w1 = wordnet.synsets(d1[-1])[0]\r\n w2 = wordnet.synsets(d1[-2])[0]\r\n if (w1.wup_similarity(w2)!= None):\r\n score += w1.wup_similarity(w2)\r\n else:\r\n # arbitrary default value\r\n score += .1\r\n return score",
"def model(**params):\n N_frb = 0\n vs = []\n hs = []\n cs = []\n ncands = []\n\n for cand in candlist:\n c_res = calculate_metric_terms(\n cand, cluster_function=cluster_function, debug=False, plot=False, **params\n )\n t, frb_found, h, c, v = c_res\n vs.append(v)\n hs.append(h)\n cs.append(c)\n ncands.append(t)\n\n if frb_found:\n N_frb += 1\n\n vs = np.array(vs)\n hs = np.array(hs)\n cs = np.array(cs)\n c_avg = np.average(cs, axis=0, weights=ncands)\n h_avg = np.average(hs, axis=0, weights=ncands)\n v_avg = np.average(vs, axis=0, weights=ncands)\n recall = N_frb / len(vs)\n score = v_avg * recall\n\n return score",
"def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict",
"def model(self,sample):\n\n lca = self.lca\n \n self.amount_tech = lca.tech_params['amount']\n self.amount_bio = lca.bio_params['amount']\n\n self.i_sample = 0\n self.replace_non_parameterized_exchanges(sample)\n self.replace_parameterized_exchanges(sample)\n\n lca.rebuild_technosphere_matrix(self.amount_tech)\n lca.rebuild_biosphere_matrix(self.amount_bio)\n\n score = (sum(lca.characterization_matrix)*lca.biosphere_matrix) * \\\n spsolve(lca.technosphere_matrix,lca.demand_array)\n\n np.append(self.scores, score)\n\n return score",
"def rouge_score(references, generated):\r\n score = rouge(generated, references)\r\n rouge_s = {k: (v * 100) for (k, v) in score.items()}\r\n '''\r\n \"rouge_1/f_score\": rouge_1_f,\r\n \"rouge_1/r_score\": rouge_1_r,\r\n \"rouge_1/p_score\": rouge_1_p,\r\n \"rouge_2/f_score\": rouge_2_f,\r\n \"rouge_2/r_score\": rouge_2_r,\r\n \"rouge_2/p_score\": rouge_2_p,\r\n \"rouge_l/f_score\": rouge_l_f,\r\n \"rouge_l/r_score\": rouge_l_r,\r\n \"rouge_l/p_score\": rouge_l_p,\r\n '''\r\n return rouge_s",
"def f1_score(model_id, test_set_id, rubric_id):\n result = {'true_positive': 0, 'false_positive': 0, 'true_negative': 0, 'false_negative': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_result(model_id, test_set_id, rubric_id)\n\n for key in rubrication_result:\n if rubrication_result[key] == answers[key]:\n if rubrication_result[key] == 1:\n result['true_positive'] += 1\n else:\n result['true_negative'] += 1\n else:\n if rubrication_result[key] == 1:\n result['false_positive'] += 1\n else:\n result['false_negative'] += 1\n if (result['true_positive'] + result['false_positive']) > 0:\n result['precision'] = result['true_positive'] / (result['true_positive'] + result['false_positive'])\n else:\n result['precision'] = 0\n if (result['true_positive'] + result['false_negative']) > 0:\n result['recall'] = result['true_positive'] / (result['true_positive'] + result['false_negative'])\n else:\n result['recall'] = 0\n if (result['precision'] + result['recall']) > 0:\n result['f1'] = 2 * result['precision'] * result['recall'] / (result['precision'] + result['recall'])\n else:\n result['f1'] = 0\n return result",
"def TM_score(peeled_pdb_path, ref_pdb_path, peel_longer):\n if peel_longer:\n cmdLine_TM = (\"bin/TMscore64 \" + peeled_pdb_path + \" \" + ref_pdb_path)\n else:\n cmdLine_TM = (\"bin/TMscore64 \" + ref_pdb_path + \" \" + peeled_pdb_path)\n\n out_TM = sub.Popen(cmdLine_TM.split(), stdout=sub.PIPE).communicate()[0]\n lines_TM = out_TM.decode()\n\n regex_TMscore = re.compile(\"(?:TM-score.+= )([0-9]\\.[0-9]*)[ $]\")\n searchObj = re.search(regex_TMscore, lines_TM)\n\n # It is possible to have a case where the TMscore does not find any\n # residues in common, so we return -1\n if searchObj:\n return float(searchObj.group(1))\n return -1",
"def _detection_scores(inputs, gt_boxes, gt_labels, model):\n model = check_model('model', model, BlackModel)\n boxes_and_confi, pred_labels = model.predict(*inputs)\n det_scores = []\n correct_labels_num = []\n # repeat gt_boxes and gt_labels for all particles cloned from the same sample in PSOAttack/GeneticAttack\n if gt_boxes.shape[0] == 1 and boxes_and_confi.shape[0] > 1:\n gt_boxes = np.repeat(gt_boxes, boxes_and_confi.shape[0], axis=0)\n gt_labels = np.repeat(gt_labels, boxes_and_confi.shape[0], axis=0)\n iou_thres = 0.5\n for boxes, labels, gt_box, gt_label in zip(boxes_and_confi, pred_labels, gt_boxes, gt_labels):\n gt_box_num = gt_box.shape[0]\n score = 0\n box_num = boxes.shape[0]\n correct_label_flag = np.zeros(gt_label.shape)\n for i in range(box_num):\n pred_box = boxes[i]\n max_iou_confi = 0\n for j in range(gt_box_num):\n iou = calculate_iou(pred_box[:4], gt_box[j][:4])\n if labels[i] == gt_label[j] and iou > iou_thres and correct_label_flag[j] == 0:\n max_iou_confi = max(max_iou_confi, pred_box[-1] + iou)\n correct_label_flag[j] = 1\n score += max_iou_confi\n det_scores.append(score)\n correct_labels_num.append(np.sum(correct_label_flag))\n return np.array(det_scores), np.array(correct_labels_num)",
"def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def score(model):\n # get the first layer\n layer = model.get_layer('encoder')\n # extracts weights\n weights = layer.get_weights()[0]\n # calculate the infinity norm as shown in the paper.\n # For each input feature get the absolute maximum weight\n # connected with this feature\n scores = np.linalg.norm(weights, ord=np.inf, axis=1)\n # the final score is a importance measure for each feature\n sorted_scores = sorted(range(len(scores)), key=lambda k: scores[k])\n return sorted_scores[::-1]"
] | [
"0.6201271",
"0.59838575",
"0.56443214",
"0.563465",
"0.5621777",
"0.56136626",
"0.5591619",
"0.551658",
"0.5460007",
"0.5452518",
"0.54252064",
"0.5394155",
"0.53527737",
"0.5327824",
"0.531235",
"0.5268596",
"0.5262611",
"0.5215765",
"0.5202057",
"0.518789",
"0.518238",
"0.5140375",
"0.5127536",
"0.5113973",
"0.5099119",
"0.5076784",
"0.50529796",
"0.5012339",
"0.49881038",
"0.49864954"
] | 0.6721017 | 0 |
Compute the ROUGE-L score of a peer with respect to one or more models. | def rouge_l(peer, models, alpha=1):
matches = 0
recall_total = 0
for model in models:
matches += lcs(model, peer)
recall_total += len(model)
precision_total = len(models) * len(peer)
return _safe_f1(matches, recall_total, precision_total, alpha) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)",
"def rouge_1(peer, models, alpha=1):\n return rouge_n(peer, models, 1, alpha)",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def get_local_score(self):\n for candidate in self.candidate_list:\n self.score += candidate.get_score()",
"def rouge_2(peer, models, alpha=1):\n return rouge_n(peer, models, 2, alpha)",
"def rouge_n(peer, models, n, alpha=1):\n matches = 0\n recall_total = 0\n peer_counter = _ngram_counts(peer, n)\n for model in models:\n model_counter = _ngram_counts(model, n)\n matches += _counter_overlap(peer_counter, model_counter)\n recall_total += _ngram_count(model, n)\n precision_total = len(models) * _ngram_count(peer, n)\n return _safe_f1(matches, recall_total, precision_total, alpha)",
"def score(self, model, context):\n pass",
"def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc",
"def rouge_3(peer, models, alpha=1):\n return rouge_n(peer, models, 3, alpha)",
"def call(self, model):\n raise NotImplementedError('Define your score here')",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def score(self, model_name, **params):\n model = self.model_dict[model_name]\n model.set_params(**params)\n model.fit(self.data.loc[self.train_index, self.selected_features_],\n self.data.loc[self.train_index, self.target_name])\n predictions = model.predict(\n self.data.loc[self.test_index, self.selected_features_])\n self.test_score_ = np.sqrt(mean_squared_error(\n predictions, self.data.loc[self.test_index, self.target_name]))",
"def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )",
"def calculate_link_prediction_score(self):\n calculate_method = (\n self.calculate_score_persona\n if self.is_persona_emb\n else self.calculate_score\n )\n self.link_prediction_score_positive = np.array(\n calculate_method(self.test_edges)\n )\n self.link_prediction_score_negative = np.array(\n calculate_method(self.negative_edges)\n )",
"def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget",
"def _compute(self, references=None, models=None):\n \n config = self.config\n if references is None:\n self.logger.msg2(\"Working with all references\")\n references = get_ref_names(self.dbpath)\n \n self.logger.msg1(\"Dropping scores for \"+str(len(models))+ \" models\")\n delete_model_scores(self.dbpath, models)\n self.logger.msg2(\"Working with \" + str(len(references)) +\n \" references, \" + str(len(models)) + \" models\")\n packets = prep_compute_packets(config, \n references=references,\n models=models,\n log=self.logger.msg2) \n \n # run calculations on all the packets\n msg = str(config.cores)+ \" cores, \" + str(len(packets))+ \" packets\"\n self.logger.msg1(\"Scoring (\"+msg+\")\")\n run_packets(packets, config.cores)",
"def score_of_nodes(self, score):\n for hypervisor_id in self.model.get_all_hypervisors():\n hypervisor = self.model. \\\n get_hypervisor_from_id(hypervisor_id)\n count = self.model.get_mapping(). \\\n get_node_vms_from_id(hypervisor_id)\n if len(count) > 0:\n result = self.calculate_score_node(hypervisor)\n else:\n # The hypervisor has not VMs\n result = 0\n if len(count) > 0:\n score.append((hypervisor_id, result))\n return score",
"def TM_score(peeled_pdb_path, ref_pdb_path, peel_longer):\n if peel_longer:\n cmdLine_TM = (\"bin/TMscore64 \" + peeled_pdb_path + \" \" + ref_pdb_path)\n else:\n cmdLine_TM = (\"bin/TMscore64 \" + ref_pdb_path + \" \" + peeled_pdb_path)\n\n out_TM = sub.Popen(cmdLine_TM.split(), stdout=sub.PIPE).communicate()[0]\n lines_TM = out_TM.decode()\n\n regex_TMscore = re.compile(\"(?:TM-score.+= )([0-9]\\.[0-9]*)[ $]\")\n searchObj = re.search(regex_TMscore, lines_TM)\n\n # It is possible to have a case where the TMscore does not find any\n # residues in common, so we return -1\n if searchObj:\n return float(searchObj.group(1))\n return -1",
"def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores",
"def score(model):\n # get the first layer\n layer = model.get_layer('encoder')\n # extracts weights\n weights = layer.get_weights()[0]\n # calculate the infinity norm as shown in the paper.\n # For each input feature get the absolute maximum weight\n # connected with this feature\n scores = np.linalg.norm(weights, ord=np.inf, axis=1)\n # the final score is a importance measure for each feature\n sorted_scores = sorted(range(len(scores)), key=lambda k: scores[k])\n return sorted_scores[::-1]",
"def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result",
"def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec",
"def compute_rouge_l(output, reference, mode='f'):\n assert mode in list('fpr') # F-1, precision, recall\n lcs = _lcs_len(output, reference)\n if lcs == 0:\n score = 0.0\n else:\n precision = lcs / len(output)\n recall = lcs / len(reference)\n f_score = 2 * (precision * recall) / (precision + recall)\n if mode == 'p':\n score = precision\n if mode == 'r':\n score = recall\n else:\n score = f_score\n return score",
"def compute_scores(self):\n if self.num_classes == 2:\n score_1 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold=0.5,\n )[1]\n\n score_2 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold_ratio=0.5,\n )[1]\n\n score_3 = self.competition_metric(\n impact_threshold=0.5,\n )[1]\n else:\n score_1 = self.detection_metric(threshold=0.1)\n score_2 = self.detection_metric(threshold=0.25)\n score_3 = self.detection_metric(threshold=0.5)\n\n return score_1, score_2, score_3",
"def scoreR(self) :\n if self.leafR() :\n return self.leafScore(), self\n else :\n games = self.R()\n min_g = games[0]\n min_score = min_g.scoreL()\n for g in games[1:] :\n score = g.scoreL()\n if score[0] < min_score[0] :\n min_g = g\n min_score = score\n return (min_score+(min_g,))",
"def get_ppl(lm, sentences):\n total_nll = 0\n total_wc = 0\n for sent in sentences:\n words = sent.strip().split()\n score = lm.score(sent, bos=True, eos=False)\n word_count = len(words)\n total_wc += word_count\n total_nll += score\n ppl = 10**-(total_nll/total_wc)\n return ppl",
"def get_ppl(lm, sentences):\n total_nll = 0\n total_wc = 0\n for sent in sentences:\n words = sent.strip().split()\n score = lm.score(sent, bos=True, eos=False)\n word_count = len(words)\n total_wc += word_count\n total_nll += score\n ppl = 10**-(total_nll/total_wc)\n return ppl",
"def test_linked_score(self):\r\n\r\n # Setup the peer grading module with the proper linked location.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)\r\n\r\n score_dict = peer_grading.get_score()\r\n\r\n self.assertEqual(score_dict['score'], 1)\r\n self.assertEqual(score_dict['total'], 1)",
"def bert_score(preds: Union[List[str], Dict[str, Tensor]], target: Union[List[str], Dict[str, Tensor]], model_name_or_path: Optional[str]=None, num_layers: Optional[int]=None, all_layers: bool=False, model: Optional[Module]=None, user_tokenizer: Any=None, user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor]=None, verbose: bool=False, idf: bool=False, device: Optional[Union[str, torch.device]]=None, max_length: int=512, batch_size: int=64, num_threads: int=4, return_hash: bool=False, lang: str='en', rescale_with_baseline: bool=False, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Dict[str, Union[List[float], str]]:\n if len(preds) != len(target):\n raise ValueError('Number of predicted and reference sententes must be the same!')\n if verbose and not _TQDM_AVAILABLE:\n raise ModuleNotFoundError('An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`.')\n if model is None:\n if not _TRANSFORMERS_AVAILABLE:\n raise ModuleNotFoundError('`bert_score` metric with default models requires `transformers` package be installed. Either install with `pip install transformers>=4.0` or `pip install torchmetrics[text]`.')\n if model_name_or_path is None:\n warn(f'The argument `model_name_or_path` was not specified while it is required when default `transformers` model are used.It is, therefore, used the default recommended model - {_DEFAULT_MODEL}.')\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n model = AutoModel.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n else:\n tokenizer = user_tokenizer\n model.eval()\n model\n try:\n if num_layers and num_layers > model.config.num_hidden_layers:\n raise ValueError(f'num_layers={num_layers} is forbidden for {model_name_or_path}. 
Please use num_layers <= {model.config.num_hidden_layers}')\n except AttributeError:\n warn('It was not possible to retrieve the parameter `num_layers` from the model specification.')\n _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (preds, target))\n _are_valid_lists = all(isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (preds, target))\n _are_valid_tensors = all(isinstance(text, dict) and isinstance(text['input_ids'], Tensor) for text in (preds, target))\n if _are_empty_lists:\n warn('Predictions and references are empty.')\n output_dict: Dict[str, Union[List[float], str]] = {'precision': [0.0], 'recall': [0.0], 'f1': [0.0]}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict\n baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None\n if _are_valid_lists:\n target_dataset = TextDataset(target, tokenizer, max_length, idf=idf)\n preds_dataset = TextDataset(preds, tokenizer, max_length, idf=idf, tokens_idf=target_dataset.tokens_idf)\n elif _are_valid_tensors:\n target_dataset = TokenizedDataset(**target, idf=idf)\n preds_dataset = TokenizedDataset(**preds, idf=idf, tokens_idf=target_dataset.tokens_idf)\n else:\n raise ValueError('Invalid input provided.')\n target_loader = DataLoader(target_dataset, batch_size=batch_size, num_workers=num_threads)\n preds_loader = DataLoader(preds_dataset, batch_size=batch_size, num_workers=num_threads)\n target_embeddings, target_idf_scale = _get_embeddings_and_idf_scale(target_loader, target_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n preds_embeddings, preds_idf_scale = _get_embeddings_and_idf_scale(preds_loader, preds_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n precision, recall, f1_score = _get_precision_recall_f1(preds_embeddings, target_embeddings, preds_idf_scale, target_idf_scale)\n if baseline is not None:\n precision, recall, f1_score = _rescale_metrics_with_baseline(precision, recall, f1_score, baseline, num_layers, all_layers)\n output_dict = {'precision': precision.tolist(), 'recall': recall.tolist(), 'f1': f1_score.tolist()}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict"
] | [
"0.64376223",
"0.61849993",
"0.6173057",
"0.6096087",
"0.6008323",
"0.5948405",
"0.59441435",
"0.5903325",
"0.5891097",
"0.5812651",
"0.5770408",
"0.5759025",
"0.573958",
"0.5702037",
"0.5636004",
"0.5603585",
"0.55869937",
"0.5549397",
"0.5543058",
"0.5512206",
"0.5507263",
"0.54942656",
"0.5489111",
"0.5461238",
"0.54503465",
"0.5422344",
"0.54187006",
"0.54187006",
"0.5413212",
"0.54081094"
] | 0.6972612 | 0 |
Given a list of graphs in networkx format, write each of them in its own little gml file in a folder named name in the data_root folder. Create the folder, if necessary. This function is very hacky, parsing node labels on the go for datasets obtained from the Dortmund collection at | def write_graph_list(name, graph_list, data_root):
data_path = os.path.join(data_root, name)
if not os.path.exists(data_path):
os.makedirs(data_path)
# compute right number of trailing zeros for file names
format_positions = ceil(log10(len(graph_list)))
for i, g in enumerate(graph_list):
lines = nx.generate_gml(g)
# stupid networkx requires labels to be equal to node ids.
# we need to fix this
def sanitize_labels(x):
def getint(v:str):
return int(v.strip('"'))
if x.find('label') == -1:
return x + '\n'
else:
v = x[10:]
label = g.node[getint(v)]['label']
return f' label "{label}"\n'
fixed_lines = map(sanitize_labels, lines)
f = open(os.path.join(data_path, f'{i:0{format_positions}d}.gml'), 'w')
f.writelines(fixed_lines)
f.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))",
"def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)",
"def save_graphs(name, path):\n # Generate Fiber Density vs. Wedges graph\n save_graph_fiber_vs_wedges(name, path)\n\n # Generate Fiber Density vs. Rings graph\n save_graph_fiber_vs_rings(name, path)",
"def _write_network_file(graph, out_name, out_format=None, data=False,weight=False):\n\n if out_format==None:\n out_format=\"edges\"\n os.makedirs(os.path.dirname(out_name), exist_ok=True)\n #print(\"writing graph of format \" + out_format + \" at \" + out_name)\n if out_format == 'edges':\n nx.write_edgelist(graph, \"%s.edges\" % (out_name), data=data)\n elif out_format == 'gefx':\n nx.write_gexf(graph, \"%s.gefx\" % (out_name))\n elif out_format == 'gml':\n nx.write_gml(graph, \"%s.gml\" % (out_name))\n elif out_format == 'pajek':\n nx.write_pajek(graph, \"%s.pajek\" % (out_name))\n elif out_format == 'ncol':\n nx.write_edgelist(graph, \"%s.ncol\" % (out_name), delimiter='\\t',data=weight)\n elif out_format == 'graphML' :\n g = nx.write_graphml(graph, \"%s.graphml\" % (out_name))\n else:\n raise Exception(\"UNKNOWN FORMAT \" + out_format)",
"def create_rooted_trees_from_dir(paths, fout, outgroup):\n #pdb.set_trace()\n fout = open(fout, 'w')\n for count, path in enumerate(paths):\n base_path, tree_file_name = os.path.split(path)\n #pdb.set_trace()\n fin = open(path)\n for tree in fin:\n tree = tree.strip()\n tree = Tree(tree)\n tree.set_outgroup(outgroup)\n newick = tree.write(format=5) + '\\n'\n fout.write(newick)\n print count+1\n fout.close()",
"def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', '/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)",
"def generate_graphml_output(self, path):\n self.restructure_edge_info()\n self.restructure_node_info()\n return nx.write_graphml(self.G, path)",
"def write_config(filename, data):\n\n # Encode data\n desiredgraphs = ET.Element('desiredgraphs')\n\n for graph in data:\n curr_graph = ET.SubElement(desiredgraphs, 'graph', {key: value for key, value in graph.items() if not isinstance(value, list) and value})\n for key, lst in [(key, value) for key, value in graph.items() if isinstance(value, list) and value]:\n for item in lst:\n ET.SubElement(curr_graph, key, {key: value for key, value in item.items() if value})\n\n # Write\n with open(filename, 'w') as g:\n g.write(xml_to_string(desiredgraphs))",
"def each_word_and_feature_graphs(macros_data, save_dir, words, feature_labels):\n\n for word in words:\n word_dir = os.path.join(save_dir, word)\n if not os.path.exists(word_dir):\n os.makedirs(word_dir)\n for feature_label in feature_labels:\n print(\"graph for word {} for feature {}\".format(word, feature_label))\n generate_graph(macros_data, word_dir, [word], feature_label)",
"def save_dataset(dataset):\n\n with open(f\"{preprocessed_dataset_path}/similarity/{dataset}/items.json\") as f:\n items = json.load(f)\n\n graphs = []\n\n for item in tqdm(items):\n\n # using just factual nodes for similarity, leading to informative segues\n\n if dataset == 'mirex' or dataset == 'lastfmapi' or dataset == 'facebookrecommender':\n # custom initilizer, seed has also artist_musicbrainz_id, to be handled separately\n g = construct_graph(item['seed'], supplier=InformativeActionSupplier(), initializer=custom_construct_graph_initializer)\n else:\n g = construct_graph(item['seed'], supplier=InformativeActionSupplier())\n\n graphs.append(g)\n\n # in items node in joint graph, store also items sub-graph g, useful for similarity computation\n if dataset == 'mirex' or dataset == 'lastfmapi' or dataset == 'facebookrecommender':\n joint_graph = merge_graphs([lambda: graphs], strategy_fields_source_node=lambda g: {'graph': g}, strategy_graph_id=artist_id)\n else:\n joint_graph = merge_graphs([lambda: graphs], strategy_fields_source_node=lambda g: {'graph': g}, strategy_graph_id=artist_name)\n\n # save graph\n nx.write_gpickle(joint_graph, f\"{preprocessed_dataset_path}/similarity/{dataset}/graph\")",
"def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()",
"def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)",
"def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")",
"def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n with open(os.path.join(self.graph_dir_path, \"tp_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_edges.pkl\"), \"wb\") as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_edges.pkl\"), \"wb\") as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)",
"def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)",
"def graph_dir(\n directory: str,\n filename: str = '',\n orientation: str = 'LR',\n data: bool = False,\n show_files: bool = True,\n show_hidden: bool = False,\n max_depth: int = -1,\n ranksep: Union[float, None] = None,\n file_type: str = 'svg',\n render: bool = True\n) -> None:\n assert directory in os.listdir(), \\\n f'Invalid argument for \"directory\". {directory} is not in the current directory'\n options = ['LR', 'RL', 'TB', 'BT']\n assert orientation.upper() in options, \\\n f'Invalid argument for \"orientation\". Must be one of {\", \".join(options)}'\n assert file_type in ['svg', 'png'], \\\n 'Invalid argument for \"file_type\". Must be either \"png\" or \"svg\"'\n\n options = {'rankdir': orientation.upper(), 'overlap': 'scale', 'splines': 'polyline'}\n if ranksep is not None:\n options['ranksep'] = str(ranksep)\n\n tree = Digraph(graph_attr = options)\n index = 0\n multiple = lambda l: '' if l == 1 else 's'\n\n # Get data for size of each folder\n if data:\n dir_sizes = size(directory)\n\n walkdir = os.path.normpath(f'./{directory}/')\n # directory_data is the string used to build up the text in the nodes.\n directory_data = []\n # file_node is the string used to build file information up the text in the nodes.\n file_node = []\n for root, dirs, files in os.walk(walkdir):\n if max_depth > 0 and root.count(os.sep) >= max_depth:\n continue\n if not show_hidden:\n dirs[:] = [dir_ for dir_ in dirs if not dir_.startswith(('__', '.'))]\n tree.attr('node', shape='folder', fillcolor='lemonchiffon', style='filled,bold')\n\n parent_directory = directory if root == '.' else root\n directory_data.clear()\n directory_data.extend(os.path.basename(parent_directory))\n \n file_memory = convert(sum([os.path.getsize(os.path.join(root, f)) for f in files]))\n # Display directory data if parameters permit\n if data:\n directory_data.extend(f' ({dir_sizes[root]})')\n # \\l left aligns items in their container\n directory_data.append('\\l')\n if data and dirs:\n directory_data.extend(f'{len(dirs)} Folder{multiple(len(dirs))}\\l')\n if data and files:\n directory_data.extend(f'{len(files)} File{multiple(len(files))}')\n if not show_files and dirs:\n directory_data.extend(f' ({file_memory})')\n directory_data.append('\\l')\n\n root = root.replace(os.sep, '')\n tree.node(root, label=''.join(directory_data))\n for dir_ in dirs:\n path = os.path.join(root, dir_).replace(os.sep, '')\n tree.node(path, label=dir_)\n tree.edge(root, path)\n\n if files and show_files:\n index += 1\n tree.attr('node', shape='box', style='')\n # Display files in a box on the graph as well as memory information\n # if parameters permit\n if data:\n file_node.extend(f'{len(files)} File{multiple(len(files))} ({file_memory})\\l')\n file_node.extend(('\\l'.join(files), '\\l'))\n file_node_str = ''.join(file_node)\n file_node.clear()\n id_ = f'{index}{file_node_str}'.replace(os.sep, '')\n tree.node(id_, label=file_node_str)\n tree.edge(root, id_)\n\n filename = filename.rsplit('.', 1)[0] if filename else f'{directory}_Graph'\n if not render:\n tree.render(filename, format=file_type)\n os.remove(filename)\n else:\n if file_type == 'png':\n url = f'https://quickchart.io/graphviz?format={file_type}&graph={tree.source}'\n with open(f'{filename}.{file_type}', mode='wb') as f:\n f.write(requests.get(url).content)\n else:\n url = f'https://quickchart.io/graphviz?graph={tree.source}'\n src = requests.get(url).text\n # If request failed no svg is sent.\n if '<svg' not in src and '</svg>' not in src:\n print('Error rendering graph with 
quickchart.io.')\n else:\n with open(f'{filename}.svg', mode='w') as f:\n f.write(src)",
"def create_map(\n datapointsPath: Union[Path, str],\n linksPath: Union[Path, str],\n datapointAttrPath: Union[Path, str],\n node_attr_map: Dict[str, str],\n link_attr_map: Dict[str, str],\n snapshots: List[Dict] = [],\n playerSettings: Dict[str, Any] = {},\n outFolder: Union[Path, str] = \"data_out\",\n):\n\n # create folders and copy the index file\n print(f\">> creating folders\")\n out_dir = Path(outFolder)\n out_data_path = out_dir / \"data\"\n if not out_data_path.exists():\n print(f\"\\t- new folder - {out_data_path}\")\n out_data_path.mkdir(parents=True, exist_ok=True)\n else:\n print(f\"\\t- found existing. overwriting - {out_data_path}\")\n\n # copy the index and run scripts to out directory\n shutil.copy(\"src/index.html\", out_dir)\n print(f\"\\t- copied {out_dir}/index.html\")\n\n shutil.copy(\"src/run_local.sh\", out_dir)\n print(f\"\\t- copied {out_dir}/run_local.sh\\n\")\n\n # write the files\n print(f\">> building dataset\")\n __write_dataset_file(datapointsPath, datapointAttrPath, out_data_path)\n print(f\"\\t- new dataset file written to {out_data_path / 'nodes.json'}.\\n\")\n\n print(f\">> building network\")\n __write_network_file(datapointsPath, linksPath, node_attr_map, link_attr_map, out_data_path)\n print(f\"\\t- new network file written to {out_data_path / 'links.json'}.\\n\")\n\n print(f\">> building settings\")\n __write_settings_file(snapshots, playerSettings, out_data_path)\n print(f\"\\t- new settings file written to {out_data_path / 'settings.json'}.\\n\")",
"def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")",
"def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)",
"def create_folder_structure(ck_dir, out_dir):\n make_dirs(out_dir)\n train_out_dir = out_dir + '/train'\n if not os.path.exists(train_out_dir):\n os.mkdir(train_out_dir)\n\n for sdir in os.listdir(ck_dir):\n spath = os.path.join(ck_dir, sdir)\n for ddir in os.listdir(spath):\n dpath = os.path.join(spath, ddir)\n if os.path.isdir(dpath):\n os.chdir(dpath)\n else:\n print(\"not a dir:\", dpath)\n emotion_txt = glob.glob('*emotion*')\n if len(emotion_txt) == 1:\n add_emotion(os.path.join(dpath, emotion_txt[0]), train_out_dir)\n elif len(emotion_txt) > 1:\n print(emotion_txt)\n test(train_out_dir)",
"def parse_graphml_file_newick_format(filename: str, digraph=True):\n graphml_graph = nx.read_graphml(filename, node_type=newick.Node)\n if digraph:\n graphml_graph = graphml_graph.to_directed()\n\n for current_node in graphml_graph.nodes:\n graphml_graph.add_node(current_node, name=current_node, child_position=0)\n\n return graphml_graph",
"def load_all_graphs():\n all_graphs = []\n for i in range(7):\n with open(f'Full_Network_Book_{i+1}.gml', 'rb') as graph_file:\n all_graphs.append(nx.read_gml(graph_file))\n\n return all_graphs",
"def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')",
"def save_graph(self, path):\n if path.split('.')[-1]=='gexf':\n nx.write_gexf(self.graph, path)\n else:\n nx.write_gpickle(self.graph, path)",
"def create_network_graph(df_graph_tree):\n net = Network(height='750px', width='100%', directed=True, bgcolor='#222222', font_color='white')\n net.force_atlas_2based(gravity=-75)\n for index, row in df_graph_tree.iterrows():\n src = row['Source']\n dst = row['Target']\n label = row['Label']\n title = \"File fullname : {} <br> Type : {}\".format(row['Source'], row['File Type'])\n color = color_of_extension[row['File Type'].lower()] if row['File Type'].lower() in color_of_extension.keys() else 'grey'\n if row['File Type'] == 'folder':\n net.add_node(src, shape='text', label=label, color = color, title = title)\n else:\n net.add_node(src, shape='dot', label=label, color = color, title = title)\n if dst != '':\n #net.add_node(dst, label=label, title=title)\n net.add_edge(src, dst, value=1, color = '#6c6c6c')\n return net",
"def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)",
"def createOutputFile(dataList, maxClusterNum, labelConverter, filePrefix):\n outputFileList = []\n for i in range(maxClusterNum):\n outputFileList.append(open(filePrefix + \"_cluster_\" + str(i) + \".txt\", 'w'))\n\n for pt in dataList:\n matchingCluster = labelConverter[pt.label]\n if matchingCluster == -1:\n continue\n outputFileList[matchingCluster].write(str(pt.id) + '\\n')\n\n for i in range(maxClusterNum):\n outputFileList[i].close()",
"def export_tikz(nodes, scale, path):\n filename = asksaveasfile(defaultextension=\".tex\")\n if filename:\n _file = open(filename.name, 'w')\n\n _file.write(\"\\\\begin{tikzpicture}\\n\")\n _file.write(\"\\\\begin{axis}[%\\n\")\n _file.write(\"width=\\\\textwidth,\\n\")\n _file.write(\"scale only axis,\\n\")\n _file.write(\"xmin=-100,\\n\")\n _file.write(\"xmax=2700,\\n\")\n _file.write(\"ymin=-100,\\n\")\n _file.write(\"ymax=2100,\\n\")\n _file.write(\"y dir=reverse,\\n\")\n _file.write(\"axis x line*=bottom,\\n\")\n _file.write(\"axis y line*=left\\n\")\n _file.write(\"]\\n\")\n\n for group in get_groups(nodes):\n _file.write(\n \"\"\"\\\\addplot [color=black,mark size=5.0pt,\n only marks,mark=*,mark options={solid,\n fill=\"\"\" + group.lower() + \"},forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for node in nodes:\n if node.color == group:\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n\n if not path is None:\n _file.write(\"\\\\addplot [draw=black,forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for path_node in path['Tour']:\n print(path_node)\n node = nodes[int(path_node)]\n print(node)\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n _file.write(\"\\\\end{axis}\\n\")\n _file.write(\"\\\\end{tikzpicture}%\\n\")\n _file.close()",
"def mk_label_folders(idir, classes):\n lbls = set(classes)\n for l in lbls:\n if not os.path.isdir(join(idir, l)):\n os.mkdir(join(idir, l))\n print(f\"Making {l} folder in {idir}\")\n print(\"Done!\")",
"def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()"
] | [
"0.6296529",
"0.6196926",
"0.6115122",
"0.60457796",
"0.60432667",
"0.6025473",
"0.6017168",
"0.6001553",
"0.5972465",
"0.5906791",
"0.5901544",
"0.585161",
"0.58406854",
"0.5752646",
"0.5749147",
"0.57461995",
"0.5728297",
"0.5697866",
"0.5691245",
"0.56871533",
"0.5682423",
"0.5666112",
"0.56378067",
"0.56377435",
"0.56337076",
"0.5618812",
"0.5610416",
"0.5606858",
"0.56043077",
"0.5602492"
] | 0.77036434 | 0 |
fetches tweets and wraps them in Tweet objects | def get_tweets(self):
now = datetime.datetime.now()
tweet_json = self.api.get_tweets(self.last, now)
self.last = now
return [Tweet(x) for x in tweet_json] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text",
"def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass",
"def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}",
"def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets",
"def get_tweets(self):\r\n return self.tweets",
"def get_tweets():\n\n return Tweet.query.all()",
"def get_tweets(api):\n return api.user_timeline()",
"def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets",
"def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets",
"def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))",
"def open_tweet_obj(tweets_obj):\n tweets = []\n for tweet_obj in tweets_obj:\n for tweet in tweet_obj:\n tweets.append(tweet)\n return tweets",
"def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)",
"def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets",
"def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets",
"def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets",
"def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break",
"def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1",
"def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200",
"def fetch_tweets(self, screen_name, count):\n return {}",
"def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")",
"def collect_tweets(ticker):\n\n # Authenticate Tweepy credentials\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY)\n auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY)\n api = tweepy.API(auth)\n\n stock = Stock.objects.get(ticker=ticker)\n\n # Search for recent Tweets with the specific ticker\n collected_tweets = api.search(q=ticker, result_type='recent', count=100)\n\n # Iterate over the collected Tweets and save them\n for tweet in collected_tweets:\n try:\n Tweet.objects.create(\n text=tweet.text,\n created_at=tweet.created_at,\n user_id=tweet.user.id,\n user_screen_name=tweet.user.screen_name,\n verified=tweet.user.verified,\n followers_count=tweet.user.followers_count,\n friends_count=tweet.user.friends_count,\n favourites_count=tweet.user.favourites_count,\n retweet_count=tweet.retweet_count,\n stock=stock,\n )\n except IntegrityError:\n pass",
"def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets",
"def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets",
"def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()",
"def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")",
"def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets",
"def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added",
"def get_tweets(self):\n\t\ttweets = ''\n\t\tfor each in self.tweets_posted:\n\t\t\ttweets += each.timeline_format() + '\\n'\n\t\ttweets = tweets.strip('\\n')\n\t\treturn tweets"
] | [
"0.76285994",
"0.7314002",
"0.72527397",
"0.7232973",
"0.7203812",
"0.72006255",
"0.71095735",
"0.7069534",
"0.70468444",
"0.70431787",
"0.70248395",
"0.70181644",
"0.70125926",
"0.70005286",
"0.69737434",
"0.69352496",
"0.6934322",
"0.68964857",
"0.688853",
"0.68642783",
"0.6852069",
"0.68193996",
"0.67879534",
"0.67773",
"0.67575336",
"0.6757283",
"0.67236173",
"0.6714966",
"0.6692328",
"0.6687192"
] | 0.73902327 | 1 |
updates max_importance value if importance is higher than max_importance | def update_importance(self, importance):
if importance > self.max_importance:
self.max_importance = importance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_importance(self, importance):\r\n self.importance = importance\r\n for tweet in self.tweets:\r\n tweet.update_importance(importance)",
"def change_max(self, level, value):\n if value < 0:\n raise AttributeError('max value should be greater than zero')\n if level in self.progress_maxes:\n self.progress_maxes[level] = value",
"def max_price(self, new_max_price):\n self._max_price = new_max_price",
"def userMaximum(self, new_max: float) -> None:\n self._user_maximum = new_max\n self.reset_limits()",
"def set_maximum(self, max_value):\n\n self._progress.setMaximum(max_value)",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value",
"def set_is_max(self, is_max):\n self.__is_max = is_max",
"def set_Ec_max(self, x):\n x = float(x)\n if self.Ec_max != x:\n self.Ec_max = x\n self.Ec[1] = x",
"def set_max(self, max):\n self.set_val((self.val[0], max))",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def max_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_percentage\")",
"def max_value(self, max_value):\n\n self._max_value = max_value",
"def max_value(self, max_value):\n\n self._max_value = max_value",
"def max_value(self, max_value):\n\n self._max_value = max_value",
"def set_max(self, val):\n self._max = val",
"def _update_max_value(k, mi, by_gene):\n # Update the max mutual info.\n if mi is not None:\n by_gene[k] = max(by_gene.get(k, 0), mi)",
"def update_fodder(self):\n self.remaining_food['Herbivore'] = self.parameters['f_max']",
"def maximal_valance(self) -> int:\n max_valances = {'H': 1, 'B': 4, 'C': 4, 'N': 4, 'O': 3, 'F': 1,\n 'Si': 4, 'P': 6, 'S': 6, 'Cl': 4, 'Br': 4, 'I': 6}\n\n if self.label in max_valances:\n return max_valances[self.label]\n\n else:\n logger.warning(f'Could not find a valid valance for {self}. '\n f'Guessing at 6')\n return 6",
"def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]",
"def set_max_leverage(self, max_leverage):\n control = MaxLeverage(max_leverage)\n self.register_account_control(control)",
"def setMaximumValue(self, value: int):\n self.ui.progress.setMaximum(value)",
"def max(self, max):\n\n self._max = max",
"def max(self, max):\n\n self._max = max",
"def calculate_greatest(self):\n greatest = 0\n for resourceList in self.loading.values():\n for time, use in resourceList:\n if use > greatest:\n greatest = use\n self.emit(\"greatest_calculated\",greatest)\n return greatest",
"def set_maxVal(self, val):\n self.maxVal = val",
"def update_highest(csevo):\n tmax = [t[np.argmax(N)] for (t, N) in figure_to_data(csevo)]\n\n data = [{\n \"x\": list(range(len(tmax))), \"y\":tmax, \"type\":\"bar\"\n }]\n\n layout = {\n \"title\":'Time of largest abundance',\n \"template\":\"plotly_dark\",\n \"xaxis\":{\"title\":\"Charge state\", \"range\":[0, len(tmax)]},\n \"yaxis\":{\"title\":\"Time (s)\", \"type\":\"log\"}\n }\n\n return {\"data\":data, \"layout\":layout}",
"def update_max_search_depth(self, depth):\n if self.max_search_depth < depth:\n self.max_search_depth = depth",
"def set_progress_range(self, maximum):\r\n\r\n pass"
] | [
"0.6120561",
"0.5990391",
"0.56675154",
"0.56649125",
"0.5598316",
"0.5463613",
"0.5463613",
"0.5452502",
"0.541491",
"0.5413382",
"0.5384408",
"0.5384408",
"0.53646934",
"0.53610283",
"0.53610283",
"0.53610283",
"0.5323976",
"0.53171647",
"0.5316251",
"0.5280184",
"0.52691627",
"0.52634156",
"0.52282524",
"0.52093786",
"0.52093786",
"0.51812464",
"0.5181023",
"0.51643115",
"0.51628613",
"0.5137001"
] | 0.86922395 | 0 |
fetches tweets from start date till end date | def get_tweets(self, start_date, end_date):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tweets(self, start_date, end_date):\r\n # get tweets from api\r\n config = crawler.APIConfig()\r\n config.set_api_key(\"8e1618e9-419f-4239-a2ee-c0680740a500\")\r\n config.set_end_time(end_date)\r\n config.set_filter(self.region)\r\n config.set_start_time(start_date)\r\n return crawler.FetchTweets(config).fetch()",
"def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()",
"def get_tweets_in_date_range(start, end, screen_name):\n start, end = convert_string_to_datetime(start), convert_string_to_datetime(end)\n culled_tweets = []\n first_date, max_id = start, None\n errors = 0\n while first_date >= start:\n try:\n tweets = get_tweets(max_id=max_id, screen_name=screen_name)\n except TwitterException as e:\n errors += 1\n with open('twitter_errors.txt', 'a') as f:\n f.write(e.message + ',' + screen_name + '\\n')\n if errors != 5:\n time.sleep(1)\n continue\n else:\n if not culled_tweets:\n return False\n break\n if max_id is not None and (tweets and tweets[0]['id_str'] == max_id):\n tweets.pop(0)\n oldest_tweet, newest_tweet = tweets[-1], tweets[0]\n first_date = convert_time_string(oldest_tweet['created_at'])\n last_date = convert_time_string(newest_tweet['created_at'])\n max_id = oldest_tweet['id_str']\n if first_date <= start or last_date >= end:\n tweets = [t for t in tweets\n if convert_time_string(t['created_at']) <= end\n and convert_time_string(t['created_at']) >= start]\n culled_tweets.extend(tweets)\n\n return culled_tweets",
"def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")",
"def get_users_tweets(users, min_date, max_date, result_limit, key, secret_key):\n \n auth = tweepy.OAuthHandler(key, secret_key)\n max_datetime = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()\n min_datetime = datetime.datetime.strptime(min_date, '%Y-%m-%d').date()\n \n #initialize variables\n max_id = None\n min_id = None\n mydata = []\n\n for user in users:\n my_api = tweepy.API(auth)\n\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break\n\n while min_id == None:\n start_id = item.id\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n max_id=start_id,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break \n #get another 25 starting with the max... \n # if min_id is None... then call again... using the bottom of mydata as max_id...\n\n df = pd.DataFrame(mydata).loc[:,'tweet_id':'favourite_count']\n return df",
"def tweets(self, start= None, interval= None):\n if start == None :\n return tweet.objects.filter(user = self)\n if interval == None :\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(timestamp__lte=start+interval) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))",
"def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text",
"def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break",
"def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass",
"def getTweets(self, fromDate, toDate):\n return self.session.query(Tweet.text).\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate).all()",
"def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added",
"def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1",
"def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets",
"def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets",
"def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets",
"def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets",
"def get_twitter_data(keyword, from_date, to_date):\r\n # Creating list to append tweet data to\r\n counts_list = []\r\n dates_list = []\r\n \r\n days = pd.date_range(start = from_date, end = to_date)\r\n \r\n for i in range(len(days)-1):\r\n \r\n # Using TwitterSearchScraper to count daily tweets\r\n daily_count = 0\r\n for item in sntwitter.TwitterSearchScraper(keyword + ' since:' + str(days[i].date()) + ' until:' + str(days[i+1].date())).get_items():\r\n daily_count = daily_count + 1\r\n \r\n print(\"Day\", str(days[i].date()), \"had:\", daily_count, \". Going to next day...\")\r\n \r\n dates_list.append(days[i].date())\r\n counts_list.append(daily_count)\r\n \r\n return pd.DataFrame({'date': dates_list, 'tweets': counts_list})",
"def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'",
"def get_tweets(api):\n return api.user_timeline()",
"def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]",
"def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets",
"def query_all_tweets(query):\n year = 2006\n month = 3\n\n limits = []\n while date(year=year, month=month, day=1) < date.today():\n nextmonth = month + 1 if month < 12 else 1\n nextyear = year + 1 if nextmonth == 1 else year\n\n limits.append(\n (date(year=year, month=month, day=1),\n date(year=year, month=month, day=10))\n )\n limits.append(\n (date(year=year, month=month, day=10),\n date(year=year, month=month, day=20))\n )\n limits.append(\n (date(year=year, month=month, day=20),\n date(year=nextyear, month=nextmonth, day=1))\n )\n year, month = nextyear, nextmonth\n\n queries = ['{} since:{} until:{}'.format(query, since, until)\n for since, until in reversed(limits)]\n\n pool = Pool(20)\n all_tweets = []\n try:\n for new_tweets in pool.imap_unordered(query_tweets_once, queries):\n all_tweets.extend(new_tweets)\n logging.info(\"Got {} tweets ({} new).\".format(\n len(all_tweets), len(new_tweets)))\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning all tweets \"\n \"gathered so far.\")\n\n return sorted(all_tweets)",
"def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)",
"def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets",
"def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets",
"def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)",
"def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))",
"def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)",
"def twitter_get_timeline(self):\n if self.twitter_bearer_token is None:\n return None\n\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=100&screen_name=' + \\\n self.private_data['twitter']['screen_name']\n\n headers = {'Authorization': 'Bearer %s' % self.twitter_bearer_token,\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n\n resp = requests.get(url, headers=headers)\n tweets = []\n if resp.status_code == 200:\n content = json.loads(resp.content)\n for i in range(0, len(content)):\n tweets.append(content[i]['text'])\n else:\n print('ERROR: unable to retrieve timeline')\n print(resp.content)\n\n return tweets"
] | [
"0.7896825",
"0.76824534",
"0.7510762",
"0.7041128",
"0.6890227",
"0.6792959",
"0.6664002",
"0.66307527",
"0.65022916",
"0.6492793",
"0.64922196",
"0.6477469",
"0.6452656",
"0.6447653",
"0.6434829",
"0.6408373",
"0.6390277",
"0.63862234",
"0.6347076",
"0.6342707",
"0.6322864",
"0.62890965",
"0.62758493",
"0.6263058",
"0.621384",
"0.61119354",
"0.6081211",
"0.60539496",
"0.6006532",
"0.5988172"
] | 0.8704661 | 0 |
fetches tweets from start date till end date | def get_tweets(self, start_date, end_date):
# get tweets from api
config = crawler.APIConfig()
config.set_api_key("8e1618e9-419f-4239-a2ee-c0680740a500")
config.set_end_time(end_date)
config.set_filter(self.region)
config.set_start_time(start_date)
return crawler.FetchTweets(config).fetch() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tweets(self, start_date, end_date):\r\n pass",
"def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()",
"def get_tweets_in_date_range(start, end, screen_name):\n start, end = convert_string_to_datetime(start), convert_string_to_datetime(end)\n culled_tweets = []\n first_date, max_id = start, None\n errors = 0\n while first_date >= start:\n try:\n tweets = get_tweets(max_id=max_id, screen_name=screen_name)\n except TwitterException as e:\n errors += 1\n with open('twitter_errors.txt', 'a') as f:\n f.write(e.message + ',' + screen_name + '\\n')\n if errors != 5:\n time.sleep(1)\n continue\n else:\n if not culled_tweets:\n return False\n break\n if max_id is not None and (tweets and tweets[0]['id_str'] == max_id):\n tweets.pop(0)\n oldest_tweet, newest_tweet = tweets[-1], tweets[0]\n first_date = convert_time_string(oldest_tweet['created_at'])\n last_date = convert_time_string(newest_tweet['created_at'])\n max_id = oldest_tweet['id_str']\n if first_date <= start or last_date >= end:\n tweets = [t for t in tweets\n if convert_time_string(t['created_at']) <= end\n and convert_time_string(t['created_at']) >= start]\n culled_tweets.extend(tweets)\n\n return culled_tweets",
"def populate_twitter_acct_tweets_by_date():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n for acct in twitter_accts:\n results = api.GetSearch(raw_query=\"l=&q=from%3AReutersUS%20since%3A2017-12-01%20until%3A2017-12-02&src=typd\")",
"def get_users_tweets(users, min_date, max_date, result_limit, key, secret_key):\n \n auth = tweepy.OAuthHandler(key, secret_key)\n max_datetime = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()\n min_datetime = datetime.datetime.strptime(min_date, '%Y-%m-%d').date()\n \n #initialize variables\n max_id = None\n min_id = None\n mydata = []\n\n for user in users:\n my_api = tweepy.API(auth)\n\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break\n\n while min_id == None:\n start_id = item.id\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n max_id=start_id,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break \n #get another 25 starting with the max... \n # if min_id is None... then call again... using the bottom of mydata as max_id...\n\n df = pd.DataFrame(mydata).loc[:,'tweet_id':'favourite_count']\n return df",
"def tweets(self, start= None, interval= None):\n if start == None :\n return tweet.objects.filter(user = self)\n if interval == None :\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))\n return tweet.objects.filter(Q(user = self) & Q(timestamp__gte=start) & Q(timestamp__lte=start+interval) & Q(is_reply=False) & Q(is_quote=False) & Q(is_retweet=False))",
"def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text",
"def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break",
"def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass",
"def getTweets(self, fromDate, toDate):\n return self.session.query(Tweet.text).\\\n filter(Tweet.created_at > fromDate).\\\n filter(Tweet.created_at < toDate).all()",
"def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added",
"def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1",
"def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets",
"def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets",
"def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets",
"def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets",
"def get_twitter_data(keyword, from_date, to_date):\r\n # Creating list to append tweet data to\r\n counts_list = []\r\n dates_list = []\r\n \r\n days = pd.date_range(start = from_date, end = to_date)\r\n \r\n for i in range(len(days)-1):\r\n \r\n # Using TwitterSearchScraper to count daily tweets\r\n daily_count = 0\r\n for item in sntwitter.TwitterSearchScraper(keyword + ' since:' + str(days[i].date()) + ' until:' + str(days[i+1].date())).get_items():\r\n daily_count = daily_count + 1\r\n \r\n print(\"Day\", str(days[i].date()), \"had:\", daily_count, \". Going to next day...\")\r\n \r\n dates_list.append(days[i].date())\r\n counts_list.append(daily_count)\r\n \r\n return pd.DataFrame({'date': dates_list, 'tweets': counts_list})",
"def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'",
"def get_tweets(api):\n return api.user_timeline()",
"def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]",
"def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets",
"def query_all_tweets(query):\n year = 2006\n month = 3\n\n limits = []\n while date(year=year, month=month, day=1) < date.today():\n nextmonth = month + 1 if month < 12 else 1\n nextyear = year + 1 if nextmonth == 1 else year\n\n limits.append(\n (date(year=year, month=month, day=1),\n date(year=year, month=month, day=10))\n )\n limits.append(\n (date(year=year, month=month, day=10),\n date(year=year, month=month, day=20))\n )\n limits.append(\n (date(year=year, month=month, day=20),\n date(year=nextyear, month=nextmonth, day=1))\n )\n year, month = nextyear, nextmonth\n\n queries = ['{} since:{} until:{}'.format(query, since, until)\n for since, until in reversed(limits)]\n\n pool = Pool(20)\n all_tweets = []\n try:\n for new_tweets in pool.imap_unordered(query_tweets_once, queries):\n all_tweets.extend(new_tweets)\n logging.info(\"Got {} tweets ({} new).\".format(\n len(all_tweets), len(new_tweets)))\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning all tweets \"\n \"gathered so far.\")\n\n return sorted(all_tweets)",
"def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)",
"def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets",
"def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets",
"def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)",
"def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))",
"def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass",
"def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)",
"def twitter_get_timeline(self):\n if self.twitter_bearer_token is None:\n return None\n\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=100&screen_name=' + \\\n self.private_data['twitter']['screen_name']\n\n headers = {'Authorization': 'Bearer %s' % self.twitter_bearer_token,\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n\n resp = requests.get(url, headers=headers)\n tweets = []\n if resp.status_code == 200:\n content = json.loads(resp.content)\n for i in range(0, len(content)):\n tweets.append(content[i]['text'])\n else:\n print('ERROR: unable to retrieve timeline')\n print(resp.content)\n\n return tweets"
] | [
"0.8704661",
"0.76824534",
"0.7510762",
"0.7041128",
"0.6890227",
"0.6792959",
"0.6664002",
"0.66307527",
"0.65022916",
"0.6492793",
"0.64922196",
"0.6477469",
"0.6452656",
"0.6447653",
"0.6434829",
"0.6408373",
"0.6390277",
"0.63862234",
"0.6347076",
"0.6342707",
"0.6322864",
"0.62890965",
"0.62758493",
"0.6263058",
"0.621384",
"0.61119354",
"0.6081211",
"0.60539496",
"0.6006532",
"0.5988172"
] | 0.7896825 | 1 |
Return a final mad lib with parts of speech replaced by user input. | def make_madlib(get_input):
# call the get_input function and make a variable from its output list
replaced_list = get_input(read_text)
index = 0
# we want both the index and the word that we want to replace in text_split
for (i, word) in enumerate(text_split):
# find the parts of speech, denoted by brackets
if word[0] == "[":
# replace the word at that recorded index with user's input words
text_split[i] = replaced_list[index]
# increase index for next loop so that user's next word is used
index += 1
final_madlib = " ".join(text_split)
print final_madlib | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mad_libs():\n\n # The parts of speech we are intrested in removing and replacing\n to_replace = [\"JJ\", \"JJR\", \"JJS\", \"NN\", \"NNS\", \"RB\", \"RBR\", \"RBS\"]\n done = False\n while not done:\n print(\"You're in the madlibs menu\")\n print(\"Here are your options:\")\n print(\"(1) Enter a file\")\n print(\"(2) Enter a text\")\n print(\"(3) Produce a mad lib from file\")\n print(\"(4) Fill in a mad lib using a text\")\n user_in = input(\"Choose an option by entering it's text. \" \\\n \"To exit enter nothing: \")\n print()\n \n if user_in.upper() == \"ENTER A FILE\":\n # Gets the text to turn into a madlib\n madlib_file = open(get_file())\n madlib_text = madlib_file.read()\n \n elif user_in.upper() == \"ENTER A TEXT\":\n # Gets a text from the gutenburg libary to get a bunch of words\n # we could use to replace words in the madlibs with.\n replacement_txt = input(\"Please enter the name of a text for words: \") + \".txt\"\n replacement_txt = corpus.gutenberg.words(replacement_txt)\n fill_words = get_words_for_fill(replacement_txt, to_replace)\n \n elif user_in.upper() == \"PRODUCE A MAD LIB FROM FILE\" or \\\n user_in.upper() == \"PRODUCE MAD LIB\":\n # Produces the madlib by removing a random amount of certain words\n # Outputs the madlib\n madlib_text = madlib_remove(madlib_text, to_replace)\n print(stringprettify(madlib_text))\n \n elif user_in.upper() == \"FILL IN A MAD LIB USING A TEXT\" or \\\n user_in.upper() == \"FILL IN MAD LIB\":\n # Fills in the madlib with words from the text from\n # the gutenburg library. \n # For example, austen-persuasion gives you\n # Jane Austen's Persuasion.\n # Outputs the filled-in madlib\n result = fill_in_words(madlib_text, fill_words, to_replace)\n print(stringprettify(result))\n elif user_in == \"\":\n # Exit the menu\n done = True\n else:\n print(\"Input not recognized! Please enter a valid input.\\n\")",
"def madlib2():\n print(\"\\n\")\n print(\n \"Alright! First let me ask you a few questions to get the story \"\n \"telling started.\")\n print(\"Please make sure to not misspell your answers.\")\n print(\"\")\n question1 = input(\"Choose any male name for your protagonist: Please \"\n \"capitalize the name: \")\n question2 = input(\"Choose any male name for your antagonist: Please \"\n \"capitalize the name: \")\n question3 = input(\"Choose one of the following verbs: jump, squat, \"\n \"or spin: \")\n question4 = input(\"Choose one of the following adjectives: kooky, \"\n \"fluffy, or confused: \")\n question5 = input(\"Choose one of the following nouns: toothbrush, \"\n \"shoe, \"\n \"or bottle-cap: \")\n question6 = input(\"Choose one of the following verbs: tickled, poked, \"\n \"or tackled: \")\n question7 = input(\"Choose one of the following nouns: Please \"\n \"capitalize the noun: Diaper, Dandelion, \"\n \"or Diva: \")\n question8 = input(\"Choose one of the following adjectives: aggressive, \"\n \"attentive, or affordable: \")\n question9 = input(\"Choose one of the following items: AAA battery, \"\n \"Q-tip, or USB: \")\n question10 = input(\"Choose one of the following adjectives: blank, \"\n \"happy, \"\n \"or sideways: \")\n question11 = input(\"Choose one of the following adjectives: noisy, \"\n \"stinky, or deadly: \")\n print(\"\")\n print(\"EXCELLENT, Let the story telling commence.\")\n print(\"\\n\")\n print(\"ONCE UPON A TIME...\")\n print(\"There was a young king named\", question1 + ',' + \" who ruled a \"\n \"magical \"\n \"land called\",\n question7, \"Desert.\\nThis piece of land's name was a sort of \"\n \"misnomer \"\n \"because it wasn't a desert at all, it was in fact an \"\n \"island.\\nBut having been deserted for centuries by it's \"\n \"past inhabitants, \"\n \"king\",\n question1, \"decided to name the land over it's loneliness.\")\n print(\"The other reason king\", question1, \"named the land\", question7,\n \"Desert was because it was full of them.\", question7 + 's' + \" \"\n \"that \"\n \"is.\")\n print(\"As king\", question1, \"woke up one day, he practiced his morning \"\n \"routine where he would\", question3,\n \"for 2 full hours before \"\n \"eating breakfast.\")\n print(\"But this morning struck king\", question1, \"differently.\\nHe was \"\n \"feeling rather\",\n question4, \"for whatever unknown reason.\\nTherefore he concluded \"\n \"that \"\n \"only one person was to blame for this...it had to \"\n \"have \"\n \"been because of lord\", question2 + ',' + \" HAD TO HAVE \"\n \"BEEN, \"\n \"the king \"\n \"thought to \"\n \"himself.\")\n print(\"He decided to confront lord\",\n question2 + ',' + \" his evil twin brother \"\n \"who ruled Vapor \"\n \"Valley. 
A land full of \"\n \"vapor.\")\n print(\"The king decided he was going to get to the bottom of this issue \"\n \"and use his ultimate weapon, which was a\", question5 + ',' +\n \" to destroy \"\n \"his twin \"\n \"for making \"\n \"him feel\",\n question4 + '.')\n print(\"As the king walked over to his ship to sail to Vapor Valley, \"\n \"he was suddenly\", question6, \"by someone.\")\n print(\"He regained his altered focus and turned to his girlfriend,\",\n question8, \"Ashley.\\nShe had rushed to him to tell him that it \"\n \"wasn't \"\n \"his evil twin that made him feel\", question4 + ',' +\n \" it as \"\n \"was \"\n \"something\"\n \" else.\")\n print(\"She continued to explain that it was because he forgot to carry \"\n \"his lucky\", question9, \"while he slept.\")\n print(\"King\", question1, \"looked at\", question8, \"Ashley with \"\n \"disbelief. He \"\n \"couldn't entirely \"\n \"believe it.\")\n print(\"The king turned turned back to his ship, and there he \"\n \"was...lord\",\n question2, \"surrounded by his loyal\", question11, \"minions.\")\n print(\"The king looked at his twin with a\", question10, \"facial \"\n \"expression \"\n \"knowing what \"\n \"was about to \"\n \"happen.\")\n print(\"The king hit one single\", question3, \"and his ship exploded into \"\n \"a million pieces...\")\n print(question7, \"Desert remained untouched by his evil twin and king\",\n question1, \"continued ruling the land until eventually his son took \"\n \"over.\\nWho was named after himself and the land, Sir\",\n question1,\n question7,\n \"Jr.\")\n print(\"\")\n print(\"THE END.\")\n print(\"\\n\")\n outro()",
"def madlib1():\n print(\"\\n\")\n print(\n \"Alright! First let me ask you a few questions to get the story \"\n \"telling started.\")\n print(\"Please make sure to not misspell your answers.\")\n print(\"\")\n question1 = input(\n \"Choose any female name for your main character. Please capitalize \"\n \"the name: \")\n question2 = input(\n \"Choose any male name for your second main character. Please \"\n \"capitalize the name: \")\n question3 = input(\n \"Choose one of the following verbs: run, smash, or explode: \")\n question4 = input(\n \"Choose one of the following adjectives: smelly, hairy, or shiny: \")\n question5 = input(\n \"Choose one of the following verbs: tripped, tumbled, or collided: \")\n question6 = input(\"Choose any number from 2-100: \")\n question7 = input(\n \"Choose one of the following nouns: chicken-nugget, fork, \"\n \"or jolly-rancher: \")\n question8 = input(\n \"Choose one of the following names: Toby From HR, ToeJam, or McLovin: \")\n question9 = input(\n \"Choose one of the following verbs: launch, toss, kick: \")\n question10 = input(\n \"Choose one of the following adjectives: god-like, mystic, \"\n \"or superhuman: \")\n question11 = input(\n \"Choose one of the following nouns: hot air balloon, walrus, \"\n \"or category 5 hurricane: \")\n question12 = input(\n \"Choose one of the following adjectives: baffled, drunk, \"\n \"or dehydrated: \")\n print(\"\")\n print(\"EXCELLENT, Let the story telling commence.\")\n print(\"\\n\")\n print(\"ONCE UPON A TIME...\")\n print(\"There was a young grasshopper named\", question1, \"that loved to\",\n question3, \"through walls.\")\n print(question1,\n \"had incredible super grasshopper strength and always hopped as if \"\n \"nothing around her mattered.\\nOne day\",\n question1, \"ran into a very\", question4, \"frog named\",\n question2 + '.')\n print(question1, \"only knew this frog's name because he had a nametag on.\")\n print(question2, \"was so startled by\", question1 + \"'s approach that he\",\n question5,\n \"against a nearby mushroom causing him to spin out of control and \"\n \"completely collapse on the ground.\")\n print(\"As\", question2, \"laid there completely unconscious,\", question1,\n \"tried her best to get it together and see if\", question2,\n \"was alright.\")\n print(question1, \"got closer to\", question2,\n \"and for whatever reason started to get the sense that something \"\n \"harmful was approaching them.\")\n print(\"Without hesitation\", question1, \"quickly grabbed\", question2,\n \"with her super strength and started to hop away from whatever she \"\n \"sensed.\")\n print(question1, \"only got to hop away\", question6,\n \"times before she ran into a wall that for some reason she couldn't\",\n question3, \"through.\")\n print(question1,\n \"desperately tried to go around the wall that seemed to be as long \"\n \"as the Wall of China, but it was too late...it had caught up to \"\n \"them.\\nShe turned around still carrying an unconscious\",\n question2 + ',', \"to make eye-contact with a...\", question7 + '...',\n \"who had keyboards for hands and coded for fun.\")\n print(question1, \"was so surprised she hopped straight up in the air\",\n question6,\n \"times. 
SHE COULDN'T BELIEVE IT, it was...IT WAS...the one and \"\n \"only...\" + question8 + ',',\n \"her arch nemesis.\")\n print(\"Before she could use her super strength to power\", question9,\n question2,\n \"to safety, a sudden...ribbit interrupted her.\")\n print(question2, \"had finally woken up and looked\",\n question10 + \", for an amphibian that is.\")\n print(\"As\", question2,\n \"got up on his froglegs, he let out another roar-like ribbit and \"\n \"started huffing and puffing like your stereotypical big bad \"\n \"wolf.\\nHe grew as big as a\",\n question11, \"and turned directly to\", question8,\n \"while standing in a karate fighting stance.\")\n print(question8, \"looked extremely\", question12, \"at the sight of\",\n question2 + \"'s appearance that he turned around and ran for his\",\n question7 + \"-like life.\")\n print(question1 + \" and \" + question2,\n \"looked at each other, knowing exactly what had to be done \"\n \"next.\\nThey combined their super power abilities to destroy the \"\n \"wall in a matter of minutes, incase anyone else without the \"\n \"blessing of super power abilities ever got in the situation they \"\n \"were just in.\\nAfterwards they decided they would journey \"\n \"together and spread peace across their existing worlds.\\nThey \"\n \"hopped into the setting sun horizon and disappeared for eternity.\")\n print(\"\")\n print(\"THE END.\")\n print(\"\\n\")\n outro()",
"def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))",
"def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form",
"def m() -> str:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.adjust_for_ambient_noise(source)\n logger.info(\"Microphone Active! Waiting for prompt!\")\n audio = r.listen(source)\n\n s = r.recognize_google(audio) #Send the audio to google\n result = s.lower()\n return result",
"def test_madlib_substitution():\n actual = madlib(input_values)\n expected = output_text\n assert actual == expected",
"def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])",
"def takecommand():\r\n\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"listening....\")\r\n r.pause_threshold=1\r\n \"\"\" Pause_threshold will let you to speak with your own pace\"\"\"\r\n\r\n #r.energy_threshold=500\r\n \"\"\" energy threshold will stop hindrens from outside\"\"\"\r\n\r\n audio=r.listen(source)\r\n\r\n try:\r\n print(\"In process of recognizing..\")\r\n query=r.recognize_google(audio,language=\"en-in\")\r\n \"\"\" query will take date that has been spoken by user with the help of google API\"\"\"\r\n print(\"you said :\",query)\r\n\r\n except Exception as e:\r\n print(\"can you speak this again\")\r\n return \"none\"\r\n return query",
"def get_speech(self, phrase):\n src = os.path.join(constants.CONFIG_PATH, self.voice)\n text = phrase\n\n def preprocess(syllables):\n temp = []\n for syllable in syllables:\n for p in self.punctuation:\n syllable = syllable.replace(p, \"\")\n if syllable.isdigit():\n syllable = atc.num2chinese(syllable)\n new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)\n for e in new_sounds:\n temp.append(e)\n else:\n temp.append(syllable)\n return temp\n \n if not os.path.exists(src):\n logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))\n return None\n logger.debug(\"{} 合成中...\".format(self.SLUG))\n delay = 0\n increment = 355 # milliseconds\n pause = 500 # pause for punctuation\n syllables = lazy_pinyin(text, style=pypinyin.TONE3)\n syllables = preprocess(syllables)\n \n # initialize to be complete silence, each character takes up ~500ms\n result = AudioSegment.silent(duration=500*len(text))\n for syllable in syllables:\n path = os.path.join(src, syllable+\".wav\")\n sound_file = Path(path)\n # insert 500 ms silence for punctuation marks\n if syllable in self.punctuation:\n short_silence = AudioSegment.silent(duration=pause)\n result = result.overlay(short_silence, position=delay)\n delay += increment\n continue\n # skip sound file that doesn't exist\n if not sound_file.is_file():\n continue\n segment = AudioSegment.from_wav(path)\n result = result.overlay(segment, position=delay)\n delay += increment\n\n tmpfile = ''\n with tempfile.NamedTemporaryFile() as f:\n tmpfile = f.name\n result.export(tmpfile, format=\"wav\")\n logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))\n return tmpfile",
"def takeCommand():\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing... \")\n voice_input = r.recognize_google(audio, language=\"en-US\")\n print(f\"The user said: {voice_input}\\n\")\n except Exception as e:\n # print(e)\n print(\"Please say that again\")\n return \"None\"\n return voice_input",
"def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slot_key] = curr_word\n\n try:\n response = http_get(curr_word, False)\n\n if response:\n example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]\n if example == \"vis\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n elif example == \"wsgram\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n except Exception as e:\n speech = (\"No example is available for {}. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = \"I'm not sure what word to look up, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def onWordRecognised(self, *_args):\n # Unsubscribe to the event when talking,\n # to avoid repetitions\n memory.unsubscribeToEvent(\"WordRecognized\",\"AudioRecognition\")\n\n # We access to the word recognised in the memory\n word = memory.getData(\"WordRecognized\")\n\n # Debug : Print the word recognised\n print(\"Mot :\")\n print(word[0])\n print(\"Indice de confiance :\")\n print(word[1])\n print\n\n\n # We acknoledge a word if the trust is high enough\n if (word[1] > 0.28):\n self.mot = word[0]\n #self.tts.say(\"Le mot reconnu est :\"+self.mot)\n StateManager(self)\n \n\n # Subscribe again to the event\n memory.subscribeToEvent(\"WordRecognized\",\n \"AudioRecognition\",\n \"onWordRecognised\")",
"def get_user_speech_input(self):\n\t\twith sr.Microphone() as source:\n\t\t\tprint \"You can speak!\"\n\t\t\taudio = self.recog.listen(source, 5)\n\t\t\t\n\t\t#WIT_AI_KEY = \"4KKA5EH6VFWPMWYZTSFHNJJZYCZHGTAQ\"\n\t\tprint \"sending it\"\n\t\ttry:\n\t\t\tprint \"Google thinks: \" + self.recog.recognize_google(audio)\n\t\texcept sr.UnknownValueError:\n\t\t\tprint(\"Google Speech Recognition could not understand audio\")\n\t\texcept sr.RequestError as e:\n\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e))",
"def takeCommand():\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n print(\"Listening....\")\r\n r.pause_threshold = 1 #pause threshold is if we pause in between speaking it shouldnt consider the sentence as complete\r\n audio = r.listen(source)\r\n\r\n try:\r\n print(\"Recognizing...\")\r\n query= r.recognize_google(audio,language='en-in')\r\n print(f\"User said: {query} \\n\")\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\"Please say that again...\")\r\n return \"None\"\r\n\r\n\r\n return query",
"def recognize_speech(self, bot, update, args=[]):\n\n if not getattr(update.message, \"reply_to_message\", None):\n text = self._(\"/recog lang_code\\n\"\n \"Reply to a voice with this command to recognize it.\\n\"\n \"examples:\\n/recog zh\\n/recog en-US\\n\\nSupported languages:\\n\")\n text += \"\\n\".join(\"%s: %r\" % (i.engine_name, i.lang_list) for i in self.voice_engines)\n return self.bot.reply_error(update, text)\n if not getattr(update.message.reply_to_message, \"voice\"):\n return self.bot.reply_error(update,\n self._(\"Reply only to a voice with this command \"\n \"to recognize it. (RS02)\"))\n\n if update.message.reply_to_message.voice.duration > 60:\n return self.bot.reply_error(update, self._(\"Only voice shorter than 60s \"\n \"is supported. (RS04)\"))\n\n file, _, _ = self.bot.download_file(update.message, update.message.reply_to_message.voice, MsgType.Audio)\n\n results = OrderedDict()\n for i in self.voice_engines:\n results[\"%s (%s)\" % (i.engine_name, args[0])] = i.recognize(file.name, args[0])\n\n msg = \"\"\n for i in results:\n msg += \"\\n<b>%s</b>:\\n\" % html.escape(i)\n for j in results[i]:\n msg += \"%s\\n\" % html.escape(j)\n msg = self._(\"Results:\\n{0}\").format(msg)\n self.bot.send_message(update.message.reply_to_message.chat.id, msg,\n reply_to_message_id=update.message.reply_to_message.message_id,\n parse_mode=telegram.ParseMode.HTML)\n\n file.close()",
"def mic_input():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('Say something...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n except sr.UnknownValueError:\n print('....')\n command = self.mic_input()\n return command\n except Exception as e:\n print(e)\n return False",
"def createAnswer(self, input):\n\n input = input.strip()\n\n if len(input) == 1:\n if input in self.guessedChars:\n return \"Oled juba tähte \" + input + \" pakkunud. Paku midagi muud. \\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \"\\n\" + self.wordKnown\n else:\n self.addChar(input)\n if self.isWordSet():\n return self.answerIsSet(input)\n else:\n self.filterDict(input)\n if self.isWordSet():\n return self.answerIsSet(input)\n else:\n return \"Kahjuks tähte \" + input + \" sõnas ei ole. Vaja veel \" + str(\n self.wordKnown.count(\"_\")) + \" ära arvata. \\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \" \\n\" + self.wordKnown\n elif input == \"\":\n return \"Võiks midagi ikka sisestada ka...\\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \" \\n\" + self.wordKnown\n else:\n if input == \"aitab\":\n self.active = False\n return \"Kui aitab siis aitab. Sõna, mida ma mõtlesin, ma sulle ikkagi ei ütle. Jäägu see elu lõpuni \" \\\n \"Sind piinama.\"\n if self.word == input:\n self.active = False\n return \"Arvasid ära, mõtlesin tõesti sõna \" + self.word + \".\"\n else:\n self.removeWordFromDict(input)\n return \"Ei, ma kohe kindlasti ei mõelnud sõna \" + input + \"... Proovi veel. \\nHetkel proovitud \" \\\n \"\" \\\n \"\" \\\n \"\" + ' '.join(self.guessedChars) \\\n + \" \\n\" + self.wordKnown",
"def _resolve(input_string, output_file=None):\n macro_calls = \"option, -echo;\\n\" + _resolve_required_macros(input_string) + \"option, echo;\\n\\n\"\n full_madx_script = macro_calls + input_string\n if output_file is not None:\n with open(output_file, \"w\") as output:\n output.write(full_madx_script)\n return full_madx_script",
"def decodeSpeech(hmmd, lmdir, dictp, wavfile):\n\n try:\n import sphinxbase\n import pocketsphinx as ps\n\n except:\n import pocketsphinx as ps\n print \"\"\"Pocket sphinx and sphixbase is not installed\n in your system. Please install it with package manager.\n \"\"\"\n speechRec = ps.Decoder(hmm=hmmd, lm=lmdir, dict=dictp)\n wavFile = file(wavfile, 'rb')\n speechRec.decode_raw(wavFile)\n result = speechRec.get_hyp()\n print result[0]\n return result[0]",
"def voice():\n resp = VoiceResponse()\n\n gather = Gather(num_digits=1, action='/gather')\n gather.say(\n 'For Spanish press 1, for Italian press 2, for German press 3, for French press 4, for Mandarin Chinese '\n 'press 5, for Japanese press 6, to manually enter a language press 9', voice='Alice', language=languages[source][2])\n resp.append(gather)\n\n # If the user doesn't select an option, redirect them into a loop\n resp.redirect('/voice')\n\n return str(resp)",
"def show_madlib():\n\n mad_name = request.args.get(\"person\")\n mad_color = request.args.get(\"color\")\n mad_noun = request.args.get(\"noun\")\n mad_planet = request.args.get(\"planet\")\n mad_adverb = request.args.get(\"adverb\")\n mad_adjectives = request.args.getlist(\"adjectives\")\n\n return render_template(\"madlib.html\",\n person=mad_name,\n color=mad_color,\n noun=mad_noun,\n planet=mad_planet,\n adverb=mad_adverb,\n adjectives=mad_adjectives,\n )",
"def takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source: #don't forget the () after microphone\n print(\"Listening ...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing..\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query}\\n\")\n\n except Exception as e:\n print(e)\n print(\"Say that again please..\")\n return \"None\"\n return query",
"def generate_madlib(state):\n line = None\n while not line:\n if not state['corpus']:\n if state['options']['corpus'] == \"None\":\n name = None\n else:\n name = state['options']['corpus']\n if state['options']['corporaset'] == \"None\":\n set = None\n else:\n set = state['options']['corporaset']\n \n # will raise IOError if corpus invalid\n if name:\n state['corpus'] = nlp.corpus(set=set, name=name)\n else:\n state['corpus'] = nlp.random_corpus(set=set)\n \n try:\n line = nlp.random_line(state['corpus'])\n except UnicodeDecodeError:\n state['corpus'] == None\n\n doc = nlp.nlp(line)\n\n # truncate line if too long\n maxlen = state['options']['linemaxlen']\n if len(line) > maxlen:\n line = \"\"\n for span in doc.sents:\n sent = ''.join(doc[i].string for i in range(\n span.start, span.end\n )).strip()\n if len(line) + len(sent) > maxlen:\n break\n line += sent + \" \"\n doc = nlp.nlp(line)\n \n ddict = defaultdict(list)\n\n for (index, token) in enumerate(doc):\n if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:\n ddict[token].append(index)\n\n slist = sorted(ddict, key=lambda t: t.prob)\n\n # build list of tokens+whitespace from parsed output\n words = map(lambda x: x.string, list(doc))\n\n # 2 subs + 1 more per word wrap line\n limit = min(len(line) / 80 + 2, 6)\n\n slots = []\n for t in slist[:limit]:\n for ctr in ddict[t]:\n words[ctr] = underline + u\" \" + t.pos_ + \" \" +\\\n underline + t.whitespace_\n slots.append(ctr)\n\n slots.sort()\n\n state['doc'] = doc\n state['text'] = \"\".join(words)\n state['textshape'] = slots",
"def hear_answer(tts, speech_recognition, memory, cur_time):\n speech_recognition.setVocabulary(numbers, False)\n tts.say(\"\")\n answer = \"\"\n memory.subscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\",\n \"onTouched\")\n while answer == \"\":\n if touched:\n speech_recognition.subscribe(\"GET_ANSWER\")\n print('Speech recognition engine started')\n speech_recognition.pause(False)\n time.sleep(3.0)\n speech_recognition.pause(True)\n answer = memory.getData(\"WordRecognized\")\n print(\"data: %s\" % answer)\n # Confidence must be bigger than 0.5 in order to continue\n if answer[1] < 0.45:\n answer = \"\"\n else:\n answer = str(answer[0])\n speech_recognition.unsubscribe(\"GET_ANSWER\")\n if answer == \"\":\n no_answer(tts, randint(0, 3))\n set_touched(False)\n elif not warned and datetime.datetime.now() > (cur_time + datetime.timedelta(minutes=3)):\n global warned\n warned = True\n tts.say(\"Je werkt nu 3 minuten aan deze som. Fouten maken mag. Het is niet erg als je het antwoord niet weet. Zeg maar gewoon wat je denkt.\")\n memory.unsubscribeToEvent(\"TouchChanged\",\n \"ReactToTouch\")\n global warned\n warned = False\n return answer",
"def takecommand():\n r = src.Recognizer()\n with src.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"user said: {query}\")\n\n except Exception as e:\n speak(\"Sorry, Can You repeat this please\")\n query = None\n return query\n return query",
"def takeCommand():\r\n recognizer = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listenging...\")\r\n audio = recognizer.listen(source)\r\n\r\n try:\r\n print(\"LOADING...\")\r\n command = recognizer.recognize_google(audio, language=\"en-un\")\r\n print(f\"user said: {command}\")\r\n\r\n except Exception as e:\r\n speak(f\"Please say that again\")\r\n command = None\r\n return command",
"def take_command(self):\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening.....\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language=\"en-in\")\r\n print(\"Recognizing.....\")\r\n print(\"Query=\", query)\r\n except Exception as e :\r\n print(e)\r\n self.speak(\"Say that again please....\")\r\n return \"None\"\r\n return query",
"async def app_say() -> Response:\n voice = request.args.get(\"voice\", \"\")\n assert voice, \"No voice provided\"\n\n # cache=false or cache=0 disables WAV cache\n use_cache = request.args.get(\"cache\", \"\").strip().lower() not in {\"false\", \"0\"}\n\n # Text can come from POST body or GET ?text arg\n if request.method == \"POST\":\n text = request.data.decode()\n else:\n text = request.args.get(\"text\")\n\n assert text, \"No text provided\"\n\n vocoder = request.args.get(\"vocoder\")\n denoiser_strength = request.args.get(\"denoiserStrength\")\n if denoiser_strength is not None:\n denoiser_strength = float(denoiser_strength)\n\n wav_bytes = await text_to_wav(\n text,\n voice,\n vocoder=vocoder,\n denoiser_strength=denoiser_strength,\n use_cache=use_cache,\n )\n\n return Response(wav_bytes, mimetype=\"audio/wav\")",
"def processExpansionVoices(self, content):\n matches = re.findall(r'((N106|FDS|VRC6)-([A-Z]+) )', content)\n for match in matches:\n content = content.replace(match[0], self.getVoiceFor(match[1], match[2]) + ' ')\n\n return content"
] | [
"0.63713914",
"0.6036372",
"0.57943606",
"0.5604704",
"0.55267847",
"0.549841",
"0.54870534",
"0.5482093",
"0.5372294",
"0.5347282",
"0.5323033",
"0.53198546",
"0.52982825",
"0.5293129",
"0.52888864",
"0.52587306",
"0.5246758",
"0.5227932",
"0.52277255",
"0.5220096",
"0.5219653",
"0.52195054",
"0.52006036",
"0.51980793",
"0.5181178",
"0.51767695",
"0.5171536",
"0.51501596",
"0.5142829",
"0.5138794"
] | 0.7572062 | 0 |
Generates a unary relation from our graph by first sampling a value from dist (must return a number between 1 and N, where N is the number of nodes in the graph), and then sampling that many nodes from the graph with replacement | def generateUnaryRel(graph, dist=None):
if dist is None:
dist = lambda: random.randint(1, len(graph.nodes()))
count = dist()
return random.sample(graph.nodes(), count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)",
"def mutate_increase_dist(child):\n if not child.complete():\n return child\n var_node = gen_random_var()\n var_edge = gen_random_var()\n old_st = random.choice([SOURCE_VAR, TARGET_VAR])\n new_triple = random.choice([\n (old_st, var_edge, var_node), # outgoing new triple\n (var_node, var_edge, old_st), # incoming new triple\n ])\n new_child = child + (new_triple,)\n # replace the old source/target node with the new node and vice-versa to\n # move the old node one hop further away from everything else\n new_child = new_child.replace({old_st: var_node, var_node: old_st})\n return new_child",
"def generate_full(variable_names, dist_func, **kwargs):\n return generate_random_graph(variable_names, dist_func, edge_prob=1.0)",
"def repress_node_removal_old(graph, active_nodes):\n # list_active = list(active_nodes)\n num_neighbors = {node: len(list(graph.neighbors(node))) for node in active_nodes}\n total_neighbors = sum(num_neighbors.values())\n to_remove = set()\n for node in active_nodes:\n if np.random.random() < num_neighbors[node] / total_neighbors:\n to_remove.add(node)\n # only remove nodes at end so that probabilities are from the same time\n graph.remove_nodes_from(to_remove)\n active_nodes -= to_remove",
"def _random_replace_nodes_attribute(graph, residues, weights, attribute, seed=None):\n random.seed(seed)\n for node in graph.nodes:\n resname = random.choices(residues, weights=weights)\n graph.nodes[node][attribute] = resname[0]\n\n return graph",
"def FixedInGraph(N,degrees,replace = True):\n \n conn = np.zeros([N*degrees,2])\n nodes = np.arange(0,N)\n ii = 0\n for i in range(N):\n # for deg in range(degrees):\n source = np.random.choice(nodes,size =degrees,replace=replace)\n conn[ii:ii+len(source),0] = source\n conn[ii:ii+len(source),1] = i\n ii+=len(source) \n return conn",
"def to_undirected_graph(self):\n visited = set() \n G = Graph.Graph()\n \n for node in self.node_set:\n \n if node not in visited:\n visited.add(node)\n for i in self.suffix[node]:\n G.add_edge(node, i)\n \n return G",
"def make_random_undirected_graph(num_nodes, probility):\n graph = {}\n edges = 0\n for dummy_node in range(num_nodes):\n if dummy_node not in graph:\n graph[dummy_node] = set()\n for dummy_node_pair in range(num_nodes):\n if dummy_node_pair != dummy_node:\n a = random.random() # a real number [0,1)\n if a < probility:\n print dummy_node, dummy_node_pair\n graph[dummy_node].add(dummy_node_pair)\n if dummy_node_pair not in graph:\n graph[dummy_node_pair] = set([dummy_node])\n else:\n graph[dummy_node_pair].add(dummy_node)\n edges += len(graph[dummy_node])\n print \"number of edges are \", edges/2\n\n return graph",
"def make_synthetic_undirected_graph(num_nodes, num_exist):\n graph = {}\n edges = 0\n graph = make_complete_graph(num_exist) #creating a complete directed graph on m nodes\n dpa_graph = UPATrial(num_exist)\n for dummy_node in range(num_exist, num_nodes):\n node_neighbors = dpa_graph.run_trial(num_exist)\n graph[dummy_node] = set(node_neighbors)\n for dummy_node_pair in node_neighbors:\n graph[dummy_node_pair] = graph.get(dummy_node_pair,set([]))\n graph[dummy_node_pair].add(dummy_node)\n edges += len(graph[dummy_node])\n\n print \"number of edges are \", edges/2\n return graph",
"def random_assignment(graph, possibilities):\n for node in graph.nodes.values():\n node.set_value(random.choice(possibilities))",
"def random_one_graph(n):\n return nx.fast_gnp_random_graph(n, 1/(n*n), directed=True)",
"def generate_chain(variable_names, dist_func, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n adj_matrix = np.zeros((num_vars, num_vars), dtype=np.bool)\n for v_idx in range(num_vars-1):\n adj_matrix[v_idx, v_idx+1] = True\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix)",
"def _graph_fn_sample_deterministic(self, distribution):\n raise NotImplementedError",
"def permut_graph(self,permutaions,add_delete_prop):\n edges = list(self.graph.edges)\n nonedges = list(nx.non_edges(self.graph))\n nonedges = self.filter_edges(nonedges)\n\n for _ in tqdm(range(permutaions)):\n if self.random.random() < add_delete_prop:\n chosen_edge = self.random.choice(nonedges)\n self.graph.add_edge(chosen_edge[0], chosen_edge[1])\n nonedges.remove(chosen_edge)\n edges.append(chosen_edge)\n else:\n chosen_edge = self.random.choice(edges)\n self.graph.remove_edge(chosen_edge[0], chosen_edge[1])\n edges.remove(chosen_edge)\n nonedges.append(chosen_edge)\n self.graph = self.fix_graph(self.graph)",
"def inverse_transform_sampling(g, kmin, kmax, size, offset):\n # Power-law distribution\n c = sp.special.zeta(g)\n def dist(k): return 1/(np.asarray(k)**g*c)\n\n # Calculates the probability to find nodes with degree up to kmax and\n # the corresponding cumulative distribution.\n dist_n = dist(range(kmin, kmax+1))\n cumsum = np.cumsum(dist_n)\n rand = np.random.rand(size) * cumsum[-1]\n node_dist = np.array([kmin]*size)\n \n # Checks in which region of the cum. distr. the random values lie\n # and assigns the corresponding node degree.\n for i in range(size):\n j = offset\n while (cumsum[j] - rand[i]) < 0:\n node_dist[i] += 1\n j += 1\n \n return node_dist.astype(int)",
"def nonuniform_mutation(random, candidate, args):\r\n bounder = args['_ec'].bounder\r\n num_gens = args['_ec'].num_generations\r\n max_gens = args['max_generations']\r\n strength = args.setdefault('mutation_strength', 1)\r\n exponent = (1.0 - num_gens / float(max_gens)) ** strength\r\n mutant = copy.copy(candidate)\r\n for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):\r\n if random.random() <= 0.5:\r\n new_value = c + (hi - c) * (1.0 - random.random() ** exponent)\r\n else:\r\n new_value = c - (c - lo) * (1.0 - random.random() ** exponent)\r\n mutant[i] = new_value\r\n return mutant",
"def _randomize(self):\n return self.graph",
"def Initialite_Random_Graph(rect_x=800,rect_y=800,nodes_amount=420,link_dist=75):\n graph = [[None]*rect_x]*rect_y\n nodes = []\n links = []\n \n for _ in range(nodes_amount):\n fine = False\n while not fine:\n x = random.randrange(rect_x)\n y = random.randrange(rect_y)\n if graph[y][x] is None:\n fine = True\n near_nodes=[] \n for N in nodes:\n xo, yo = N.coordinates()\n xd = abs(x-xo)\n yd = abs(y-yo)\n if link_dist**2>=xd**2+yd**2:\n near_nodes.append(N)\n graph[y][x] = node(x,y)\n nodes.append(graph[y][x])\n for N in near_nodes:\n xo, yo = N.coordinates()\n xd = abs(x-xo)\n yd = abs(y-yo)\n li = link(N,sqrt(xd**2+yd**2),graph[y][x])\n N.add_link(li)\n ln = link(graph[y][x],sqrt(xd**2+yd**2),N)\n graph[y][x].add_link(ln)\n links.append(li)\n links.append(ln)\n return nodes, links",
"def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"",
"def generate_jungle(variable_names, dist_func, num_levels=2, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n\n edges = []\n for i in range(num_vars):\n level = int(np.log2(i+1))\n idx = i + 1 - 2 ** level\n for l in range(1, num_levels+1):\n gl = (2**l) * idx + 2 ** (level + l) - 1\n edges += [[i, gl + j] for j in range(2**l)]\n edges = [e for e in edges if max(e) < num_vars]\n\n return graph_from_edges(variable_names, dist_func, edges)",
"def generate_random_graph(variable_names, dist_func, edge_prob, connected=False, max_parents=-1, num_latents=0, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n # Generate random adjacency matrix with specified edge probability\n adj_matrix = np.random.binomial(n=1, p=edge_prob, size=(num_vars, num_vars))\n\n # Make sure that adjacency matrix is half diagonal\n for v_idx in range(num_vars):\n adj_matrix[v_idx, :v_idx+1] = 0\n\n # Nodes that do not have any parents or children are connected\n for v_idx in range(num_vars):\n has_connection = (adj_matrix[v_idx, :].any() or adj_matrix[:, v_idx].any())\n if not has_connection:\n con_idx = np.random.randint(num_vars-1)\n if con_idx >= v_idx:\n con_idx += 1\n adj_matrix[v_idx, con_idx] = True\n else:\n adj_matrix[con_idx, v_idx] = True\n\n # Ensure that a node has less than N parents\n if max_parents > 0:\n for v_idx in range(adj_matrix.shape[0]):\n num_parents = adj_matrix[:, v_idx].sum()\n if num_parents > max_parents:\n indices = np.where(adj_matrix[:, v_idx] == 1)[0]\n indices = indices[np.random.permutation(indices.shape[0])[:num_parents-max_parents]]\n adj_matrix[indices, v_idx] = 0\n\n # Connect nodes to one connected graph\n if connected:\n visited_nodes, connected_nodes = [], [0]\n while len(visited_nodes) < num_vars:\n while len(connected_nodes) > 0:\n v_idx = connected_nodes.pop(0)\n children = np.where(adj_matrix[v_idx, :])[0].tolist()\n parents = np.where(adj_matrix[:, v_idx])[0].tolist()\n neighbours = children + parents\n for n in neighbours:\n if (n not in visited_nodes) and (n not in connected_nodes):\n connected_nodes.append(n)\n if v_idx not in visited_nodes:\n visited_nodes.append(v_idx)\n if len(visited_nodes) < num_vars:\n node1 = np.random.choice(np.array(visited_nodes))\n node2 = np.random.choice(np.array([i for i in range(num_vars) if i not in visited_nodes]))\n adj_matrix[min(node1, node2), max(node1, node2)] = True\n connected_nodes.append(node1)\n\n # Add latent confounders \n if num_latents > 0:\n # Latent confounders are identified by their variable name \"X_{l,...}\"\n variable_names = [r\"$X_{l,%i}$\" % (i+1) for i in range(num_latents)] + variable_names\n # Latent confounders are added in the graph structure. When exporting the graph, \n # we remove those variables so that we can apply our structure learning algorithm\n # without any changes.\n node_idxs = [v_idx+num_latents for v_idx in range(num_vars)\n if (adj_matrix[:, v_idx].sum() < max_parents or max_parents <= 0)]\n adj_matrix = np.concatenate([np.zeros((num_latents, num_vars)), adj_matrix], axis=0)\n adj_matrix = np.concatenate([np.zeros((num_vars+num_latents, num_latents)), adj_matrix], axis=1)\n # Randomly select the node pairs on which we want to have a latent confounder\n latent_children = []\n for l in range(num_latents):\n node_pair = None\n # We sample unique node pairs where there exists no direct edge between both nodes\n while node_pair is None or node_pair in latent_children or adj_matrix[node_pair[0], node_pair[1]]:\n node_pair = random.sample(node_idxs, k=2)\n node_pair = sorted(node_pair)\n latent_children.append(node_pair)\n adj_matrix[l, node_pair[0]] = 1\n adj_matrix[l, node_pair[1]] = 1\n latents = np.array([[i]+lc for i, lc in enumerate(latent_children)])\n else:\n latents = None\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix, latents=latents)",
"def repress_edge_removal(graph, active_nodes, repression_rate):\n for node in active_nodes:\n neighbors = list(graph[node].keys())\n remove_which = np.random.binomial(1, repression_rate, size=(len(neighbors)))\n for idx in range(len(neighbors)):\n if remove_which[idx]:\n graph.remove_edge(node, neighbors[idx])",
"def fix_graph(self,graph):\n graph_compleate_reachable = False\n while not graph_compleate_reachable:\n not_reachable_in ,not_reachable_out = self.not_reachable(graph)\n for n in not_reachable_in:\n graph.add_edge(self.random.randint(0,n-1),n)\n for n in not_reachable_out:\n graph.add_edge(n,self.random.randint(n+1, self.nodes-1))\n graph_compleate_reachable = len(not_reachable_in)==0 and len(not_reachable_out)==0\n return graph",
"def random_path(length):\n assert length > 0\n edges = [Variable('ve%d' % i) for i in range(1, length + 1)]\n nodes = [Variable('vn%d' % i) for i in range(1, length)] + [TARGET_VAR]\n s = SOURCE_VAR # start at source\n triples = []\n for e, n in zip(edges, nodes):\n triples.append((s, e, n))\n s = n\n gp = GraphPattern([\n (o, p, s) if random.random() < .5 else (s, p, o)\n for s, p, o in triples\n ])\n return gp",
"def _uniform_random_walk(self, start_node = None):\n\t\tif start_node == None:\n\t\t\t# Sampling is uniform w.r.t V, and not w.r.t E\n\t\t\tstart_node = random.choice(range(self.nodes_size))\n\t\tpath = [start_node]\n\t\twhile len(path) < self._walk_length:\n\t\t\t#if random.random() < self._walk_restart:\n\t\t\t# path.append(start_node)\n\t\t\t# continue\n\t\t\tcur = path[-1]\n\t\t\tadj_list = self._net.get_adj_list(cur)\n\t\t\tif len(adj_list) > 0:\n\t\t\t\tpath.append(random.choice(adj_list)) # Generate a uniform random sample\n\t\t\telse:\n\t\t\t\t# logger.warning('no type-corresponding node found, walk discontinued, generate a path less than specified length.')\n\t\t\t\t# break\n\t\t\t\t# logger.warning('no type-corresponding node found, walk restarted.')\n\t\t\t\tpath.append(start_node)\n\n\t\treturn [str(node) for node in path]",
"def complete_graph(n):\n return wgraph_from_adjacency(np.ones((n, n)))",
"def er_random_graph_generator(n, p, ng, seed, w_base, w_top):\n\n f_er_graph_list = []\n for i in range(0, ng):\n f_g = nx.erdos_renyi_graph(n, p, seed + i, directed=False)\n for (u, v, w) in f_g.edges(data=True):\n w['weight'] = random.randint(w_base, w_top)\n f_er_graph_list.append(f_g)\n return f_er_graph_list",
"def gen_graph(self):",
"def erdos_renyi(n,p):\r\n\tassert n>= 0 and 0 <= p <=1\r\n\tG = [[] for _ in range(n)]\r\n\tE = []\r\n\tfor u in range(n):\r\n\t\tfor v in range(u+1,n):\r\n\t\t\tq = random.random()\r\n\t\t\tif q < p:\r\n\t\t\t\tE.append((u,v))\r\n\t\t\t\tG[u].append(v)\r\n\t\t\t\tG[v].append(u)\r\n\r\n\tG2 = nx.Graph()\r\n\tfor i in range(n):\r\n\t\tG2.add_node(i)\r\n\r\n\tG2.add_edges_from(E)\r\n\r\n\treturn (G,G2)",
"def dunn(dist, labels):\n return _dunn(data=None, dist=dist, labels=labels)"
] | [
"0.6225221",
"0.6075832",
"0.57536215",
"0.5595702",
"0.55751705",
"0.5566251",
"0.5530265",
"0.54891706",
"0.5453418",
"0.5447563",
"0.5430584",
"0.5427841",
"0.54138327",
"0.5403605",
"0.54019284",
"0.53796065",
"0.5331073",
"0.5319422",
"0.53087515",
"0.5298584",
"0.5298136",
"0.52813554",
"0.5261063",
"0.5258553",
"0.52269113",
"0.52111757",
"0.5201344",
"0.51872194",
"0.5167132",
"0.51546144"
] | 0.8003123 | 0 |
Plots the theoretical probability distribution for the random walk. | def plot_distribution(self,show=True):
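        # k values (possible end positions) and their theoretical probabilities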
k_vals,prob_vals = self.tuple_of_probabilities
plt.figure("Probability distribution of Random Walk, theoretical")
plt.scatter(k_vals,prob_vals,s=4)
plt.xlim((-self.n-1,self.n+1))
plt.xlabel("x\u2099 - Position after n jumps")
plt.ylabel("Probability")
plt.suptitle("Random Walk: p={}, n={}, \u0394x={}".format(self.p,self.n,self.delta_x))
if show == True:
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geneticAlgorithmPlot(population, popSize, fittestSize, mutationRate, generations):\n pop = GA.initialPopulation(popSize, population)\n progress = []\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n for i in range(0, generations):\n pop = GA.nextGeneration(pop, fittestSize, mutationRate)\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n plt.plot(progress)\n plt.ylabel('Distance')\n plt.xlabel('Generation')\n plt.show()",
"def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()",
"def plot_distribution_prob(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid, filter=1)\n X_train, X_valid, y_train, y_valid = utils.load_train_valid()\n utils.train(X_train, X_valid, y_train, y_valid, method='RF',\n param={\"max_features\": 2, \"n_estimators\": 100}, save_prob=True)\n utils.plot_max_probablity_distribution('RF')\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')",
"def plot_p(self, show = False):\n try:\n difference = self.binom_null\n except:\n self.simulate_significance()\n difference = self.binom_null\n\n observed_difference = self.p_treatment - self.p_control\n\n mu, sigma = stats.norm.fit(difference)\n crit_density = stats.norm.pdf(observed_difference, mu, sigma)\n\n x = np.linspace(min(difference), max(difference), self.n_control + self.n_treatment)\n y = stats.norm.pdf(x, mu, sigma)\n\n line_curve = dict(color = 'blue', width = 2)\n\n data = [\n go.Scatter(\n x = x,\n y = y,\n mode = 'lines',\n showlegend = False,\n line = line_curve\n ),\n go.Scatter(\n x = x[x > observed_difference],\n y = y[np.where(x > observed_difference)],\n fill = 'tozeroy',\n showlegend = False,\n line = line_curve\n )\n ]\n\n layout = dict(\n plot_bgcolor = 'white',\n width = 800,\n height = 600,\n title = 'Significance',\n xaxis = dict(\n title = 'Difference in Probabilities',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n ),\n yaxis = dict(\n title = 'Density',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n )\n )\n\n fig = go.Figure(data = data, layout = layout)\n\n fig.add_vline(x = observed_difference,\n line_width = 2,\n line_dash = 'dash',\n line_color = 'black',\n annotation_text = 'P Value {:.4f}'.format(self.p_value),\n annotation_position = 'top right')\n\n if show:\n # Intended to be used in notebooks.\n # .py app files that use this module will handle saving and opening from desktop\n fig.show();\n\n return fig",
"def perm_plot(obs, perm, p, fig_title, tails = 1):\n plot_rows = len(perm.keys())\n \n fig, axes = plt.subplots(plot_rows, 1)\n\n for n, term in enumerate(perm.keys()):\n\n if plot_rows > 1:\n sns.distplot(perm[term], ax = axes[n], norm_hist = True)\n\n #Formatting\n axes[n].axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes[n].axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes[n].axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n axes[n].set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes[n].set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes[n].text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes[n].transAxes)\n else:\n axes[n].text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes[n].transAxes) \n \n\n for tick in axes[n].xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes[n].yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n \n if n == np.around(plot_rows / 2, decimals = 0) - 1:\n axes[n].legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n\n else:\n sns.distplot(perm[term], ax = axes, norm_hist = True)\n\n #Formatting\n axes.axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes.axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes.axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n \n axes.set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes.set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes.text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes.transAxes)\n else:\n axes.text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes.transAxes) \n \n for tick in axes.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n\n axes.legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n if fig_title != None: \n fig.suptitle(fig_title, fontsize = 24, y = 1.05) \n \n plt.tight_layout() \n plt.show()\n \n return(fig, axes)",
"def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()",
"def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])",
"def plot5a(nSamples):\n rv = stats.poisson(2.0)\n expectations, variances = simulate(nSamples, rv)\n plot(nSamples, expectations, variances, 2.0, 2.0, \"Poisson distribution\")",
"def not_pokemon_function():\n # Ironic I'm using random inside seed\n numpy.random.seed(random.randint(1, 1000))\n sample = numpy.random.normal(size=1000)\n counts, bin_edges = numpy.histogram(sample, bins=39)\n fig = tpl.figure()\n fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)\n fig.show()\n print(\"Hopefully this random histogram(because I couldn't generate plot graphs) which is generated cheers you up\")",
"def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')",
"def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def plot_random_generated_images(self):\n dimensions=(10, 10)\n figsize=(10, 10)\n n_samples=100\n \n (X, _), _ = self.generate_generator_prediction_samples(n_samples)\n \n self.grid_plot(X, dimensions=dimensions, figsize=figsize)",
"def plot_likelihood(par_num, par_rng):\n\n likelihoods = np.load('data%s_RM.npy' % (par_num))\n\n plt.figure()\n plt.plot(par_rng, likelihoods, 'bo-')\n plt.xlabel('Value Mapped')\n plt.ylabel('Log(Likelihood)')\n plt.title('Likelihood Function of Parameter %s: %s'\n % (par_num, hammu12.jf12_parameter_names[par_num]))\n plt.minorticks_on()\n plt.savefig('fig%s_RM.png' % (par_num))\n plt.close()",
"def redraw_whole_plot(self):\n pcent_rand = self.rand\n pcent_decimal = pcent_rand/100\n self.x = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal) \n for n in np.linspace(3, 9, self.num_points)\n ])\n self.y = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal)\n for n in np.linspace(3, 9, self.num_points)\n ])\n self.redraw_slope()",
"def show():\n setup()\n plt.show()",
"def plot_distribution(d, start=0.01, stop=10.0, resolution=0.1):\n import pylab\n X = numpy.arange(start, stop, resolution)\n Y = [math.exp(d.log_pdf(x)) for x in X]\n pylab.plot(X, Y)",
"def problem2():\n k = 4\n total_draws = 20\n total_balls = 50\n\n plt.figure()\n for _ in range(50):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:total_draws]\n experiment_result = np.any(draw == 4)\n experiment_results.append(experiment_result)\n plt.plot(np.cumsum(experiment_results) / np.arange(1, num_samples + 1))\n old_result = experiment_results[:]\n\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.show()",
"def plot_graphy_resilience_random():\n \n global counter\n counter += 1\n random_graph = make_random_undirected_graph(1239, 0.004)\n attack_order = random_order(random_graph)\n random_resilience = compute_resilience(random_graph, attack_order)\n plt.plot(range(len(random_resilience)), random_resilience, '-b', label= 'random, p =0.004')\n \n synthetic_undirected_graph = make_synthetic_undirected_graph(1239, 5)\n attack_order = random_order(synthetic_undirected_graph)\n synthetic_resilience = compute_resilience(synthetic_undirected_graph, attack_order)\n plt.plot(range(len(synthetic_resilience)), synthetic_resilience, '-r', label = 'UPA, m = 5')\n \n network_graph = load_graph(NETWORK_URL)\n attack_order = random_order(network_graph)\n network_resilience = compute_resilience(network_graph, attack_order)\n plt.plot(range(len(network_resilience)), network_resilience, '-g', label = 'Network')\n \n plt.legend(loc='upper right')\n \n plt.title(\" plot of graph resilience\")\n plt.xlabel(\"number of nodes removed\")\n plt.ylabel(\"the size of the largest connect component \")\n plt.savefig(\"graph_resilience_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping",
"def showPlot4():\n overall_data = []\n per_to_clean = [round(x * 0.1,1) for x in range(0,10)]\n number_of_robots = list(range(1,6))\n for per in per_to_clean:\n proc_sim_data = []\n for item in number_of_robots:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, per, 10, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n overall_data.append(proc_sim_data)\n plot(per_to_clean, overall_data)\n title('cleaning time vs. percentage cleaned')\n xlabel('percentage clean')\n ylabel('mean time (clocks)')\n show()",
"def monte_carlo(num=100, plot_fname='monte_carlo_thindisk.pdf'):\n theta, phi = sample_angles(num)\n\n axis_ratios = get_axisratio(theta, phi, r=1)\n bins = np.arange(0,1.1,0.1)\n\n \n plt.figure(figsize=(8,5))\n plt.hist(axis_ratios, bins, histtype='step', #density=True,\n linewidth=3, alpha=0.2, color='red',\n label=r'$\\mathrm{Randomly \\, Oriented \\, Thin \\, Disks}$')\n plt.xlabel(r'$\\mathrm{Axis \\, Ratio}$', fontsize=22)\n plt.savefig(plot_fname, bbox_inches='tight')\n plt.close()\n\n \"\"\"\n # Uncomment code block to view sampling distribution of (theta, phi)\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x,y,z, marker='.', s=1)\n ax.set_xlabel(r'$x$')\n ax.set_ylabel(r'$y$')\n ax.set_zlabel(r'$z$')\n plt.savefig('monte_carlo_sampling.pdf')\n\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n ax.scatter(x,y, marker='.', s=1)\n ax.set_xlabel(r'$x$')\n ax.set_ylabel(r'$y$')\n plt.savefig('monte_carlo_xy.pdf')\n\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n ax.scatter(y, z, marker='.', s=1)\n ax.set_xlabel(r'$y$')\n ax.set_ylabel(r'$z$')\n plt.savefig('monte_carlo_yz.pdf')\n \"\"\"",
"def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()",
"def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()",
"def plot_probability(\n observed: np.ndarray,\n theoretical: np.ndarray,\n ax: typing.Optional[plt.Axes] = None,\n figsize: tuple = (8, 8),\n) -> typing.Tuple[plt.Figure, plt.Axes]:\n with plt.rc_context(rc=pyextremes_rc):\n # Create figure\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, dpi=96)\n else:\n try:\n fig = ax.figure\n except AttributeError as _error:\n raise TypeError(\n f\"invalid type in {type(ax)} for the 'ax' argument, \"\n f\"must be matplotlib Axes object\"\n ) from _error\n\n # Configure axes\n ax.grid(False)\n\n # Plot scatter of observed and theoretical probabilities\n ax.scatter(\n theoretical,\n observed,\n marker=\"o\",\n s=20,\n lw=0.75,\n facecolor=\"k\",\n edgecolor=\"w\",\n zorder=10,\n )\n\n # Plot a diagonal perfect-fit line\n min_value = min([min(ax.get_xlim()), min(ax.get_ylim())])\n max_value = max([max(ax.get_xlim()), max(ax.get_ylim())])\n ax.plot(\n [min_value, max_value],\n [min_value, max_value],\n color=\"#5199FF\",\n lw=1,\n ls=\"--\",\n zorder=5,\n )\n\n # Label axes\n ax.set_xlabel(\"Theoretical\")\n ax.set_ylabel(\"Observed\")\n\n # Calculate Pearson R statistic and show it in the figure\n pearsonr, p_value = scipy.stats.pearsonr(theoretical, observed)\n axes_range = max_value - min_value\n ax.text(\n x=min_value + 0.05 * axes_range,\n y=max_value - 0.05 * axes_range,\n s=f\"$R^2={pearsonr:.3f}$\\n$p={p_value:.3f}$\",\n horizontalalignment=\"left\",\n verticalalignment=\"top\",\n )\n\n # Set axes limits\n ax.set_xlim(min_value, max_value)\n ax.set_ylim(min_value, max_value)\n\n return fig, ax",
"def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()",
"def show():\n\tplt.show()",
"def random_simulation(self, title, simulation=False):\n\n counter = 0\n plt.figure()\n\n # plot each battery\n for battery in self.grid.batteries:\n plt.plot(battery.x, battery.y, marker='x', color=colors[counter],\n markersize=10)\n x = []\n y = []\n for house in battery.connections:\n x.append(house.x)\n y.append(house.y)\n plt.scatter(x, y, marker='p', color=colors[counter])\n counter += 1\n\n # plot the connection\n counter = 0\n for battery in self.grid.batteries:\n for house in battery.connections:\n curr_x, curr_y = house.x, house.y\n end_x, end_y = battery.x, battery.y\n if curr_x > end_x:\n x_step = -1\n else:\n x_step = 1\n if curr_y > end_y:\n y_step = -1\n else:\n y_step = 1\n while not curr_x == end_x and not curr_y == end_y:\n if random.random() < 0.5:\n plt.plot([curr_x, curr_x], [curr_y, curr_y + y_step],\n color=colors[counter], linewidth=.3)\n curr_y = curr_y + y_step\n else:\n plt.plot([curr_x, curr_x + x_step], [curr_y, curr_y],\n color=colors[counter], linewidth=.3)\n curr_x = curr_x + x_step\n plt.plot([curr_x, end_x], [curr_y, end_y],\n color=colors[counter], linewidth=.3)\n counter += 1\n\n # display the process in an animation\n if simulation:\n plt.pause(1)\n plt.draw()"
] | [
"0.6606563",
"0.6557504",
"0.6498785",
"0.641404",
"0.640985",
"0.6358938",
"0.62723446",
"0.6208371",
"0.6195964",
"0.61664635",
"0.6133573",
"0.60698766",
"0.6052493",
"0.6051445",
"0.60448354",
"0.6023211",
"0.5997611",
"0.59773487",
"0.59731567",
"0.5966449",
"0.59633803",
"0.59604585",
"0.5957782",
"0.5947599",
"0.5946305",
"0.5921696",
"0.5915944",
"0.5893708",
"0.5883944",
"0.5880059"
] | 0.83731085 | 0 |
Runs a Monte Carlo simulation of the random walk for a specified number of trials. It then plots the results as a frequency distribution. Mean and variance values of the Monte Carlo simulation can be retrieved by calling mc.mean and mc.variance, respectively. Method parameters | def run_monte_carlo(self,number_of_trials=2000,plot=True,histogram=False,show=True,overlay=False):
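        # Simulate the requested number of independent walks and record the final position of each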
trial_data = []
for _ in range(number_of_trials):
steps = self._random_walk_simulation()
trial_data.append( sum(steps) + self.x_initial )
x_n, counts = np.unique(trial_data, return_counts=True)
self.mc_mean = np.mean(trial_data)
self.mc_variance = np.var(trial_data)
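        # Mean absolute distance from the start, weighted by how often each end position occurred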
mean_total = 0
for i in range(len(x_n)):
x,count = x_n[i],counts[i]
weighted_distance = abs(x - self.x_initial) * count
mean_total += weighted_distance
self.mc_mean_distance = mean_total/number_of_trials
if histogram == True:
plt.figure("Monte Carlo simulation of random walk - results")
plt.hist( trial_data, bins=int(round(np.sqrt(self.n))) )
plt.suptitle("Histogram of Monte Carlo simulation results: p={},n={}, \u0394x={}, N={}".format(
self.p,self.n,self.delta_x,number_of_trials))
if show == True:
plt.show()
if plot == False:
return trial_data
plt.figure("Monte Carlo simulation of Random Walk")
plt.scatter(x_n,counts,s=4)
plt.xlim((-self.n-1,self.n+1))
plt.xlabel("x\u2099 - Position after n jumps")
plt.ylabel("Frequency")
plt.suptitle("Monte Carlo simulation of random walk: p={}, n={}, \u0394x={}, N={}".format(
self.p,self.n,self.delta_x,number_of_trials))
if overlay == True: # IF TRUE, PLOT THEORETICAL RESULTS OVER MONTE CARLO RESULTS
k_vals,prob_vals = self.tuple_of_probabilities
prob_vals = [p*number_of_trials for p in prob_vals]
plt.scatter(k_vals,prob_vals,s=4)
if show == True:
plt.show()
return trial_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simulate(self, number_of_simulations):\n self.number_of_simulations = number_of_simulations\n\n for iteration_num in range(0, number_of_simulations, 1):\n self.add_grain(0)\n self.check_pile(iteration_num)\n self.mass_when_iteration.append(self.mass_count - self.mass_fallen_count)\n self.plot_iteration(self.angles_array, self.radial_array, self.array, iteration_num)\n print(self.array)",
"def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()",
"def simulationDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n\n delays = [300, 150, 75, 0]\n results = []\n\n for delay in delays:\n for i in range(numTrials):\n virusList = []\n virusPop = 0\n for n in range(numViruses):\n virusList.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n my_patient = TreatedPatient(virusList, maxPop)\n\n for step in range(delay + 150):\n if step == delay:\n my_patient.addPrescription('guttagonol')\n virusPop = my_patient.update()\n results.append(virusPop)\n\n toPlot = []\n for i in range(0, len(results), numTrials):\n toPlot.append(results[i:i + numTrials])\n # print toPlot\n\n for i, _ in enumerate(delays):\n pylab.subplot(2, 2, i + 1)\n pylab.hist(toPlot[i])\n pylab.show()",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n trialsRecord = []\n for trail in range(num_trials):\n #VISUALIZING ROBOTS - refer course pdf note 'Optional_Visualizing Robots Problem Set 2.pdf'\n #anim = ps2_visualize.RobotVisualization(num_robots, width, height)\n #create room\n room = RectangularRoom(width, height)\n #create robots & store in array\n robots = []\n count = 0\n for i in range(num_robots):\n robots.append(robot_type(room, speed))\n #NB: how does robot_type(room, speed) create a robot object???? what magic is this???\n #while calcualted coverage is < min_coverage, update positions & repeat\n while float(room.getNumCleanedTiles()) / room.getNumTiles() < min_coverage:\n #anim.update(room, robots)\n #do more cleaning - update robot positions\n for robot in robots:\n robot.updatePositionAndClean()\n count += 1\n trialsRecord.append(count)#record number of steps to achieve min_coverage in this trial.\n #after loop, close animation\n #anim.done()\n #calculate average number of steps over trials.\n return sum(trialsRecord)/float(len(trialsRecord))\n #raise NotImplementedError",
"def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)",
"def run(self):\n self.axs[0][0].clear()\n simulate(params=self.params,plt=plt,callback=self.callback,home=self.home,work=self.work, positions=self.initial_positions, stopping_t=150)",
"def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()",
"def simulationDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(delay):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(delay, delay+150):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n\n # pylab.plot(avgPopSize, label = 'avg pop size')\n # pylab.plot(avgGuttagonolResistantPop, label = 'avg pop size guttagonol-resistant')\n # pylab.xlabel(\"Time\")\n # pylab.ylabel(\"Average Population Size\")\n # pylab.title(\"Average Size of the Virus Populations\")\n # pylab.legend(loc = 'best')\n # pylab.show()",
"def simulate_monte_carlo(times=1000):\r\n print(sum(simulate() for _ in range(times))/times)",
"def drawsims(self, simparams, n=100, nc=10, ncat=1, nrea=1, stampsize=200):\n\t\t\n\t\tdrawcatkwargs = {\"n\":n, \"nc\":nc, \"stampsize\":stampsize}\n\t\tdrawimgkwargs = {}\n\t\t\n\t\tmegalut.sim.run.multi(self.worksimdir, simparams, drawcatkwargs, drawimgkwargs, \n\t\t\tpsfcat = None, ncat=ncat, nrea=nrea, ncpu=self.ncpu,\n\t\t\tsavepsfimg=False, savetrugalimg=False)",
"def simulationWithDrug(numTrials = 100, numTimeSteps = 300):\n random.seed()\n\n # Virus Characteristics.\n maxPop = 1000\n numViruses = 100\n maxBirthProb = 0.1\n clearProb = 0.05\n \n gutResistVirusMatrix = numpy.zeros(shape = (numTrials, numTimeSteps))\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \n for trial in range(numTrials): \n\n # Model a random patient with the given virus charateristics. \n viruses = virusCollection(numViruses, maxBirthProb, clearProb, ['guttagonol'])\n randPatientX = Patient(viruses, maxPop)\n\n # Simulate the time-steps.\n dataMatrix[trial][0] = numViruses\n for time in range(1, numTimeSteps):\n if time == 150:\n randPatientX.addPrescription('guttagonol')\n dataMatrix[trial][time] = randPatientX.update()\n gutResistVirusMatrix[trial][time] = randPatientX.getResistPop(['guttagonol']) \n \n # Statistical Analysis.\n meanData = dataMatrix.mean(0)\n time = numpy.arange(numTimeSteps) \n stdData95_CI = dataMatrix.std(0) * 2\n selectedTime = numpy.arange(0, numTimeSteps, 10)\n\n meanResistVirus = gutResistVirusMatrix.mean(0)\n\n #f = pylab.figure(figsize=(15, 7))\n\n # Plotting.\n #pylab.subplot(121)\n pylab.plot(time, meanData, label='Mean Virus Population')\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o', color = 'blue')\n pylab.grid() \n pylab.xlabel('Time Steps')\n pylab.ylabel('Total Virus Population')\n pylab.title('Effect of Guttagonol on Virus Population being administered\\nafter {} Timesteps over a total period of {} Timesteps'.format('150', '300'), fontsize='medium')\n\n stdDevGutVirusPop = gutResistVirusMatrix.std(0) * 2\n\n # Plotting 2nd graph\n #pylab.subplot(122)\n pylab.plot(time, meanResistVirus, label='Mean Guttagonol-resistant Virus Population', color = 'red')\n pylab.errorbar(time[selectedTime], meanResistVirus[selectedTime], stdDevGutVirusPop[selectedTime], fmt = 'o', color = 'red')\n pylab.legend(fontsize='x-small', loc='best')\n #pylab.grid()\n #pylab.xlabel('Time Steps')\n #pylab.ylabel('Total Guttagonol-Resistant Virus Population')\n #pylab.title('Total Number of Guttagonol-Resistant Virus Population after {} Timesteps\\nDrug administered after {} Timesteps'.format('300', '150'), fontsize='medium')\n pylab.show()",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n results = []\n gutresults = []\n \n for a in range(375):\n results.append([])\n gutresults.append([])\n \n for b in range(numTrials):\n viruses = []\n for c in range(100):\n resistances = {'guttagonol': False, 'grimpex': False}\n vir = ResistantVirus(.1, .05, resistances, .02)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 225):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n Mark.addPrescription('grimpex')\n \n for f in range(225, 375):\n newpop = Mark.update()\n results[f].append(newpop)\n \n \n FinalResults = results[374]\n print len(FinalResults)\n \n \n pylab.figure(6)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('300 day delay')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.show()",
"def simulationWithDrug(numTrials = 20, numTimeSteps = 500):\r\n random.seed()\r\n\r\n # Virus Characteristics.\r\n maxPop = 1000\r\n numViruses = 100\r\n maxBirthProb = 0.1\r\n clearProb = 0.05\r\n resistances={'guttagonol':False}\r\n mutProb= 0.005\r\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \r\n for trial in range(numTrials): \r\n\r\n # Model a random patient with the given virus charateristics. \r\n viruses = resistantVirusCollection(numViruses, maxBirthProb, clearProb,resistances,mutProb)\r\n randPatientX = Patient(viruses, maxPop)\r\n\r\n #Use drug on patient\r\n randPatientX.addPrescription('guttagonol')\r\n\r\n # Simulate the time-steps.\r\n dataMatrix[trial][0] = numViruses\r\n for time in range(1, numTimeSteps):\r\n dataMatrix[trial][time] = randPatientX.update() \r\n \r\n # Statistical Analysis.\r\n meanData = dataMatrix.mean(0)\r\n time = numpy.arange(numTimeSteps) \r\n stdData95_CI = dataMatrix.std(0) * 2\r\n selectedTime = numpy.arange(0, numTimeSteps, 10)\r\n\r\n # Ploting.\r\n pylab.plot(time, meanData)\r\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o') \r\n pylab.show()",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError",
"def test_simulation(walk_length_array, number_of_simulations, walker_class_type):\n for walk_length in walk_length_array:\n _distances_ = simulate_walks(walk_length, number_of_simulations, walker_class_type)\n print(walker_class_type.__name__, \" random walk of {} steps\".format(walk_length), \" After {} simulations\".format(number_of_simulations))\n print(\" Mean= {}\".format(round(sum(_distances_)/len(_distances_),4)))\n print(\" Max= {}\".format(round(max(_distances_), 4)))\n print(\" Min= {}\".format(round(min(_distances_),4)))",
"def run(self,mc_sample=None):\n if mc_sample:\n self.mc_sample = mc_sample\n\n total_scores = 0.0\n total_scores_square = 0.0\n self.scores_list =[]\n \n for i in range(self.num_runs): #runs the specified number of Monte Carlo samples\n score = next(self.mc_sample) #next score\n self.scores_list.append(score) \n total_scores += score\n total_scores_square += score**2\n\n self.xhat = total_scores / self.num_runs #mean of score\n self.x2hat = total_scores_square / self.num_runs #mean of score^2\n\n self.sample_variance = (self.num_runs / (self.num_runs - 1.0)) * (self.x2hat - (self.xhat**2))\n self.sample_stddev = np.sqrt(self.sample_variance)\n self.mean_variance = self.sample_variance / (self.num_runs - 1.0)\n self.mean_stddev = np.sqrt(self.mean_variance)",
"def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for x in range(0, 10):",
"def simulationWithoutDrug(numTrials = 20, numTimeSteps = 500):\r\n random.seed()\r\n\r\n # Virus Characteristics.\r\n maxPop = 1000\r\n numViruses = 100\r\n maxBirthProb = 0.1\r\n clearProb = 0.05\r\n \r\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \r\n for trial in range(numTrials): \r\n\r\n # Model a random patient with the given virus charateristics. \r\n viruses = virusCollection(numViruses, maxBirthProb, clearProb)\r\n randPatientX = SimplePatient(viruses, maxPop)\r\n\r\n # Simulate the time-steps.\r\n dataMatrix[trial][0] = numViruses\r\n for time in range(1, numTimeSteps):\r\n dataMatrix[trial][time] = randPatientX.update() \r\n \r\n # Statistical Analysis.\r\n meanData = dataMatrix.mean(0)\r\n time = numpy.arange(numTimeSteps) \r\n stdData95_CI = dataMatrix.std(0) * 2\r\n selectedTime = numpy.arange(0, numTimeSteps, 10)\r\n\r\n # Ploting.\r\n pylab.plot(time, meanData)\r\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o') \r\n pylab.show()",
"def run_simulation(random_seed=None, workers_count=[1, 1, [1, 1]]):\n if random_seed:\n random.seed(random_seed)\n\n metrics = OrderedDict()\n plot_data = {}\n\n env = Canteen(workers_count)\n\n env.process(source(env))\n env.run(until=SIMULATION_DURATION)\n\n places = [env.places[PlaceName.HOT], env.places[PlaceName.COLD]]\n cash_desks = env.cash_desks\n\n height = 3\n width = max(2, len(cash_desks))\n plot_data['size'] = [height, width]\n\n plot_data['data'] = []\n for i, place in enumerate(places + cash_desks):\n\n if not place.data:\n continue\n\n x, y = np.array(place.data).transpose()\n plot_data['data'].append([\n i+1 if i < 2 else width+i-1,\n repr(place).strip('<>'),\n x, y\n ])\n\n for place in places[:2] + cash_desks:\n max_time, mean_time = max_and_mean_time(place.time_list)\n\n data = np.array(place.data)\n mean_clients = sum(data[:, 1])/len(data[:, 1])\n max_clients = max(data[:, 1])\n\n metrics[place] = [mean_time, max_time, mean_clients, max_clients]\n\n cumulative = get_cumulative_proportional_time(Client.client_list)\n metrics['Cumulative proportional time'] = cumulative\n\n x, y = np.array(env.client_count_list).transpose()\n plot_data['data'].append([\n width * 2 + 1,\n 'Total',\n x, y\n ])\n\n return plot_data, metrics",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n\trobots = []\n\troom = RectangularRoom(width, height)\n\tfor i in range(0, num_robots):\n\t\trobots.append(robot_type(room, speed))\n\ttrial = []\n\tfor i in range(0, num_trials):\n\t\tif visualize: anim = ps11_visualize.RobotVisualization(num_robots, width, height)\n\t\tj = []\n\t\tpercentage = .01 * (float(room.getNumCleanedTiles()) * (100.0 / float(room.getNumTiles())))\n\t\twhile percentage < min_coverage:\n\t\t\t\tfor r in robots:\n\t\t\t\t\tr.updatePositionAndClean()\n\t\t\t\tpercentage = .01 * (float(room.getNumCleanedTiles()) * (100.0 / float(room.getNumTiles())))\n\t\t\t\tj.append(percentage)\n\t\t\t\tif visualize: anim.update(room, robots)\n\t\tif percentage >= min_coverage:\n\t\t\t\"\"\" finished cleaning\n\t\t\t\"\"\"\n\t\t\ttrial.append(j)\n\t\t\tif visualize: anim.done()\n\t\troom.dirtyRoom()\n\treturn trial",
"def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,\n mutProb, numTrials):\n \n #create viruses list\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n \n #create test patient P1\n results = np.zeros(numTrials*300).reshape(300,numTrials)\n resultsPopResist = np.zeros(numTrials*300).reshape(300,numTrials)\n \n #runs numTrials of 300 steps, putting results in an array of 300 lines, \n # numTrials columns\n for t in range(numTrials) :\n P1 = TreatedPatient(viruses, maxPop)\n for s in range(150):\n P1.update()\n results[s][numTrials-1] += P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n P1.addPrescription('guttagonol')\n for s in range(150,300):\n P1.update()\n results[s][numTrials-1]+=P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n \n #calculating average of virus population size at each step \n yValues1 = []\n for i in range(300):\n a = sum(results[i].tolist())/len(results[i])\n yValues1.append(a)\n \n yValues2 = []\n for i in range(300):\n a = sum(resultsPopResist[i].tolist())/len(resultsPopResist[i])\n yValues2.append(a)\n\n pylab.plot(yValues1,label='pop average')\n pylab.plot(yValues2,'r--',label = 'resistant virus population')\n pylab.title('virus pop average at each step')\n pylab.legend()\n pylab.xlabel('Time Steps')\n pylab.ylabel('pop #')\n pylab.show()",
"def run_model(nsims = 10, ncats = 1):\n loc_lists = []\n plt.figure()\n for i in range(ncats):\n loc_lists.append(run_sims(nsims = nsims))\n plot_matrix(loc_lists[-1])\n return(loc_lists)",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)",
"def simulate_memories(simulation_length):\n \n \n pass",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(150):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(150, 150+delay):\n patient.update()\n patient.addPrescription('grimpex')\n for k in range(150+delay, 300+delay):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n return x_plot",
"def main():\n file_txt = open('results.txt','w+')\n positions = [1,10,100,1000]\n num_trials = 10000\n \n # Simulate the investment and plot histogram for different positions\n for position in positions:\n daily_ret = simulation(position, num_trials)\n plt.figure()\n plt.hist(daily_ret, 100, range=[-1,1])\n plt.title('The histogram of daily return for position ={}'.format(position))\n plt.xlabel('Daily return')\n plt.ylabel('The number of trials')\n plt.savefig('histogram_{}_pos.pdf'.format(str(position).zfill(4)))\n \n # Save the results of the simulation into a txt file \n file_txt.write('Position: {}\\n'.format(position))\n file_txt.write('Mean: {}; Std: {}\\n'.format(np.mean(daily_ret),np.std(daily_ret)))\n file_txt.write('\\n')\n file_txt.close()",
"def main():\n all_returns = []\n\n for i in range(10):\n trial_return = run_trial()\n all_returns.append(trial_return)\n print(f'Trial {i+1}, average trial return: {np.mean(trial_return)}')\n\n mean_returns = np.mean(all_returns, axis=0)\n std_returns = np.std(all_returns, axis=0)\n\n x = range(mean_returns.shape[0])\n plt.plot(x, mean_returns)\n plt.title('Mean return over 10 trials')\n plt.fill_between(x, mean_returns - std_returns, mean_returns + std_returns, alpha=0.2)\n plt.ylabel('Mean return')\n plt.xlabel('1000 frames')\n plt.savefig('avg_return.png')\n plt.show()",
"def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)"
] | [
"0.6701685",
"0.6585736",
"0.6476485",
"0.6432378",
"0.6383453",
"0.631216",
"0.625372",
"0.62430805",
"0.6218561",
"0.62165946",
"0.62092024",
"0.6172401",
"0.61608857",
"0.6157016",
"0.6096449",
"0.6096449",
"0.609388",
"0.60926163",
"0.60920125",
"0.60714257",
"0.60618424",
"0.60582185",
"0.60574675",
"0.6056813",
"0.60481817",
"0.6045493",
"0.5994077",
"0.59732825",
"0.59445155",
"0.5940119"
] | 0.7543116 | 0 |
This method produces an animated simulation of a 1D random walk. | def random_walk_draw(self,num_plots,animated=False,show=True):
t_x_arrays = []
t_max = self.n
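        # Build one trajectory (positions over time) per requested plot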
for _ in range(num_plots):
current_x = self.x_initial
x_array = [current_x]
t_array = range(t_max + 1)
steps = self._random_walk_simulation()
for s in steps:
current_x += s
x_array.append(current_x)
t_x_arrays.append( [x_array,t_array] )
fig = plt.figure('Random walk simulation')
ax = fig.add_subplot(1,1,1)
ax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)),round(max(x_array) + np.sqrt(self.n)*3)])
ax.set_xlim([-(round(np.sqrt(self.n))),self.n+(round(np.sqrt(self.n)))])
        if animated == True:
            fig.suptitle('Simulation of 1D random walk, live')
            def update(frame):
                # Redraw every walk up to the frame index supplied by FuncAnimation
                ax.clear()
                ax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)), round(max(x_array) + np.sqrt(self.n)*3)])
                ax.set_xlim([-(round(np.sqrt(self.n))), self.n+(round(np.sqrt(self.n)))])
                for x_vals, t_vals in t_x_arrays:
                    ax.plot(t_vals[:frame+1], x_vals[:frame+1])
            # Keep a reference so the animation is not garbage-collected before it runs
            self._animation = anim.FuncAnimation(fig, update, frames=self.n+1, repeat=False, interval=10)
else:
fig.suptitle('Simulation of 1D random walk, static')
for i in t_x_arrays:
x_vals,t_vals = i
ax.plot(t_vals, x_vals)
if show == True:
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)",
"def animate(agent, steps, initialize=None):\n grid, r, c = random_world()\n image = plt.imshow(grid, cmap=cmap, norm=norm)\n if initialize:\n state = initialize()\n for t in range(steps):\n draw_world(grid, r, c, image)\n percept = get_percept(grid, r, c)\n if initialize:\n action, *state = agent(percept, *state)\n else:\n action = agent(percept)\n\n r, c = apply(grid, r, c, action)\n plt.pause(0.0001)\n plt.show()",
"def animate_starter(self, **kwargs):\n interval = 5 # this number works fine, but is rather arbirtary, presumably in milliseconds\n print(\"The timer length is \" + str(len(self.sy.short_timer)))\n print(\"Shape of coordinate_grid is \" + str(np.shape(self.sy.coordinate_grid)))\n print(\"The animator interval was \" + str(interval) + \" in unknown units\")\n # I don't currently understand why the galaxy chooses\n # to slow down mid way through.\n # Perhaps I should look at the FuncAnimation\n # dictionary and work out what has gone wrong.\n with plt.style.context((\"dark_background\")):\n ani = animation.FuncAnimation(\n self.fig,\n self.animate,\n frames=len(self.sy.short_timer),\n interval=interval,\n blit=True,\n init_func=self.ani_init,\n )\n ani.save(\n str(self.co.out)\n + \"/\"\n + str(self.name)\n + \"move_with_\"\n + str(self.move_with)\n + \".mp4\",\n writer=writer,\n )\n plt.clf() # always make sure you close the lid",
"def animate_simulation(self, **kwargs):\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n ani = self.get_animator()\n \n return ani.animate_simulation( self.traj, **kwargs)",
"def start_sim(self):\n self.anim = animation.FuncAnimation(self.fig, self.anim_func, frames = self.timesteps, interval = 1, blit=True)\n plt.show()",
"def animation(self, t):\n self.program['u_clock'] = 2*t\n gloo.clear('black')\n self.program.draw('points')\n return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]",
"def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield",
"def generate_simulation_html_video(self, **kwargs):\n \n # Check is trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n animator = self.get_animator()\n animator.animate_simulation( self.traj, show = False , **kwargs )\n html_video = animator.ani.to_html5_video()\n \n return html_video",
"def static(fps, duration):\n\n frames = int(duration * fps)\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing, frames)\n return animate",
"def _animate(self):\n steps = (1, 7, 14)\n if self.rect.x < self.start_x - 100:\n self.change_dir = False\n elif self.rect.x > self.start_x + 100:\n self.change_dir = True\n self.direction = -1 if self.change_dir else 1\n self.rect.x += self.direction * choice(steps)",
"def draw():\n ant.move(aim)\n ant.x = wrap(ant.x)\n ant.y = wrap(ant.y)\n\n aim.move(random() - 0.5)\n aim.rotate(random() * 10 - 5)\n\n clear()\n goto(ant.x, ant.y)\n dot(4)\n\n ontimer(draw, 100)",
"def TestAnimation(self,event=None):\n wx.GetApp().Yield(True)\n Range = (-10,10)\n self.Range = Range\n\n self.UnBindAllMouseEvents()\n Canvas = self.Canvas\n Canvas.InitAll()\n\n ## Random tests of everything:\n colors = self.colors\n # Rectangles\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddRectangle(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Ellipses\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddEllipse(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Circles\n for i in range(5):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n D = random.randint(1,5)\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddCircle(xy, D, LineWidth = lw, LineColor = colors[cl], FillColor = colors[cf])\n Canvas.AddText(\"Circle # %i\"%(i), xy, Size = 12, BackgroundColor = None, Position = \"cc\")\n\n # Lines\n for i in range(5):\n points = []\n for j in range(random.randint(2,10)):\n point = (random.randint(Range[0],Range[1]),random.randint(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,10)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddLine(points, LineWidth = lw, LineColor = colors[cl])\n\n # Polygons\n for i in range(3):\n points = []\n for j in range(random.randint(2,6)):\n point = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,6)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddPolygon(points,\n LineWidth = lw,\n LineColor = colors[cl],\n FillColor = colors[cf],\n FillStyle = 'Solid')\n\n # Scaled Text\n String = \"Scaled text\"\n for i in range(3):\n ts = random.random()*3 + 0.2\n cf = random.randint(0,len(colors)-1)\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n Canvas.AddScaledText(String, xy, Size = ts, Color = colors[cf], Position = \"cc\")\n\n\n # Now the Foreground Object:\n C = Canvas.AddCircle((0,0), 7, LineWidth = 2,LineColor = \"Black\",FillColor = \"Red\", InForeground = True)\n T = Canvas.AddScaledText(\"Click to Move\", (0,0), Size = 0.6, Position = 'cc', InForeground = True)\n C.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.MoveMe)\n C.Text = T\n\n self.Timer = wx.PyTimer(self.ShowFrame)\n self.FrameDelay = 50 # milliseconds\n\n Canvas.ZoomToBB()",
"def do_animations(self):\n self.animate_bloop(700, 160, 50)",
"def start_animation(self):\n\t\ttime.sleep(1)\n\t\tself.fishbowl.animate_balls()",
"def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())",
"def random_walk(n):\n x,y = 0,0\n for i in range(n):\n (dx,dy) = random.choice([(0,1),(1,0),(0,-1),(-1,0)])\n x += dx\n y+=dy\n return(x,y)",
"def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate",
"def animate(self):\n if self.board.automaton.get() == \"life\":\n self.state = self.life.game_of_life(self.state)\n #self.life.random_activations(self.state)\n elif self.board.automaton.get() == \"seeds\":\n self.state = self.life.seeds(self.state)\n else:\n pass\n\n self.board.update_cells(self.state)\n self.parent.after(DELAY, self.animate)",
"def animate_pendulum():\n # frame_rate = 100\n # steps_per_frame = 10\n # h = 1.0/(frame_rate*steps_per_frame) # size of single step\n for i in range(steps_per_frame):\n k1 = h * f_theta_omega(angles, t)\n k2 = h * f_theta_omega(angles + 0.5 * k1, t)\n k3 = h * f_theta_omega(angles + 0.5 * k2, t)\n k4 = h * f_theta_omega(angles + k3, t)\n angles += (k1 + 2 * k2, 2 * k3, k4)/6\n pass",
"def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5",
"def update(self):\n if self.iteration > self.rate:\n self.iteration = 0\n heading = (random.random() * 180) - 90\n self.speed = 0.1\n if heading >= 0:\n self.heading = heading\n else:\n self.heading = 360 + heading\n self.iteration += 1\n self.setVector(self.speed, self.heading)",
"def walking(self):\r\n if (self.current_time - self.animate_timer) > 125:\r\n if self.frame_index == 0:\r\n self.frame_index += 1\r\n elif self.frame_index == 1:\r\n self.frame_index = 0\r\n self.animate_timer = self.current_time",
"def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n self.tau_arrowheads[link],\n float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads",
"def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w",
"def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True",
"def make_simulation(self):\n pass",
"def random_walk_2(n):\n x,y=0,0\n for i in range(n):\n dx,dy = random.choice([(0,1), (0,-1),(1,0) ,(-1,0)])\n x+= dx\n y+= dy\n return (x,y)",
"def main(*args):\n #\n # Use argparse to handle parsing the command line arguments.\n # https://docs.python.org/3/library/argparse.html\n #\n parser = argparse.ArgumentParser(description='Animate an epidemic')\n parser.add_argument('--size', metavar='N', type=int, default=50,\n help='Use a N x N simulation grid')\n parser.add_argument('--duration', metavar='T', type=int, default=100,\n help='Simulate for T days')\n parser.add_argument('--recovery', metavar='P', type=float, default=0.1,\n help='Probability of recovery (per day)')\n parser.add_argument('--infection', metavar='P', type=float, default=0.1,\n help='Probability of infecting a neighbour (per day)')\n parser.add_argument('--death', metavar='P', type=float, default=0.005,\n help='Probability of dying when infected (per day)')\n parser.add_argument('--cases', metavar='N', type=int, default=2,\n help='Number of initial infected people')\n parser.add_argument('--plot', action='store_true',\n help='Generate plots instead of an animation')\n parser.add_argument('--file', metavar='N', type=str, default=None,\n help='Filename to save to instead of showing on screen')\n args = parser.parse_args(args)\n\n # Set up the simulation\n simulation = Simulation(args.size, args.size,\n args.recovery, args.infection, args.death)\n simulation.infect_randomly(args.cases)\n\n # Plot or animation?\n if args.plot:\n fig = plot_simulation(simulation, args.duration)\n\n if args.file is None:\n # python runsim.py --plot\n plt.show()\n else:\n # python runsim.py --plot --file=plot.pdf\n fig.savefig(args.file)\n else:\n animation = Animation(simulation, args.duration)\n\n if args.file is None:\n # python runsim.py\n animation.show()\n else:\n # python runsim.py --file=animation.mp4\n #\n # NOTE: this needs ffmpeg to be installed.\n animation.save(args.file)",
"def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1",
"def waiting_animation():\n animation = [\"[■□□□□□□□□□]\",\"[■■□□□□□□□□]\", \"[■■■□□□□□□□]\", \"[■■■■□□□□□□]\", \n \"[■■■■■□□□□□]\", \"[■■■■■■□□□□]\", \"[■■■■■■■□□□]\", \"[■■■■■■■■□□]\", \n \"[■■■■■■■■■□]\", \"[■■■■■■■■■■]\", \"[□■■■■■■■■■]\", \"[□□■■■■■■■■]\",\n \"[□□□■■■■■■■]\", \"[□□□□■■■■■■]\", \"[□□□□□■■■■■]\", \"[□□□□□□■■■■]\",\n \"[□□□□□□□■■■]\", \"[□□□□□□□□■■]\", \"[□□□□□□□□□■]\", \"[□□□□□□□□□□]\"\n ]\n for i in range(len(animation)):\n time.sleep(0.2)\n sys.stdout.write(\"\\r\" + animation[i % len(animation)])\n sys.stdout.flush()"
] | [
"0.6422432",
"0.6396238",
"0.636496",
"0.63317525",
"0.62908727",
"0.6250839",
"0.6221125",
"0.6113204",
"0.61060905",
"0.60530597",
"0.6004688",
"0.59976655",
"0.5970178",
"0.5949534",
"0.5910587",
"0.5881018",
"0.58617455",
"0.58095884",
"0.5787556",
"0.5768405",
"0.57596225",
"0.57517755",
"0.5738987",
"0.57359564",
"0.57013273",
"0.5694511",
"0.5674949",
"0.5669764",
"0.56648046",
"0.5648819"
] | 0.7161393 | 0 |
Returns the theoretical average distance from x_initial. | def _calculate_mean_distance_theoretical(self):
x_mean_distance = 0
x_vals,prob_vals = self.tuple_of_probabilities
for i in range(len(x_vals)):
x_val, prob = x_vals[i], prob_vals[i]
x_distance = abs(x_val - self.x_initial)
x_weighted = x_distance * prob
x_mean_distance += x_weighted
return x_mean_distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def avgX(self):\n return np.mean(self.getx())",
"def _avg_sample(self):\n samples = [0] * self.num_samples\n for i in range(self.num_samples):\n samples[i] = self.sensor.measure_distance()\n time.sleep(self.sample_delay)\n if self.drop_extremes:\n samples.sort()\n samples = samples[1:-1]\n return sum(samples) / len(samples)",
"def average_distance(self):\r\n total = 0\r\n edges = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n total += edge.distance\r\n edges += 1\r\n return total / edges",
"def mean_deviation(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: abs(x - _mean), self.sample))/len(self.sample)",
"def deviationAvg(xs):\n\treturn deviation(xs) / sqrt(len(xs))",
"def average(self):\n return (self.current + self.last) / 2.0",
"def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm",
"def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))",
"def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)",
"def x_mean(self):\n return self._get_mean_pole(\"x\")",
"def average(self):\n return self.summation() / self.count()",
"def x_distance(self):\n return self.get_distance(self.X_INDEX)",
"def __calculate_average_distance(self):\n game = self.__game # type: Game\n all_icebergs = game.get_all_icebergs()\n all_icebergs_length = len(all_icebergs)\n sum_distances = 0\n for i in range(all_icebergs_length):\n for j in range(i + 1, all_icebergs_length):\n iceberg1 = all_icebergs[i]\n iceberg2 = all_icebergs[j]\n sum_distances += iceberg1.get_turns_till_arrival(iceberg2)\n\n return sum_distances / (all_icebergs_length * (all_icebergs_length - 1) / 2)",
"def getAverage(self):\n return sum(self.scores) / len(self.scores)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def my_mean(x):\n return my_sum(x) / my_len(x)",
"def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg",
"def mahalanobis_distance(self, x: np.ndarray) -> float:\n # this method could be vectorized for efficient calls\n error = x - self.mean\n mahalanobis_distance = error.T @ nla.solve(self.cov, error)\n return mahalanobis_distance",
"def average_speed(self):\n return self.total_distance * 3600 / self.total_time",
"def norm_ape(self) -> float:\n return float(np.sqrt(np.sum(np.square(self._percentage_error() - self.mape())) / (len(self.true) - 1)))",
"def std_deviation_of_mean_value(x):\n\n import numpy as np\n x = np.array(x)\n M = np.size(x)\n\n if M == 1: return 0\n\n return empirical_std_deviation(x)/np.sqrt(M)",
"def get_mean(self):\n return numpy.mean(self._x) - numpy.mean(self._y)",
"def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))",
"def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average",
"def calc_error_dist(self):\n pass",
"def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu",
"def computeX0 (self):\n self.m_x0 = np.sum(self.m_arr, axis=0)\n \"\"\" Subtract the point for which f(x) is max \"\"\"\n self.m_x0 -= self.m_arr[self.m_sorted[-1], :]\n \"\"\" Compute average \"\"\"\n self.m_x0 /= self.m_dim\n _debugPrint(\"Centroid: %s\" %self.m_x0)",
"def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)"
] | [
"0.65964574",
"0.6528893",
"0.65209246",
"0.63227236",
"0.6232084",
"0.62193644",
"0.61996084",
"0.61704206",
"0.6161522",
"0.61305714",
"0.61239797",
"0.6103318",
"0.6088901",
"0.5991759",
"0.59127855",
"0.59127855",
"0.59127855",
"0.59074956",
"0.58808935",
"0.58742917",
"0.58724093",
"0.5839817",
"0.583781",
"0.58371776",
"0.5834861",
"0.58074135",
"0.58041173",
"0.5766301",
"0.57597315",
"0.57524264"
] | 0.80323946 | 0 |
Calculates the probability that x_n = k delta_x. This method uses the values of n and p in its calculations. | def _calculate_probability(self,k):
if abs(k * self.delta_x) > (3 * np.sqrt(self.variance)):
return 0.0
binom_coeff = special.binom(self.n,(self.n + k)/2)
b_value = binom_coeff * ((self.p) ** ((self.n + k)/2)) * ((1-self.p) ** ((self.n - k)/2))
return b_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult",
"def Poisson(n, k):\n\tp = math.exp(-k) * math.pow(k, n) / float(Factorial(n))\n\tassert 0.0 <= p <= 1.0, \"Error, value of p is invalid probability: \" + str(p)\n\treturn p",
"def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output",
"def _prob_kuiper(d, n_eff, dtype=\"f8\"):\n n_time_slices = np.size(d) # single value or vector\n n_points = 100\n\n en = math.sqrt(n_eff)\n k_lambda = (en + 0.155 + 0.24 / en) * d # see [1]\n l2 = k_lambda**2.0\n j2 = (np.arange(n_points) + 1) ** 2\n j2 = j2.repeat(n_time_slices).reshape(n_points, n_time_slices)\n fact = 4.0 * j2 * l2 - 1.0\n\n # compute normalized pK value in range [0,1]\n a = -2.0 * j2 * l2\n b = 2.0 * fact\n pk_norm = -logsumexp(a, b=b, axis=0) / (2.0 * n_eff)\n\n # check for no difference to uniform cdf\n pk_norm = np.where(k_lambda < 0.4, 0.0, pk_norm)\n\n # check for round off errors\n pk_norm = np.where(pk_norm > 1.0, 1.0, pk_norm)\n\n return pk_norm",
"def _compute_parameters(self, p, k):\n for i in range(self._.d + 1):\n p[0, i, i] = k[i]\n p[i, 0, i] = Integer(1)\n p[i, i, 0] = Integer(1)\n for i in range(self._.d):\n p[i+1, 1, i+1] = self._.a[i+1]\n p[i, 1, i+1] = self._.b[i]\n p[i+1, 1, i] = self._.c[i+1]\n for i in range(2, self._.d + 1):\n for j in range(1, self._.d + 1):\n for h in range(1, self._.d):\n p[h, i, j] = self._check_parameter(\n h, i, j,\n _simplify(_expand((\n self._.c[h] * p[h-1, i-1, j]\n + self._.b[h] * p[h+1, i-1, j]\n - self._.b[i-2] * p[h, i-2, j]\n + (self._.a[h] - self._.a[i-1]) * p[h, i-1, j]\n ) / self._.c[i])))\n p[self._.d, i, j] = self._check_parameter(\n self._.d, i, j,\n _simplify(_expand((\n self._.c[self._.d] * p[self._.d-1, i-1, j]\n - self._.b[i-2] * p[self._.d, i-2, j]\n + (self._.a[self._.d] - self._.a[i-1])\n * p[self._.d, i-1, j]\n ) / self._.c[i])))",
"def bpmf(k, n, p):\n # this does not work for large n\n return comb(n, k) * (p**k) * ((1 - p)**(n - k))",
"def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result",
"def calPFP(n, m, k):\n return pow(1-math.exp(-k*(n+0.5)/(m-1)), k)",
"def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total",
"def pmf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n\n c = (Binomial.factorial(self.n)) / \\\n (Binomial.factorial(k) * self.factorial((self.n - k)))\n\n return c * pow(self.p, k) * pow((1 - self.p), (self.n - k))",
"def payoff_n_p(p, n=3,\n MLB_contract=4158333, minor_contract=6600, thresh=1500000):\n distribution = []\n for n_makers in range(n + 1):\n if n_makers == 0:\n payoff_prob = [1 - prob for prob in p.values()]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((minor_contract, payoff_prob))\n elif n_makers == n:\n payoff_prob = [prob for prob in p.values()]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((MLB_contract, payoff_prob))\n else:\n makers = list(combinations(range(1, n + 1), n_makers))\n for maker_set in makers:\n if 1 in maker_set:\n payoff = MLB_contract - 0.1*(MLB_contract-thresh)\n payoff += (n_makers-1)*0.1*(MLB_contract-thresh)/(n-1)\n payoff_prob = [p[player] for player in maker_set]\n payoff_prob += [1-p[player] for player in p.keys() if player not in maker_set]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((payoff, payoff_prob))\n else:\n payoff = minor_contract\n payoff += n_makers*0.1*(MLB_contract-thresh)/(n-1)\n payoff_prob = [p[player] for player in maker_set]\n payoff_prob += [1-p[player] for player in p.keys() if player not in maker_set]\n payoff_prob = np.prod(payoff_prob)\n distribution.append((payoff, payoff_prob))\n E_payoff = [a*b for (a, b) in distribution]\n E_payoff = sum(E_payoff)\n var_payoff = [((a-E_payoff)**2)*b for (a, b) in distribution]\n var_payoff = sum(var_payoff)\n return E_payoff, var_payoff**0.5",
"def probability(delta_cost: float, temperature: float, k: float = 1) -> float:\n if delta_cost < 0:\n return 1\n else:\n return np.exp(-delta_cost / (k * temperature))",
"def bpmfln(k, n, p):\n return np.exp(combinln(n, k) + k * np.log(p) + (n - k) * np.log(1 - p))",
"def binomialTest(k, n, p = 0.5, exact = False):\n\tassert(k <= n)\n\tassert(k >= 0 and n > 0)\n\tn = int(n)\n\tk = int(k)\n\tp_value = 1.0\n\n\t# Trivial cases where p = 0 or p = 1\n\tif p == 0.0: # Must then have k = 0\n\t\tif k > 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\tif p == 1.0: # Must then have k = n\n\t\tif k <= n:\n\t\t\treturn 1.0\n\n\tif k == 0:\n\t\t# Probability of at least zero successes is 1\n\t\tp_value = 1.0\n\telif k == n:\n\t\t# Probability of all successes\n\t\tp_value = p**n\n\telse:\n\t\tif not exact and n*p > 30 and n*(1-p) > 30:\n\t\t\t# Use normal approximation\n\t\t\tmu = n*p\n\t\t\tsd = math.sqrt(n*p*(1-p))\n\t\t\tz = (k-mu)/sd\n\t\t\tif z < 0.0:\n\t\t\t\tp_value = 1-Prob_Z(z)\n\t\t\telse:\n\t\t\t\tp_value = Prob_Z(z)\n\t\telse:\n\t\t\tp_value = p**n # The last term in the sum\n\t\t\tfor j in range(k,n):\n\t\t\t\t# Compute logarithm of (n choose j) p^j (1-p)^ (n-j), the\n\t\t\t\t# binomial probability. Use logarithm to avoid overflow\n\t\t\t\t# problems with potentially enormous factorials.\n\t\t\t\tlog_p = logChoose(n,j) + j*math.log(p) + (n-j)*math.log(1-p)\n\t\t\t\tp_value += math.exp(log_p)\n\t\t\tif p_value > 1.0:\n\t\t\t\tp_value = 1.0\n\treturn p_value",
"def dbinom(self, x, n, p):\n f = math.factorial\n C = Decimal(f(n) / (f(x) * f(n-x)))\n return C * p**x * (1-p)**(n-x)",
"def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n",
"def calculateP(SD, numDiff):\n return numDiff/SD",
"def calculateP(SD, numDiff):\n return numDiff/SD",
"def pmf(self, k):\n if k % 1 != 0:\n k = int(k)\n if k < 0 and k <= self.n:\n return 0\n q = 1 - self.p\n co = (self.factorial(self.n) / ((self.factorial(self.n-k)\n * self.factorial(k))))\n q2 = q ** (self.n - k)\n return co * (self.p ** k) * q2",
"def PN(self, n):\n if not self.isVaild():\n pass\n if n < self.C:\n return self.P0()*(self.r()**n)/math.factorial(n)\n else:\n return self.P0()*(self.r()**n)/(math.factorial(self.C)*self.C**(n-self.C))",
"def tourney_prob(k, N, m):\n\n if N < m:\n print \"The second argument cannot be smaller than the third one.\"\n sys.exit()\n\n if m < 1 or k <= 0:\n return 0.0\n elif m == 1:\n return 1.0 / N\n else:\n return float(N - k) * m / (N * (m - 1)) * tourney_prob(k, N - 1, m - 1)",
"def _calculate_p(vector: pd.DataFrame, n: int) -> pd.DataFrame:\r\n\r\n ## Isolate permuted walk scores (these fields begin w/ p_), identify permuted scores\r\n ## of equal or greater magnitude than the one originally observed, sum the scores,\r\n ## then calculate the p-value.\r\n vector['p'] = (\r\n vector.filter(regex=r'p_\\d+')\r\n .apply(lambda x: x >= vector.probability)\r\n .select_dtypes(include=['bool'])\r\n .sum(axis=1)\r\n )\r\n vector['p'] = (vector.p + 1) / (n + 1)\r\n\r\n ## Get rid of all the permuted score columns\r\n return vector[['node_from', 'node_to', 'probability', 'p']]",
"def bernul(n, k, p):\n return comb(n, k) * p ** k * (1 - p) ** (n-k)",
"def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob",
"def get_power_one_side(alpha, n, k, effect_size):\n c = sc.t.ppf(1 - alpha, (1 + k) * n - 2)\n delta_norm = effect_size * np.sqrt(1 / (1 / n + 1 / (n * k)))\n p = 1 - sc.nct.cdf(c, (1 + k) * n - 2, delta_norm)\n return p",
"def binom_pdf(k, n,p,binom):\n return binom * p**k * (1-p)**(n-k)",
"def multiple_comparisons(p, n):\r\n if p > 1e-6: # if p is large and n small, calculate directly\r\n return 1 - (1 - p) ** n\r\n else:\r\n return one_minus_exp(-n * p)",
"def _bernoulli_lower(self, p, n, delta):\n if p < 1e-6:\n return 0.\n else:\n lower = scipy.stats.beta.ppf(delta / 2, p * n, n - p * n + 1)\n return lower",
"def payoff_n(n=3, p=0.06,\n MLB_contract=4158333, minor_contract=6600, thresh=1500000):\n distribution = []\n for n_makers in range(n + 1): # For every number of possible players who make it\n if n_makers == 0:\n distribution.append((minor_contract, (1-p)**n))\n elif n_makers == n:\n distribution.append((MLB_contract, p**n))\n else:\n # number of combinations of players who make it\n n_combinations = factorial(n)\n n_combinations /= (factorial(n-n_makers)*factorial(n_makers))\n n_combinations = int(n_combinations)\n\n # number of combinations where player 1 makes it\n n_indv_inmajors = factorial(n-1)\n n_indv_inmajors /= (factorial((n-1)-(n_makers-1))*factorial(n_makers-1))\n n_indv_inmajors = int(n_indv_inmajors)\n\n # probability that n_makers of players make it\n payoff_prob = p**n_makers * (1-p)**(n-n_makers)\n\n # payoff when player 1 is one of the players who makes it\n payoff = MLB_contract - 0.1*(MLB_contract-thresh)\n payoff += (n_makers-1)*0.1*(MLB_contract-thresh)/(n-1)\n distribution.append((payoff, payoff_prob*n_indv_inmajors))\n\n # payoff when player 1 is not one of the players who makes it\n payoff = minor_contract\n payoff += n_makers*0.1*(MLB_contract-thresh)/(n-1)\n distribution.append((payoff, payoff_prob*(n_combinations-n_indv_inmajors)))\n E_payoff = [a*b for (a, b) in distribution]\n E_payoff = sum(E_payoff)\n var_payoff = [((a-E_payoff)**2)*b for (a, b) in distribution]\n var_payoff = sum(var_payoff)\n return E_payoff, var_payoff**0.5",
"def compute_prob_mle(X: np.ndarray, k: int) -> np.ndarray:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n Categorical._check_input_data(X=X)\n Categorical._check_support(X=X, k=k)\n\n prob = np.zeros(k)\n for x in X:\n prob[x] += 1\n prob /= prob.sum()\n\n return prob"
] | [
"0.7945974",
"0.7007212",
"0.6929289",
"0.6782928",
"0.67037106",
"0.65899515",
"0.6540211",
"0.6514137",
"0.6489262",
"0.63936114",
"0.636305",
"0.6362294",
"0.62638086",
"0.6237782",
"0.6176205",
"0.61600775",
"0.613456",
"0.613456",
"0.6123985",
"0.6123276",
"0.61229426",
"0.6118929",
"0.6117606",
"0.6103296",
"0.60972047",
"0.60952836",
"0.60885805",
"0.606381",
"0.6063598",
"0.6063093"
] | 0.7519626 | 1 |
Gets a tuple of the form (kvalues, probabilities) in the range [-n, n]. | def _get_tuple_of_probabilities(self):
k_array = np.arange(-self.n,self.n+1,2)
probability_array = []
for k in k_array:
probability_array.append(self._calculate_probability(k))
return (k_array,probability_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult",
"def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output",
"def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]",
"def n_choose_kv(newK):\n values = np.zeros((1,newK+1))\n ks = np.arange(newK+1)\n \n for i in range(newK+1):\n values[i] = scipy.misc.comb(newK, ks[i])\n\n return values",
"def get_rank_probabilities(n: int) -> List[float]:\n alpha = 3.5\n ranks = [1 / i**alpha for i in range(1, n + 1)]\n\n return [r / sum(ranks) for r in ranks]",
"def ks_pval(data):\n n,p = np.shape(data)\n pvals = [None] * p\n for i in range(p):\n foo, pvals[i] = stats.kstest(data[:,i], \"beta\", args = (1,p-i))\n return pvals",
"def prob_list(n, p):\n ...\n#pn is the probability of seeing a pair of aces two hands in a row in n hands.\n#qn = 1 - pn\n#thus qn is the probability of NOT seeing a par of aces two hands in a row in n hands.\n list = []\n qn_a = 1\n qn_b = 1\n list.append(qn_a)\n list.append(qn_b)\n\n for i in range(n-2):\n i += 0\n qn_next = (1-p)*qn_b + p*(1-p)*qn_a\n list.append(qn_next)\n qn_a = qn_b\n qn_b = qn_next\n return list\n\n\n # returns list of probs. [q0,... qn]",
"def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]",
"def get_n(self, n):\n \n return [self.get_values() for _ in range(n)]",
"def nchoosek(n, k):\n if (n, k) in known:\n return known[(n,k)]\n if k == 0:\n return 1\n if n == k:\n return 1\n if n < k:\n return \"n must be greater than k\"\n result = nchoosek(n - 1, k - 1) + nchoosek(n - 1, k)\n known[(n,k)] = result\n return result",
"def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total",
"def binomial_coefficient3(n, k):\n return reduce(lambda a, b: a * (n - b) / (b + 1), xrange(k), 1)",
"def basis_generic(n, *, p, bound=1):\r\n if n == 0:\r\n return ((0,),) # \r\n if n == 1:\r\n return ((1,),)\r\n result = []\r\n \r\n # append P^{last} beta^{epsilon}\r\n for epsilon in [0,1]:\r\n # Without this lower bound edge case we lose the element (0, 1, 1) in degree 5.\r\n # I don't have a good explanation for what it means yet.\r\n lower_bound = bound + epsilon if bound > 1 else 1\r\n for last in range(lower_bound, 1 + (n // (2*(p - 1)))):\r\n remaining_degree = n - 2*(p-1)*last - epsilon\r\n basis_in_remaining_degree = basis_generic(remaining_degree, p=p, bound=p * last)\r\n for vec in basis_in_remaining_degree:\r\n result.append(vec + (last, epsilon))\r\n return tuple(result)",
"def choose(n, k):\n ans, k = 1, min(k, n-k)\n for i in range(k):\n ans *= n-i\n ans //= i+1\n return ans",
"def combinations(n, k):\r\n return exp(gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1))",
"def binomialTest(k, n, p = 0.5, exact = False):\n\tassert(k <= n)\n\tassert(k >= 0 and n > 0)\n\tn = int(n)\n\tk = int(k)\n\tp_value = 1.0\n\n\t# Trivial cases where p = 0 or p = 1\n\tif p == 0.0: # Must then have k = 0\n\t\tif k > 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\tif p == 1.0: # Must then have k = n\n\t\tif k <= n:\n\t\t\treturn 1.0\n\n\tif k == 0:\n\t\t# Probability of at least zero successes is 1\n\t\tp_value = 1.0\n\telif k == n:\n\t\t# Probability of all successes\n\t\tp_value = p**n\n\telse:\n\t\tif not exact and n*p > 30 and n*(1-p) > 30:\n\t\t\t# Use normal approximation\n\t\t\tmu = n*p\n\t\t\tsd = math.sqrt(n*p*(1-p))\n\t\t\tz = (k-mu)/sd\n\t\t\tif z < 0.0:\n\t\t\t\tp_value = 1-Prob_Z(z)\n\t\t\telse:\n\t\t\t\tp_value = Prob_Z(z)\n\t\telse:\n\t\t\tp_value = p**n # The last term in the sum\n\t\t\tfor j in range(k,n):\n\t\t\t\t# Compute logarithm of (n choose j) p^j (1-p)^ (n-j), the\n\t\t\t\t# binomial probability. Use logarithm to avoid overflow\n\t\t\t\t# problems with potentially enormous factorials.\n\t\t\t\tlog_p = logChoose(n,j) + j*math.log(p) + (n-j)*math.log(1-p)\n\t\t\t\tp_value += math.exp(log_p)\n\t\t\tif p_value > 1.0:\n\t\t\t\tp_value = 1.0\n\treturn p_value",
"def nchoosek(n, k):\n if n < k:\n return 0\n return partition(n, [k, n - k])",
"def get_probability(letters, n):\n return {l: c/n for l, c in letters.items()}",
"def normal_aproximation_to_binomial(n: int, p: float) -> Tuple[float, float]:\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def n_choose_k(N,K):\n return factorial(N) // (factorial(N - K) * factorial(K))",
"def probabilities(self):\n raise NotImplementedError",
"def get_witnesses(n):\n assert (n > 2) and (n % 2 == 1)\n if n < 2047:\n # References: [1], [2], [4]\n witnesses = (2,)\n elif n < 1373653: # ~1.3 million\n # References: [1], [2], [3], [4]\n witnesses = (2, 3)\n elif n < 9080191: # ~9.0 million\n # References: [3], [4]\n witnesses = (31, 73)\n elif n < 25326001: # ~25.3 million\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5)\n elif n < 3215031751: # ~3.2 billion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7)\n elif n < 4759123141: # ~4.7 billion\n # References: [3], [4]\n witnesses = (2, 7, 61)\n elif n < 2152302898747: # ~2.1 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11)\n elif n < 3474749660383: # ~3.4 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11, 13)\n elif n < 341550071728321: # ~341.5 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11, 13, 17)\n elif n < 3825123056546413051: # ~3.8 million trillion\n # References: [1], [4]\n witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23)\n elif n <= 2**64:\n witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)\n else:\n witnesses = None\n \n return witnesses",
"def choose(n, k):\n # http://stackoverflow.com/a/3025547/313967\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a",
"def _calculate_probability(self,k):\n\t\tif abs(k * self.delta_x) > (3 * np.sqrt(self.variance)):\n\t\t\treturn 0.0\n\t\tbinom_coeff = special.binom(self.n,(self.n + k)/2)\n\t\tb_value = binom_coeff * ((self.p) ** ((self.n + k)/2)) * ((1-self.p) ** ((self.n - k)/2))\n\t\treturn b_value",
"def k(n):\r\n primes = u.sieve(n)\r\n l = [1, 0]\r\n for i in range(2, n + 1):\r\n l1 = [l[r] * sopf(i - r, primes) for r in range(1, i)]\r\n s = (sum(l1) + sopf(i, primes)) // i\r\n l.append(s)\r\n return l[n]",
"def bincoeff(n: int, k: int = None) -> Union[int, List[int]]:\n if k is not None:\n return comb(n, k)\n else:\n result = []\n for i in range(0, n + 1):\n result.append(comb(n, i))\n return result",
"def get_vals(self) -> (float, float, float):\n return (self._controller.kp, self._controller.ki, self._controller.kd)"
] | [
"0.6795796",
"0.6431642",
"0.6371308",
"0.63636243",
"0.62259746",
"0.6038752",
"0.59965384",
"0.5926337",
"0.59128183",
"0.5910087",
"0.5896185",
"0.58841133",
"0.584079",
"0.58231497",
"0.5818503",
"0.58054143",
"0.5799345",
"0.5798068",
"0.5791992",
"0.5765857",
"0.5764065",
"0.574794",
"0.5742223",
"0.5740768",
"0.5740768",
"0.57310474",
"0.5725249",
"0.5696687",
"0.5695373",
"0.5681311"
] | 0.80612916 | 0 |
Tries to decode unicode to deal with Python unicode strangeness. | def unicode_decode(text):
try:
return text.encode('utf-8').decode()
except UnicodeDecodeError:
return text.encode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unicode_decode(text):\n\n try:\n return text.encode('utf-8').decode()\n except UnicodeDecodeError:\n return text.encode('utf-8')",
"def escapeDecode(s: unicode) -> unicode:\n ...",
"def TryDecode(text):\n try:\n return unicode(text, \"utf8\")\n except (TypeError, UnicodeDecodeError):\n return text",
"def utf82unicode(s):\n return s.decode(encoding='utf-8', errors='ignore')",
"def decode_utf8(string):\n if isinstance(string, str):\n try: \n return string.decode(\"utf-8\")\n except:\n return string\n return unicode(string)",
"def safe_decode_utf8(s):\n if isinstance(s, bytes):\n return s.decode('utf-8', 'surrogateescape')\n return s",
"def decode_string(string):\n return unicode(string, 'utf-8')",
"def _force_unicode(data):\n try:\n data = unicode(data, \"utf-8\")\n except UnicodeDecodeError:\n data = unicode(data, \"latin1\")\n return data",
"def try_unicode(string):\n if isinstance(string, str):\n return string.decode(\"utf-8\")\n else:\n return string",
"def _decode_utf8(value):\n try:\n return value if not isinstance(value, bytes) else value.decode('utf-8', 'ignore')\n except UnicodeDecodeError:\n return None",
"def unicodise(string, encoding = None, errors = \"replace\"):\n global preferred_encoding\n \n if not encoding:\n encoding = preferred_encoding\n\n if type(string) == unicode:\n return string\n try:\n return string.decode(encoding, errors)\n except UnicodeDecodeError:\n raise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)",
"def decode_to_unicode(content):\n if content:\n try:\n # Try to decode ISO-8859-1 to unicode\n return content.decode(\"ISO-8859-1\")\n except UnicodeEncodeError:\n # Assume content is unicode already\n return content",
"def decode(text):\r\n try:\r\n if text.startswith(BOM_UTF8):\r\n # UTF-8 with BOM\r\n return unicode(text[len(BOM_UTF8):], 'utf-8'), 'utf-8-bom'\r\n elif text.startswith(BOM_UTF16):\r\n # UTF-16 with BOM\r\n return unicode(text[len(BOM_UTF16):], 'utf-16'), 'utf-16'\r\n elif text.startswith(BOM_UTF32):\r\n # UTF-32 with BOM\r\n return unicode(text[len(BOM_UTF32):], 'utf-32'), 'utf-32'\r\n coding = get_coding(text)\r\n if coding:\r\n return unicode(text, coding), coding\r\n except (UnicodeError, LookupError):\r\n pass\r\n # Assume UTF-8\r\n try:\r\n return unicode(text, 'utf-8'), 'utf-8-guessed'\r\n except (UnicodeError, LookupError):\r\n pass\r\n # Assume Latin-1 (behaviour before 3.7.1)\r\n return unicode(text, \"latin-1\"), 'latin-1-guessed'",
"def convertFromUnicode(content):\n return content",
"def safe_decode_inner(s):\n if isinstance(s, unicode):\n return s\n for encoding in preflist:\n try:\n return s.decode(encoding, 'strict')\n except UnicodeDecodeError:\n if logger is not None:\n logger.warn(\"Assuming %(encoding)r, can't decode %(s)r\",\n locals())\n if errors != 'strict' and preferred:\n return s.decode(preferred, errors)\n raise",
"def cast_unicode(s, encoding='utf-8'):\n if isinstance(s, bytes) and not PY3:\n return s.decode(encoding, \"replace\")\n return s",
"def _string_convert(str):\n if isinstance(str, unicode):\n return str\n try:\n return str.decode(locale.getpreferredencoding(), 'strict')\n except UnicodeError:\n try:\n return str.decode(locale.getpreferredencoding(), 'replace')\n except UnicodeError:\n # unrepresentable string\n return u'????'",
"def utf8_decoder(s):\n if s is None:\n return None\n return s.decode('utf-8')",
"def try_decode(text, encoding=\"utf-8\"):\n try:\n return text.decode(encoding, \"ignore\")\n except Exception:\n return text",
"def decode_to_utf8(text) -> bytes: # pragma: no cover\n try:\n return text.decode(\"utf-8\")\n except (AttributeError, UnicodeEncodeError):\n return text",
"def asunicode(s):\n if isinstance(s, bytes):\n return s.decode('utf-8', 'replace')\n else:\n return s",
"def decode(self, s):",
"def decode(self, s):",
"def _as_unicode(s):\n if isinstance(s, str):\n return s\n # Assume it is a bytes string\n # Note ISO-8859-1 aka Latin-1 preserves first 256 chars\n return codecs.latin_1_decode(s)[0]",
"def to_unicode(s):\n\n def brute_enc(s2):\n \"\"\"Trying to decode via simple brute forcing.\"\"\"\n encodings = (\"ascii\", \"utf8\", \"latin1\")\n for enc in encodings:\n try:\n return unicode(s2, enc)\n except UnicodeDecodeError:\n pass\n return None\n\n def chardet_enc(s2):\n \"\"\"Guess encoding via chardet.\"\"\"\n enc = chardet.detect(s2)[\"encoding\"]\n\n try:\n return unicode(s2, enc)\n except UnicodeDecodeError:\n pass\n return None\n\n # If already in unicode, skip.\n if isinstance(s, unicode):\n return s\n\n # First try to decode against a little set of common encodings.\n result = brute_enc(s)\n\n # Try via chardet.\n if not result:\n result = chardet_enc(s)\n\n # If not possible to convert the input string, try again with\n # a replace strategy.\n if not result:\n result = unicode(s, errors=\"replace\")\n\n return result",
"def _get_unicode_value(value: Union[Text, bytes]) -> Text:\n decoded_value = stats_util.maybe_get_utf8(value)\n # Check if we have a valid utf-8 string. If not, assign a placeholder.\n if decoded_value is None:\n _NON_UTF8_VALUES_COUNTER.inc()\n decoded_value = constants.NON_UTF8_PLACEHOLDER\n return decoded_value",
"def decoding_strings(data):\n if isinstance(data, str):\n data = data.replace(\"b'\", \"\")\n return data\n elif isinstance(data, bytes):\n return data.decode()\n else:\n return False",
"def unicodise(string, encoding = None, errors = \"replace\"):\n\n\tif not encoding:\n\t\tencoding = Config.Config().encoding\n\n\tif type(string) == unicode:\n\t\treturn string\n\tdebug(\"Unicodising %r using %s\" % (string, encoding))\n\ttry:\n\t\treturn string.decode(encoding, errors)\n\texcept UnicodeDecodeError:\n\t\traise UnicodeDecodeError(\"Conversion to unicode failed: %r\" % string)",
"def force_unicode(s, encoding=encoding, errors='strict'):\n if isinstance(s, unicode):\n return s\n elif hasattr(s, '__unicode__'):\n return unicode(s)\n elif isinstance(s, str):\n return s.decode(encoding, errors)\n else:\n return str(s).decode(encoding, errors)",
"def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")"
] | [
"0.7843637",
"0.7709333",
"0.768689",
"0.74438226",
"0.73078275",
"0.7273341",
"0.72526544",
"0.71388495",
"0.70961183",
"0.708849",
"0.7074156",
"0.7049334",
"0.7018959",
"0.6978158",
"0.6967038",
"0.69328606",
"0.6872787",
"0.68676764",
"0.6857858",
"0.6856613",
"0.6843041",
"0.6821354",
"0.6821354",
"0.68010414",
"0.67491186",
"0.67392254",
"0.6725663",
"0.6709787",
"0.6691644",
"0.6690571"
] | 0.79577845 | 0 |
Reads lines in file_id and fetches the relevant Facebook comments via the Facebook Graph API, saving the result to result_file. | def scrapeFacebookComments(file_id, result_file, access_token):
with open(file_id, 'r', encoding='utf8') as f, \
open(result_file, 'w', encoding='utf8', newline='') as o:
input_file = csv.DictReader(f)
output_file = csv.DictWriter(o,
fieldnames=[
'sentence_id',
'sentence_text'])
output_file.writeheader()
num_processed = 0
scrape_starttime = datetime.datetime.now()
base = "https://graph.facebook.com/v2.12"
parameters = "/?access_token={}".format(access_token)
print("Scraping {} Comments: {}\n".format(
file_id, scrape_starttime))
comment_contents = {}
for row in input_file:
if row['comment_id'] in comment_contents:
comment = comment_contents[row['comment_id']]
else:
node = "/{}".format(row['comment_id'])
url = base + node + parameters
reply = request_until_succeed(url)
if not reply:
print("Comment doesn't exists anymore: " + row['comment_id'])
continue
try:
comment = json.loads(reply)
except:
comment = json.loads(reply.decode('utf-8')) #python 3.5 and earlier bugfix
comment_contents[row['comment_id']] = comment # cache result in case of reuse
comment_message = '' if 'message' not in comment \
or comment['message'] is '' else \
unicode_decode(comment['message'])
sentence_texts = sent_tokenize(comment_message,
language='german')
sentence_text = sentence_texts[int(row['sentence_number'])]
ha = hashlib.md5(sentence_text.encode()).hexdigest()
if ha != row['md5_hash']:
print("Wrong MD5 hash for comment: " + row['comment_id'] + ", " + sentence_text)
continue
output_file.writerow({'sentence_id': row['sentence_id'],
'sentence_text': sentence_text})
num_processed += 1
if num_processed % 100 == 0:
print("{} Comments Processed: {}".format(
num_processed, datetime.datetime.now()))
print("\nDone!\n{} Comments Processed in {}".format(
num_processed, datetime.datetime.now() - scrape_starttime)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_comments(video_id, CLIENT_SECRETS_FILE):",
"def process_reddit_comment_file(f,\n output_folder):\n ## Output File\n if output_folder is not None:\n fname = os.path.basename(f).replace(\"comments.json\",\"processed.comments.json\")\n if not fname.endswith(\".gz\"):\n fname = fname + \".gz\"\n output_folder = output_folder.rstrip(\"/\")\n fname = f\"{output_folder}/{fname}\"\n if os.path.exists(fname):\n return fname\n ## Load Comment Data\n if f.endswith(\".gz\"):\n file_opener = gzip.open\n else:\n file_opener = open\n try:\n with file_opener(f, \"r\") as the_file:\n comment_data = json.load(the_file)\n except json.JSONDecodeError:\n with file_opener(f, \"r\") as the_file:\n comment_data = []\n for line in the_file:\n comment_data.append(json.loads(line))\n ## Check Data\n if len(comment_data) == 0:\n return None\n ## Transform into DataFrame\n comment_data = pd.DataFrame(comment_data).dropna(subset=[\"body\"])\n ## Tokenize Text\n comment_data[\"text_tokenized\"] = comment_data[\"body\"].map(tokenizer.tokenize)\n ## Add Meta\n comment_data[\"source\"] = f\n comment_data[\"entity_type\"] = \"comment\"\n comment_data[\"date_processed_utc\"] = int(datetime.utcnow().timestamp())\n ## Rename Columns and Subset\n comment_data.rename(columns = DB_SCHEMA[\"reddit\"][\"comment\"], inplace=True)\n comment_data = comment_data[list(DB_SCHEMA[\"reddit\"][\"comment\"].values())]\n ## Format Into JSON\n formatted_data = comment_data.apply(lambda row: row.to_json(), axis=1).tolist()\n formatted_data = list(map(lambda x: json.loads(x), formatted_data))\n ## Dump Processed Data (or return)\n if output_folder is None:\n return formatted_data\n else:\n with gzip.open(fname, \"wt\", encoding=\"utf-8\") as the_file:\n json.dump(formatted_data, the_file)\n return fname",
"def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = {}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict",
"def save_comments(self, videoId):\n comm_obj = self.get_comment_obj(videoId)# need to get the id \n\n file_exists = os.path.isfile(self.path)\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n if not file_exists:\n writer_top.writerow(['etag'] + ['videoId'] + ['commentId'] + ['text'] + ['author'] + ['like'] + ['time'])\n f.close()\n\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n for i in comm_obj['items']:\n\n result_row = [[i['etag'], i['snippet']['videoId'], i['snippet']['topLevelComment']['id'], i['snippet']['topLevelComment']['snippet']['textDisplay'], i['snippet']['topLevelComment']['snippet']['authorDisplayName'], i['snippet']['topLevelComment']['snippet']['likeCount'], i['snippet']['topLevelComment']['snippet']['publishedAt']]]\n writer_top.writerows(result_row)\n f.close()",
"def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")",
"def read_comment(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM comments WHERE personid=?\", (person_id,))\n comment_list = []\n for row in c:\n _comment = Comment()\n _comment.person_id = row[\"personid\"]\n _comment.comment = row[\"comment\"]\n _comment.comment_id = row[\"commentid\"]\n comment_list.append(_comment)\n conn.close()\n return comment_list\n except:\n return []",
"def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))",
"def get(self, request, *args, **kwargs):\n commentfile_id = kwargs.get('commentfile_id')\n comment_file = get_object_or_404(comment_models.CommentFile, id=commentfile_id)\n groupcomment = get_object_or_404(group_models.GroupComment, id=comment_file.comment.id)\n\n # Check that the cradmin role and the AssignmentGroup is the same.\n if groupcomment.feedback_set.group.id != request.cradmin_role.id:\n raise Http404()\n\n # If it's a private GroupComment, the request.user must be the one that created the comment.\n if groupcomment.visibility != group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE:\n if groupcomment.user != self.request.user:\n raise Http404()\n\n # Load file as chunks rather than loading the whole file into memory\n filewrapper = FileWrapper(comment_file.file)\n response = http.HttpResponse(filewrapper, content_type=comment_file.mimetype)\n filename = re.subn(r'[^a-zA-Z0-9._ -]', '', comment_file.filename.encode('ascii', 'replace').decode())[0]\n response['content-disposition'] = 'attachment; filename={}'.format(filename)\n response['content-length'] = comment_file.filesize\n\n return response",
"def save_local_files(name_file):\n global collection\n name_file = name_file.split('.')[0]\n document = collection.find({'id': name_file})\n if document.count() > 0 and document[0].get('ida_comments', ''):\n print('Comments already extracted for document [%s], skipping.' %\n document[0]['id'])\n return\n if document.count() == 0:\n document = {\n 'id': name_file,\n 'ida_comments': []}\n else:\n document = document[0]\n print('Saving comments for document [%s].' % document['id'])\n asm = open_asm2(document['id'])\n asm = [to_utf(line) for line in asm]\n comments = filter_comments(asm)\n document['ida_comments'] = comments\n collection.save(document)",
"def get_specific_comment_info(comment_id):\n start = time.time()\n\n comment = REDDIT.comment(comment_id)\n\n end = time.time()\n print(end - start)\n return comment.created_utc, comment.permalink, comment.score, comment.link_id",
"def fetch_comment(self, comment_id, **args):\n return self.fetch(\"/comment/\" + comment_id, **args)",
"def skip_comments(filepointer):\n\tcomments = []\n\tdata = '#'\n\ttry:\n\t\tpos = filepointer.tell()\n\texcept:\n\t\tprint(\"Could not read file.\")\n\t\treturn None\t\n\t\n\twhile data[0] == '#':\n\t\tdata = filepointer.readline()\n\t\tif not data:\n\t\t\traise Exception(\"Unexpected end of file while reading comments.\")\n\n\t\tif data[0] == '#':\n\t\t\tcomments.append(data)\n\t\t\tpos = filepointer.tell()\n\t\telse:\n\t\t\tfilepointer.seek(pos)\n\treturn comments",
"def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()",
"def get_comment_information_by_id(comment_id):\n comment = REDDIT.comment(comment_id)\n print(comment.body)\n print(vars(comment))",
"def task_fetch_posts_and_comments(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx',\n comments_out='data/comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(PostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, None)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)",
"def comments_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"comments\", access_token)",
"def read_chumps_from_file(self):\n for chump in self.comment_store.get_stored_chumps():\n self.add_chump(chump)",
"def get_file(file_name):\r\n f = open(file_name)\r\n\r\n tids = f.readlines()\r\n \r\n dataset = [(int(tid), get_from_id(int(tid))) for tid in tids]\r\n\r\n f.close()\r\n return dataset",
"def get_file_contents(file_id, meta_err=False, user_settings=None):\r\n metadata = get_metadata(file_id, user_settings)\r\n if (metadata.get('fileExtension', None) == 'csv' or metadata.get('mimeType', None) == 'text/csv') and metadata.get(\r\n 'webContentLink', None):\r\n drive_service = CredentialManager.get_client_drive_service(user_settings)\r\n if user_settings is None:\r\n user_settings = threading.current_thread().settings\r\n temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)\r\n if not os.path.exists(temp_dir_path):\r\n os.makedirs(temp_dir_path)\r\n file_path = temp_dir_path + str(file_id) + \".csv\"\r\n if not os.path.exists(file_path):\r\n request = drive_service.files().get_media(fileId=file_id)\r\n fh = io.FileIO(file_path, mode='wb')\r\n downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024)\r\n done = False\r\n while done is False:\r\n status, done = downloader.next_chunk()\r\n fh.close()\r\n header, rows = [], []\r\n with open(file_path, 'rb') as csv_file:\r\n for line in csv_file.readlines():\r\n if not header:\r\n header = [str(heading).strip() for heading in str(line).split(',')]\r\n else:\r\n row = line.split(',')\r\n row_dict = {}\r\n for index, column in enumerate(row):\r\n row_dict[header[index]] = str(column).strip()\r\n rows.append(row_dict)\r\n return rows\r\n elif metadata.get('mimeType', None) == 'application/vnd.google-apps.fusiontable':\r\n ft_service = CredentialManager.get_client_fusion_table_service(user_settings)\r\n query = ft_service.query()\r\n table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False).execute(num_retries=3)\r\n result_rows = []\r\n columns = [str(column) for column in table['columns']]\r\n rows = table['rows']\r\n for row in rows:\r\n result_row = {}\r\n for index, cell in enumerate(row):\r\n result_row[columns[index]] = str(cell) if isinstance(cell, unicode) else cell\r\n result_rows.append(result_row)\r\n return result_rows\r\n elif meta_err:\r\n raise Exception('Unsupported file type for the file - ' + str(metadata['name'] + '.'))\r\n return []",
"def __call__(self,\n comment,\n file,\n id,\n ):\n optional_kwargs = {}\n\n return BaseAPIEndpoint.__call__(self,\n comment=comment,\n file=file,\n id=id,\n **optional_kwargs\n )",
"def getSongComments(self, songID, limit = 10, offset = 0, total = False):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n\n rid = \"R_SO_4_%s\" % songID\n\n currDict = {\n \"limit\" : limit,\n \"offset\" : offset,\n \"rid\" : rid,\n \"total\" : total,\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict, rid)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def _read_until_end_of_comments(self, fileobj):\n offset = fileobj.tell()\n line = fileobj.readline()\n if not line:\n raise EOFError(\"Read until EOF\")\n\n line = line.strip()\n if line.startswith(\"#\"):\n return self._read_until_end_of_comments(fileobj)\n\n fileobj.seek(offset)",
"def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)",
"def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret",
"def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all",
"def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()",
"def _readComments(self): \n self.NSCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readSpecialComments()\n self.NNCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readNormalComments()",
"def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments",
"def get_comments(self, asset_id):\n endpoint = '/assets/{}/comments'.format(asset_id)\n return self._api_call('get', endpoint)"
] | [
"0.6400023",
"0.62088317",
"0.6198404",
"0.5723124",
"0.54117316",
"0.5385133",
"0.53159404",
"0.524866",
"0.51766294",
"0.5171449",
"0.5160384",
"0.5138199",
"0.51008713",
"0.50976366",
"0.50750273",
"0.5037629",
"0.50367475",
"0.502809",
"0.5021747",
"0.50176424",
"0.50108653",
"0.49998692",
"0.4985475",
"0.49848107",
"0.49784312",
"0.49581736",
"0.49569604",
"0.49517962",
"0.49186343",
"0.49157882"
] | 0.7850107 | 0 |
Generate an instance of the HPE OneView client. Generates an instance of the HPE OneView client using the hpOneView lib. | def get_hponeview_client():
manager_url = prepare_manager_url(CONF.oneview.manager_url)
config = {
"ip": manager_url,
"credentials": {
"userName": CONF.oneview.username,
"password": CONF.oneview.password
}
}
return hponeview_client.OneViewClient(config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_heat_client(self):\n\n print \"\\t* Generating heat client\"\n # request a new auth token from keystone\n keystone = ksclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)\n auth_token = keystone.auth_token\n heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id)\n\n # instantiate client\n self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)",
"def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')",
"def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)",
"def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')",
"def client(self, hostname_or_ip):\n hostname, aliases, ip = self.resolve(hostname_or_ip)\n try:\n client = Client.objects.get(name=hostname)\n printer_name = client.label_printer.cups_printer_name\n self.cups_server = client.label_printer.cups_server_hostname\n cups_hostname = self.cups_server.hostname\n self._label_printer = client.label_printer\n except Client.DoesNotExist:\n self.cups_server = 'localhost' # default\n cups_hostname = self.cups_server.hostname\n self._client = ClientTuple(hostname, aliases, ip, None, cups_hostname)\n try:\n printer_name = self.label_printer.cups_printer_name\n except AttributeError:\n printer_name = None\n self._client = ClientTuple(hostname, aliases, ip, printer_name, cups_hostname)",
"def test_create_hyperflex_hxdp_version(self):\n pass",
"def create_client(self) -> None:\n pass",
"def makehxx(self, gen):\n services = []\n for serv in self.services:\n service = \" %s %s(\" % (corba_rtn_type(serv.ret,gen.module.name),serv.name)\n service = service+gen.makeArgs(serv)+\");\"\n services.append(service)\n\n if self.addedmethods:\n services.append(self.addedmethods)\n servicesdef = \"\\n\".join(services)\n\n inheritedclass=self.inheritedclass\n if self.inheritedclass:\n inheritedclass= \" public virtual \" + self.inheritedclass + \",\"\n\n return hxxCompo.substitute(component=self.name, module=gen.module.name,\n servicesdef=servicesdef, inheritedclass=inheritedclass,\n compodefs=self.compodefs)",
"def pure_client():\n return VoximplantClient(\n host='https://api.host.com',\n account_id='100500',\n api_key='secret',\n )",
"def __init__(self, client):\n\n self.__route_tag = \"wallet-ng\"\n self.__path = \"poe\"\n self.__client = client",
"def __init__(self, client, boiler):\n self._client = client\n self._boiler = boiler\n\n self._operation_list = list(HA_OPMODE_TO_GH)",
"def create_foundation_sdk_instance():\n instance = LockedInstance(\n lock=threading.Lock(),\n instance=SDK(**(request.get_json() or {})),\n module=None,\n entity=SDK_ENTITY_NAME,\n uuid=str(uuid.uuid4().hex),\n created_at=datetime.datetime.utcnow(),\n )\n STORE[instance.uuid] = instance\n response = app.response_class(\n response=json.dumps(serialise_instance(instance)),\n status=201,\n mimetype='application/json'\n )\n return response",
"def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])",
"def start_horizon(self):\n if self.is_client():\n return\n\n self._logger.info(\"Starting Horizon...\")\n\n db_name = \"stellar_%d_db\" % self.my_id\n horizon_db_name = \"stellar_horizon_%d_db\" % self.my_id\n args = '--port %d ' \\\n '--ingest ' \\\n '--db-url \"postgresql://tribler:tribler@localhost:5432/%s?sslmode=disable\" ' \\\n '--stellar-core-db-url \"postgresql://tribler:tribler@localhost:5432/%s?sslmode=disable\" ' \\\n '--stellar-core-url \"http://127.0.0.1:%d\" ' \\\n '--network-passphrase=\"Standalone Pramati Network ; Oct 2018\" ' \\\n '--apply-migrations ' \\\n '--log-level=info ' \\\n '--history-archive-urls \"file:///tmp/stellar-core/history/vs\" ' \\\n '--per-hour-rate-limit 0' % (19000 + self.my_id, horizon_db_name, db_name, 11000 + self.my_id)\n\n # First initialize Horizon with an empty genesis state\n cmd = '/home/martijn/gocode/bin/horizon expingest init-genesis-state %s > horizon_expingest.out 2>&1' % args\n os.system(cmd)\n\n # Now start Horizon\n cmd = '/home/martijn/gocode/bin/horizon %s' % args\n out_file = open(\"horizon.out\", \"w\")\n self.horizon_process = subprocess.Popen(shlex.split(cmd), stdout=out_file, stderr=out_file)",
"def page_client() -> Client:\n return Client()",
"def main():\n # get the service API URL\n params = demisto.params()\n environment_id = params.get('environment_id')\n region = params.get('region')\n tld = '.com'\n\n if region == 'EU':\n tld = '.eu'\n elif region == 'Asia':\n tld = '.asia'\n\n base_url = urljoin(f'https://api.pingone{tld}', f'/v1/environments/{environment_id}/')\n auth_url = urljoin(f'https://auth.pingone{tld}', f'/{environment_id}/as/token')\n\n client_id = demisto.params().get('credentials', {}).get('identifier')\n client_secret = demisto.params().get('credentials', {}).get('password')\n\n verify_certificate = not demisto.params().get('insecure', False)\n proxy = demisto.params().get('proxy', False)\n\n auth_params = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'base_url': base_url,\n 'auth_url': auth_url,\n }\n\n demisto.debug(f'Command being called is {demisto.command()}')\n\n commands = {\n 'test-module': test_module,\n 'pingone-unlock-user': unlock_user_command,\n 'pingone-deactivate-user': deactivate_user_command,\n 'pingone-activate-user': activate_user_command,\n 'pingone-set-password': set_password_command,\n 'pingone-add-to-group': add_user_to_group_command,\n 'pingone-remove-from-group': remove_from_group_command,\n 'pingone-get-groups': get_groups_for_user_command,\n 'pingone-get-user': get_user_command,\n 'pingone-create-user': create_user_command,\n 'pingone-update-user': update_user_command,\n 'pingone-delete-user': delete_user_command,\n }\n\n command = demisto.command()\n\n client = Client(\n auth_params=auth_params,\n base_url=base_url,\n verify=verify_certificate,\n proxy=proxy\n )\n\n try:\n if command in commands:\n human_readable, outputs, raw_response = commands[command](client, demisto.args())\n return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response)\n\n # Log exceptions\n except Exception as e:\n return_error(f'Failed to execute {command} command. Error: {str(e)}')",
"def build_client(url=None, port_name=None, **kwargs):\n if url is None and port_name is None:\n mirror = get_online_vso_url()\n if mirror is None:\n raise ConnectionError(\"No online VSO mirrors could be found.\")\n url = mirror['url']\n port_name = mirror['port']\n elif url and port_name:\n if not check_connection(url):\n raise ConnectionError(f\"Can't connect to url {url}\")\n else:\n raise ValueError(\"Both url and port_name must be specified if either is.\")\n\n if \"plugins\" not in kwargs:\n kwargs[\"plugins\"] = [SunPyLoggingZeepPlugin()]\n\n client = zeep.Client(url, port_name=port_name, **kwargs)\n client.set_ns_prefix('VSO', 'http://virtualsolar.org/VSO/VSOi')\n return client",
"def view_with_client_from_config(cls, conf, config_section, logger=None):\n if cls == PapiViewClient:\n # we're implementing this factory in the base-class, so we don't\n # have to copy-paste it into every single view. This means that\n # someone could invoke it in the abstract base, which does not make\n # any sense, so we have to catch it\n raise Exception(\"Factory must be called on a specific \"\n \"PapiViewClient subclass\")\n\n base_client = papi_client.papi_client.PapiClientFactory.\\\n client_from_config(conf, config_section, logger)\n return cls.client_from_config(base_client, conf, logger)",
"def create_sparrow_client() -> SparrowClient:\n atomic_hessian_switch = True\n pos = [\n (-0.7 * su.BOHR_PER_ANGSTROM, 0.0, 0.0),\n (0.7 * su.BOHR_PER_ANGSTROM, 0.0, 0.0),\n ]\n element_strings = [\"H\", \"H\"]\n settings = CalculatorSettings().__dict__\n\n client = SparrowClient(atomic_hessian_switch, settings, 1)\n client.update_calculator(pos, element_strings, settings)\n return client",
"def _setup_hpos():\n hpo_dao = HPODao()\n hpo_dao.insert(\n HPO(hpoId=UNSET_HPO_ID, name=\"UNSET\", displayName=\"Unset\", organizationType=OrganizationType.UNSET)\n )\n hpo_dao.insert(\n HPO(hpoId=PITT_HPO_ID, name=\"PITT\", displayName=\"Pittsburgh\", organizationType=OrganizationType.HPO)\n )\n hpo_dao.insert(\n HPO(hpoId=AZ_HPO_ID, name=\"AZ_TUCSON\", displayName=\"Arizona\", organizationType=OrganizationType.HPO)\n )\n\n org_dao = OrganizationDao()\n org_dao.insert(\n Organization(\n organizationId=AZ_ORG_ID,\n externalId=\"AZ_TUCSON_BANNER_HEALTH\",\n displayName=\"Banner Health\",\n hpoId=AZ_HPO_ID,\n )\n )\n org_dao.insert(\n Organization(\n organizationId=PITT_ORG_ID,\n externalId=\"PITT_BANNER_HEALTH\",\n displayName=\"PITT display Banner Health\",\n hpoId=PITT_HPO_ID,\n )\n )\n\n site_dao = SiteDao()\n site_dao.insert(\n Site(\n siteName=\"Monroeville Urgent Care Center\",\n googleGroup=\"hpo-site-monroeville\",\n mayolinkClientNumber=7035769,\n organizationId=PITT_ORG_ID,\n hpoId=PITT_HPO_ID,\n )\n )\n site_dao.insert(\n Site(\n siteName=\"Phoenix Urgent Care Center\",\n googleGroup=\"hpo-site-bannerphoenix\",\n mayolinkClientNumber=7035770,\n organizationId=PITT_ORG_ID,\n hpoId=PITT_HPO_ID,\n )\n )\n site_dao.insert(\n Site(\n siteName=\"Phoenix clinic\",\n googleGroup=\"hpo-site-clinic-phoenix\",\n mayolinkClientNumber=7035770,\n organizationId=AZ_ORG_ID,\n hpoId=AZ_HPO_ID,\n )\n )",
"def build_hpo_from_args(cls, args):\n return cls(args)",
"def build_hpo_from_args(cls, args):\n return cls(args)",
"def main():\n utils.vip_main(ahu_agent, version=__version__)",
"def horde_init(self, horde_info= {}):",
"def create_new_client(main: MainApplication) -> str:\n client = main.create_window(\"client\", \"IPLMS\", main.client_ui.get_layout())\n client[\"_CLIENT_ID_\"].Update(getUUID())\n client[\"_CP_NAME_IP_\"].Update(\"\")\n client[\"_CP_PHONE_IP_\"].Update(\"\")\n client[\"_CP_ADDRESS_IP_\"].Update(\"\")\n client.un_hide()\n event, values = client.read()\n client_logic = Client(main, event, values)\n name = client_logic.run(main)\n client.hide()\n return name",
"def client_setup(self):\n self.client = Client()",
"def create_generated_client() -> None:\n print(\"Generating client\")\n\n delete_generated_client()\n args = [\n \"{}/../scripts/generate.sh\".format(ROOT),\n \"-i\",\n \"http://localhost:8000/openapi.json\",\n \"-p\",\n CLIENT_NAME,\n \"--include-auth\",\n \"-o\",\n ROOT,\n \"-t\",\n \"/tmp\",\n \"-m\",\n ]\n\n process_result = subprocess.run(args, capture_output=True)\n\n with open(os.path.join(LOG_DIR, \"generation.log\"), \"wb\") as file:\n file.write(process_result.stdout)\n\n with open(os.path.join(LOG_DIR, \"generation.err\"), \"wb\") as file:\n file.write(process_result.stderr)\n\n if process_result.returncode != 0: # pragma: no cover\n if process_result.stderr:\n sys.stderr.write(process_result.stderr.decode(\"utf-8\"))\n pytest.exit(\n \"Failed to generate client api, code {}\"\n \"\\nLogs are in logs/generation.log and logs/generation.err\".format(process_result.returncode),\n returncode=process_result.returncode,\n )\n\n print(\"Client created in {}, logs in logs/generation.log\\n\".format(CLIENT_DIR))",
"def generatePrimaryHDU(hdu_header='header_primaryHDU.txt'):\n \n hdu = pf.PrimaryHDU()\n cards = generateCards(hdu_header)\n \n for card in cards:\n #print card\n if card.keyword == 'COMMENT':\n pass\n hdu.header.add_comment(card.value)\n elif card.keyword == 'HISTORY':\n pass\n hdu.header.add_history(card.value)\n else:\n hdu.header.set(card.keyword, card.value, card.comment)\n \n return hdu",
"def __init__(self, hostname, port, username, password, tenant_id, connect=True):\n self.cmd_gw_ws_api = HawkularWebsocketClient(\n url=\"ws://{}:{}/hawkular/command-gateway/ui/ws\".format(hostname, port),\n headers={\"Hawkular-Tenant\": tenant_id, \"Accept\": \"application/json\"},\n username=username, password=password)\n self.tenant_id = tenant_id\n if connect:\n self.cmd_gw_ws_api.connect()",
"def Create(options: Options) -> HolLight:\n return HolLight(options)"
] | [
"0.62012696",
"0.59542215",
"0.5604468",
"0.53502053",
"0.5333491",
"0.52706295",
"0.5257632",
"0.5231252",
"0.5147483",
"0.5104047",
"0.50083214",
"0.5004166",
"0.49704948",
"0.49391007",
"0.49293235",
"0.49172926",
"0.49049976",
"0.48713294",
"0.48525918",
"0.48477373",
"0.48474872",
"0.48474872",
"0.481795",
"0.4804431",
"0.48032767",
"0.47947648",
"0.47837502",
"0.47799268",
"0.47650903",
"0.4762868"
] | 0.78882736 | 0 |
Get the needed information to access iLO. Get the host_ip and a token of an iLO remote console instance which can be used to perform operations on that controller. | def _get_ilo_access(remote_console):
url = remote_console.get('remoteConsoleUrl')
url_parse = parse.urlparse(url)
host_ip = parse.parse_qs(url_parse.netloc).get('addr')[0]
token = parse.parse_qs(url_parse.netloc).get('sessionkey')[0]
return host_ip, token | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_details(module):\n\n login_list = [module.params['login'], os.getenv('DCI_LOGIN')]\n login = next((item for item in login_list if item is not None), None)\n\n password_list = [module.params['password'], os.getenv('DCI_PASSWORD')]\n password = next((item for item in password_list if item is not None), None)\n\n url_list = [module.params['url'], os.getenv('DCI_CS_URL')]\n url = next((item for item in url_list if item is not None), 'https://api.distributed-ci.io')\n\n return login, password, url",
"def info(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.get_info())\n console.print(f\"[{ip}] Light {id}:\\n{json.dumps(resp, indent=2)}\")",
"def connection_details(self):\n try:\n self.open(\"https://ipinfo.io/json\")\n self.log.debug(\"IPINFO Server returned (%s)\", self.response().content)\n res = json.loads(self.response().content.decode('utf-8'))\n except (requests.exceptions.ProxyError,\n requests.exceptions.ConnectionError):\n return {'ip': 'Unknown'}\n except ValueError:\n self.log.error(\"Server returned no JSON (%s)\", self.response().content)\n return {'ip': 'Unknown'}\n except Exception as exc: # TODO\n self.log.error(\"Unknown exception %s\", exc)\n return {'ip': 'Unknown'}\n else:\n return res",
"async def get_ip():\n\turl = 'https://cheese.formice.com/api/tfm/ip'\n\tdata = await request_api(url)\n\n\tif not len(data):\n\t\t# Empty dictionary, request failed, let's use default server IP\n\t\tsuccess = True\n\telse:\n\t\tsuccess = data.pop('success', False)\n\t\terror = data.pop('error', '').capitalize()\n\t\tdescription = data.pop('description', 'No description was provided.')\n\n\tif not success:\n\t\tif error == 'Maintenance':\n\t\t\traise MaintenanceError('The game is under maintenance.')\n\n\t\tif error == 'Internal':\n\t\t\traise InternalError(description)\n\n\t\traise EndpointError(f'{error}: {description}')\n\n\treturn Keys(version=666, **data.get('server', {}))",
"def get_token_info_remote(self, token_info_url):",
"def get_info(task):\n cmd = \"show version\"\n sh_version = task.run(\n task=netmiko_send_command, command_string=cmd, use_textfsm=True\n )\n # test Nornir result\n test_norn_textfsm(task, sh_version.result, cmd)\n # save show version output to task.host\n task.host[\"sh_version\"] = sh_version.result[0]\n # pull model from show version\n sw_model = task.host[\"sh_version\"][\"hardware\"][0].split(\"-\")\n # save model to task.host\n task.host[\"sw_model\"] = sw_model[1]\n # get interfaces; use TextFSM\n cmd = \"show interface switchport\"\n interfaces = task.run(\n task=netmiko_send_command, command_string=cmd, use_textfsm=True\n )\n # test Nornir result\n test_norn_textfsm(task, interfaces.result, cmd)\n # save interfaces to task.host\n task.host[\"intfs\"] = interfaces.result\n # convert vlans in inventory from int to str\n vlans = []\n for vlan in task.host[\"vlans\"]:\n vlans.append(str(vlan))\n # save list of vlans strings back to task.host\n task.host[\"vlans\"] = vlans\n # create vlan_list string\n task.host[\"vlan_list\"] = \",\".join(task.host[\"vlans\"])\n\n # choose template based on switch model\n if \"3750V2\" in task.host[\"sw_model\"] or \"3750G\" in task.host[\"sw_model\"]:\n # 3750V2's use IBNSv1\n task.host[\"ibns_ver\"] = \"v1\"\n c_print(f\"*** {task.host}: IBNS version 1 ***\")\n\n elif \"3750X\" in task.host[\"sw_model\"]:\n # 3750X's use IBNSv2-modified\n task.host[\"ibns_ver\"] = \"v2-alt\"\n c_print(f\"*** {task.host}: IBNS version 2 (modified) ***\")\n\n else:\n # all else use IBNSv2\n task.host[\"ibns_ver\"] = \"v2\"\n c_print(f\"*** {task.host}: IBNS version 2 ***\")\n\n # get ip interface brief; use TextFSM\n cmd = \"show ip interface brief | e unas\"\n ip_int_br = task.run(\n task=netmiko_send_command, command_string=cmd, use_textfsm=True\n )\n # test Nornir result\n test_norn_textfsm(task, ip_int_br.result, cmd)\n # save ip interfaces to task.host\n task.host[\"ip_int_br\"] = ip_int_br.result",
"def pull_info(task):\n\n interface_result = task.run(task=send_command, command=\"show interfaces\")\n task.host[\"facts\"] = interface_result.scrapli_response.genie_parse_output()\n interfaces = task.host[\"facts\"]\n for interface in interfaces:\n try:\n mac_addr = interfaces[interface][\"mac_address\"]\n if target == mac_addr:\n target_list.append(mac_addr)\n intf = interface\n print_info(task, intf)\n except KeyError:\n pass",
"def get_ip(self):",
"def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}",
"def getRemoteHost():",
"def logon(self):\n log_object.log_debug(\"logon started\")\n logon_obj = LogonRequest.Logon()\n self.ip,self.x_api_session = logon_obj.LogonRequest()\n global ip,x_api_session\n ip = str(self.ip)\n x_api_session = str(self.x_api_session)",
"def get_ilorest_client(server_hardware):\n oneview_client = get_hponeview_client()\n remote_console = oneview_client.server_hardware.get_remote_console_url(\n server_hardware\n )\n host_ip, ilo_token = _get_ilo_access(remote_console)\n base_url = \"https://%s:%s\" % (host_ip, ILOREST_BASE_PORT)\n return redfish.rest_client(base_url=base_url, sessionkey=ilo_token)",
"def remote_info():\n run('uname -a')",
"def get_http_details(self):\n self.clear_screen()\n port = input('enter http port \\n'\n 'default [8080]: ')\n default = 8080\n port = set_values(port, default)\n port = validate_port(port)\n ignition_dir = input('specify dir where ignition files will be placed \\n'\n 'directory will be created under /var/www/html \\n'\n 'default [ignition]: ')\n default = 'ignition'\n ignition_dir = set_values(ignition_dir, default)\n ocp_version = input('specify the version of ocp \\n'\n 'default [4.3]: ')\n default = 4.3\n ocp_version = set_values(ocp_version, default)\n logging.info('adding http_port: {} http_ignition: {} version: {}'.format(port, ignition_dir, ocp_version))\n self.inventory_dict['csah']['vars']['http_port'] = int(port)\n self.inventory_dict['csah']['vars']['os'] = 'rhcos'\n self.inventory_dict['csah']['vars']['http_ignition'] = ignition_dir\n self.inventory_dict['csah']['vars']['version'] = ocp_version",
"def getHostInfo():",
"async def identify(self):\n await self.send({\n \"op\": 2,\n \"d\" : {\n \"token\" : self.client.token,\n \"properties\": {\n \"$os\" : platform,\n \"$browser\": \"SpeedCord\",\n \"$device\" : \"SpeedCord\"\n },\n \"intents\" : self.client.intents,\n \"shard\" : (self.id, self.client.shard_count)\n }\n })",
"def ip_command():\n # 1. Get input host from Demisto\n ip = demisto.args().get('ip')\n if not is_ip_valid(ip):\n return_error('Invalid IP address, Please retry with a valid IP address')\n # 2. Get the host reputation from SlashNext API\n response = ip_lookup(ip=ip)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n dbot_score_cont, ip_cont = get_dbot_std_context(\n ip, 'IP', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(ip, 'IP', response.get('threatData'))\n\n ec = {\n 'SlashNext.IP(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'IP': ip_cont\n }\n\n title = 'SlashNext Phishing Incident Response - IP Lookup\\n' \\\n '##### ip = {}'.format(ip)\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)",
"def retrieve_connection_info():\n # Define the global variables at this module level\n global base_url\n global api_credentials\n base_url, api_credentials = core.get_connection_info()\n return",
"def retrieve_connection_info():\n # Define the global variables at this module level\n global base_url\n global api_credentials\n base_url, api_credentials = core.get_connection_info()\n return",
"def main():\n with Scrapli(**MY_DEVICE) as conn:\n print(conn.get_prompt())\n print(conn.send_command(\"show run | i hostname\").result)",
"def opencloud_fetch_host_info( hostname ):\n raise Exception(\"Opencloud support not implemented\")",
"def print_info(task, intf):\n\n rprint(\"\\n[green]*** TARGET IDENTIFIED ***[/green]\")\n print(f\"MAC ADDRESS: {target} is present on {task.host}'s {intf}\")\n rprint(\"\\n[cyan]GENERATING DETAILS...[/cyan]\")\n cdp_result = task.run(task=send_command, command=\"show cdp neighbors\")\n task.host[\"cdpinfo\"] = cdp_result.scrapli_response.genie_parse_output()\n dev_id = \"\"\n index = task.host[\"cdpinfo\"][\"cdp\"][\"index\"]\n for num in index:\n local_intf = index[num][\"local_interface\"]\n if local_intf == intf:\n dev_id = index[num][\"device_id\"]\n port_id = index[num][\"port_id\"]\n\n ver_result = task.run(task=send_command, command=\"show version\")\n task.host[\"verinfo\"] = ver_result.scrapli_response.genie_parse_output()\n version = task.host[\"verinfo\"][\"version\"]\n serial_num = version[\"chassis_sn\"]\n oper_sys = version[\"os\"]\n uptime = version[\"uptime\"]\n version_short = version[\"version_short\"]\n print(f\"DEVICE MGMT IP: {task.host.hostname}\")\n print(f\"DEVICE SERIAL NUMBER: {serial_num}\")\n print(f\"DEVICE OPERATION SYSTEM: {oper_sys}\")\n print(f\"DEVICE UPTIME: {uptime}\")\n print(f\"DEVICE VERSION: {version_short}\")\n if dev_id:\n rprint(\"[cyan]REMOTE CONNECTION DETAILS...[/cyan]\")\n print(f\"Connected to {port_id} on {dev_id}\")",
"def main():\n\n ericsson_connect = {\n \"device_type\": \"ericsson_ipos\",\n \"ip\": \"1.1.1.1\",\n \"username\": \"admin\",\n \"password\": \"admin\",\n }\n\n net_connect = ConnectHandler(**ericsson_connect)\n output = net_connect.send_command(\"show ip int brief\")\n print(output)\n\n output_commit = net_connect.commit()\n print(output_commit)",
"def get_iproutes(auth):\n url_iproutes = \"http://\" + auth.ipaddr + \"/rest/\"+auth.version+\"/ip-route\"\n try:\n r = requests.get(url_iproutes, headers = auth.cookie)\n iproutes = json.loads(r.text)['ip_route_element']\n return iproutes\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + \" get_iproutes: An Error has occured\"",
"def read_lwm2m_info():\n response, secure = lwm2m.get_lwm2m_security_info()\n \n if response != return_values.RESULT_SUCCESS:\n raise Exception(\"Failed to retrieve the lwm2m connection information. Return value {}.\".format(response))\n try:\n lwm2m_uri = \"coaps://\" + secure[\"LWM2M_HOST_NAME\"] + \":5684\"\n lwm2m_endpoint = secure[\"LWM2M_ENDPOINT\"]\n lwm2m_identity = secure[\"LWM2M_IDENTITY\"]\n lwm2m_security = secure[\"LWM2M_SECRET_KEY\"]\n except KeyError:\n raise Exception(\"The lwm2m security info message received from the api server is not in the expected format. Unable to proceed.\")\n \n return lwm2m_uri, lwm2m_endpoint, lwm2m_identity, lwm2m_security",
"def request_from_identity(cls, identity):\n params = identity.request_params\n params.update({\n 'action': 'info',\n 'ipaddr': 'true',\n 'hdd': 'true',\n 'mem': 'true',\n 'bw': 'true',\n 'status': 'true'\n })\n\n response = requests.get(\n identity.vendor.endpoint + cls._ENDPOINT, params=params)\n if response.status_code != requests.codes.ok:\n raise RuntimeError(\n 'Unable to retrieve host: {0}'.format(response.text))\n return cls.from_response(response.text, identity)",
"def ip_get_info(ipaddr, show=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=IPManager',\n data={'type': 'ip', 'query': ipaddr})\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # parse results\n trr = bs.table.tbody.find_all('tr')\n if len(trr) > 0:\n # get IP id\n try:\n t_id = re.match(r'.+id=([0-9]+).+', trr[0].find_all('td')[8].a['href'], re.I).group(1)\n except:\n t_id = False\n\n # gather IP infos\n t_info = {\n 'id': t_id,\n 'ip': trr[0].find_all('td')[0].string,\n 'domain': trr[0].find_all('td')[1].string,\n 'server': trr[0].find_all('td')[2].string,\n 'net': trr[0].find_all('td')[3].string,\n 'usage': trr[0].find_all('td')[4].string,\n 'user': trr[0].find_all('td')[5].string,\n 'assigned': trr[0].find_all('td')[6].string,\n 'note': trr[0].find_all('td')[7].string,\n 'edit_url': trr[0].find_all('td')[8].a['href']\n }\n else:\n t_info = None\n\n if show:\n if t_info:\n print(\"[%(usage)s] %(ip)s (%(net)s) --> %(server)s [User: %(user)s / Domain: %(domain)s / Assigned: %(assigned)s]\\n\\tNote: %(note)s\" % t_info)\n else:\n print(\"IP address '%s' not found\" % (ipaddr))\n\n return (t_info, bs)",
"def get(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.get_state())\n console.print(f\"[{ip}] Light {id} State:\\n{json.dumps(resp, indent=2)}\")",
"def get_ip(tag,env=None,eip=False):\n api_url = 'http://api.rahulinux.io/ip?host={0}&env={1}&eip={2}'\n try:\n resp = requests.get(api_url.format(tag,env,eip))\n except requests.exceptions.RequestException as e:\n return e\n if len(resp.text) >= 30:\n return resp.text.split()\n return [ resp.text ]",
"def get_hub_info(self, session_key):\n \n username = None\n password = None\n hub_address = None\n hub_port = None\n \n uri = urllib.quote('/servicesNS/nobody/insteon_control/admin/alert_actions/send_insteon_command') + '?output_mode=json'\n \n try:\n serverResponse, serverContent = splunk.rest.simpleRequest(uri, method='GET', sessionKey=session_key)\n info = json.loads(serverContent)\n \n username = info['entry'][0]['content']['param.username']\n password = info['entry'][0]['content']['param.password']\n hub_address = info['entry'][0]['content']['param.address']\n hub_port = info['entry'][0]['content']['param.port']\n \n except AuthenticationFailed as e:\n raise e\n except Exception as e: \n self.logger.exception(\"Error when attempting to load send_insteon_command alert action configuration\")\n \n raise e\n \n return hub_address, hub_port, username, password"
] | [
"0.57575613",
"0.57030374",
"0.5658261",
"0.561578",
"0.55102676",
"0.5456767",
"0.54272753",
"0.54270345",
"0.53561795",
"0.53542095",
"0.5327382",
"0.5325279",
"0.5313275",
"0.5305687",
"0.528909",
"0.52887946",
"0.52811927",
"0.52711695",
"0.52711695",
"0.526446",
"0.52539116",
"0.5251917",
"0.5238199",
"0.5226711",
"0.5212846",
"0.52087307",
"0.5206741",
"0.5201003",
"0.5191798",
"0.5187584"
] | 0.808896 | 0 |
Verify if fields and namespaces of a node are valid. Verifies if the 'driver_info' field and the 'properties/capabilities' namespace exist and are not empty. | def verify_node_info(node):
capabilities_dict = utils.capabilities_to_dict(
node.properties.get('capabilities', '')
)
driver_info = node.driver_info
_verify_node_info('properties/capabilities', capabilities_dict,
REQUIRED_ON_PROPERTIES)
_verify_node_info('driver_info', driver_info,
REQUIRED_ON_DRIVER_INFO) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _verify_node_info(node_namespace, node_info_dict, info_required):\n missing_keys = set(info_required) - set(node_info_dict)\n\n if missing_keys:\n raise exception.MissingParameterValue(\n _(\"Missing the keys for the following OneView data in node's \"\n \"%(namespace)s: %(missing_keys)s.\") %\n {'namespace': node_namespace,\n 'missing_keys': ', '.join(missing_keys)\n }\n )\n\n # False and 0 can still be considered as valid values\n missing_values_keys = [k for k in info_required\n if node_info_dict[k] in ('', None)]\n if missing_values_keys:\n missing_keys = [\"%s:%s\" % (node_namespace, k)\n for k in missing_values_keys]\n raise exception.MissingParameterValue(\n _(\"Missing parameter value for: '%s'\") % \"', '\".join(missing_keys)\n )",
"def verify_namespace_attrs(self, node):\n for cls in node.classes:\n for var in cls.variables:\n self.check_var_attrs(cls, var)\n for func in cls.functions:\n self.check_fcn_attrs(func)\n\n for func in node.functions:\n self.check_fcn_attrs(func)\n\n for ns in node.namespaces:\n self.verify_namespace_attrs(ns)",
"def validate(self, node):",
"def validate(self, node_uuid):\n # check if node exists\n node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)\n return pecan.request.rpcapi.validate_driver_interfaces(\n pecan.request.context, node.uuid)",
"def test_validation_class(self):\n\n for data in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata', 'tensordata', 'pcsdata'):\n v = self.web.query_nodes(key=data)\n\n if not v.empty():\n self.assertTrue(v.validate())",
"def validate_fields(self, tree):\n # Check fields\n fields = list(tree.keys())\n for k in self.fields:\n assert (k in fields)",
"def validate(self, task):\n # FIXME(lintan): validate hangs if unable to reach AMT, so dont\n # connect to the node until bug 1314961 is resolved.\n amt_common.parse_driver_info(task.node)",
"def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks",
"def test_field_nullable(self):\n node_dict = {\n 'host_name': 'abc',\n 'local_router_id': '1.1.1.1',\n 'as_num': 100,\n 'bgpls_id': '0.0.0.0',\n 'igp_id': '0.0.0.0'\n }\n node = Node(**node_dict)\n for name, field in node_dict.items():\n self.assertEqual(field, node.__dict__[name])",
"def verify_attrs(self):\n self.verify_namespace_attrs(self.newlibrary.wrap_namespace)",
"def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)",
"def _validate_node_properties(node_properties):\n prefix = \"node_properties\"\n\n node_config = [\n {\n 'field_name': 'num_nodes',\n 'field_value': node_properties.get('num_nodes'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n {\n 'field_name': 'main_node',\n 'field_value': node_properties.get('main_node'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n {\n 'field_name': 'node_range_properties',\n 'field_value': node_properties.get('node_range_properties'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n ]\n _process_config(node_config)\n\n node_range_properties = node_properties.get('node_range_properties')\n\n node_range_prefix = prefix + \"__node_range_properties\"\n for node in node_range_properties:\n container_properties = node.get('container')\n _validate_required_field(\n field_name='container',\n field_value=container_properties,\n prefix=node_range_prefix\n )\n _validate_field_type(\n field_name='container',\n field_value=node.get('container'),\n prefix=node_range_prefix,\n required_type=dict,\n )\n\n container_prefix = node_range_prefix + '__container'\n _validate_container_properties(container_properties, prefix=container_prefix)",
"def test_get_node_properties(self):\n pass",
"def test_node_exists():\n assert Node",
"def test_node_exists():\n assert Node",
"def node_filter_capabilities_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if values is not None: # pylint: disable=too-many-nested-blocks\n node_type = presentation._get_node_type(context)\n if node_type is not None:\n capabilities = node_type._get_capabilities(context)\n for name, value in values:\n capability = capabilities.get(name)\n if capability is not None:\n properties = value.properties\n capability_properties = capability.properties\n if (properties is not None) and (capability_properties is not None):\n for property_name, _ in properties:\n if property_name not in capability_properties:\n context.validation.report(\n u'node filter refers to an unknown capability definition'\n u' property in \"{0}\": {1}'\n .format(node_type._name, property_name),\n locator=presentation._locator, level=Issue.BETWEEN_TYPES)\n else:\n context.validation.report(\n u'node filter refers to an unknown capability definition in \"{0}\": {1}'\n .format(node_type._name, name),\n locator=presentation._locator, level=Issue.BETWEEN_TYPES)",
"def validate(self):\n # double-checks node/device names matches dictionary keys\n for name, node in self.nodes.iteritems():\n if name != node.name:\n raise ConfigurationNameMismatchError(name, node.name)\n self.validateName(node)\n\n # make sure system-manager alias exists\n if \"system-manager\" not in self.aliases:\n raise ConfigurationMissingSystemManagerAliasError()\n\n # make sure the node that the alias points to exists\n for alias, nodeName in self.aliases.iteritems():\n if nodeName not in self.nodes:\n raise ConfigurationMissingAliasNodeError(alias, nodeName)\n\n # make sure there is one and only one active node\n activeNodes = [node.name for node in self.nodes.values() if node.role == Roles.ACTIVE]\n if not activeNodes:\n raise ConfigurationMissingActiveNodeError()\n if len(activeNodes) > 1:\n raise ConfigurationTooManyActiveNodesError(activeNodes)",
"def test_validate_valid_org(self):\r\n assert self.org_tree != 0",
"def test_registration_empty_Fields(self):\r\n print('========================================================================')\r\n print('Negative test for check the validation entering the strigs with spaces on registration fields')\r\n # Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n # cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n str_with_spaces = ' '\r\n\r\n self.reg_page.fill_name(str_with_spaces)\r\n self.reg_page.fill_email(str_with_spaces)\r\n self.reg_page.fill_password(str_with_spaces)\r\n self.reg_page.fill_confirm_password(str_with_spaces)\r\n\r\n self.reg_page.click_sign_up_btn()\r\n\r\n #test that regiastartion page is opened\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n prifileObj = RegistrationProfile()\r\n\r\n time.sleep(3)\r\n #get count elements with error message\r\n cnt_error = self.reg_page.get_count_hasError_fields()\r\n print('cnt_error='+str(cnt_error))\r\n\r\n #check that we have right the error elements count\r\n\r\n assert cnt_error == prifileObj.count_registration_hasError_fields, \\\r\n \"Count requirements fields has Errors doesn't match\"\r\n\r\n # check that we have right the header about incorrect input dara\r\n assert self.reg_page.is_error_validation_header(), \"No error header\"\r\n\r\n #check that each required field has uder the right error validation text\r\n\r\n #check field Name\r\n assert self.reg_page.get_hasError_validation_text_for_field('Name') == prifileObj.valid_requirement_text_for_name, \\\r\n \"No validation message for Name field\"\r\n #check field Email Adress\r\n assert self.reg_page.get_hasError_validation_text_for_field('E-Mail Address') == prifileObj.valid_requirement_text_for_email, \\\r\n \"No validation message for Email field\"\r\n # check field Password\r\n assert self.reg_page.get_hasError_validation_text_for_field(\r\n 'Password') == prifileObj.valid_requirement_text_for_password, \\\r\n \"No validation message for Password field\"\r\n\r\n print('--------- SUCCESS test_registration_empty_Fields -----------')\r\n driver.quit()",
"def test_validate_valid_person(self):\r\n assert self.person_tree != 0",
"def verify(self):\n if \"robot\" not in self.keys():\n raise Exception(\"No Section 'robot' in RobotConfig\")\n# if \"name\" not in self[\"robot\"]:\n# raise Exception(\"No robot.name specified in RobotConfig\")\n if \"controller_file\" not in self['robot']:\n raise Exception(\"No robot.controller_file specified in RobotConfig\")\n# if \"ros_master_uri\" not in self['robot']:\n# raise Exception(\"No robot.ros_master_uri specified in RobotConfig\")\n# if \"bluegigga_dev\" not in self['robot']:\n# raise Exception(\"No robot.bluegigga_dev specified in RobotConfig\")\n if \"robot_dev\" not in self['robot']:\n raise Exception(\"No robot.robot_dev specified in RobotConfig\")",
"def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)",
"def check(self):\n self.isNodes = True\n self.isFixable = False\n defaults = ['persp', 'top', 'front', 'side']\n project_defaults = ['__SUBSET__', '__SET__', '__CAMERA__', '__CHARS__', '__PROPS__']\n\n errorNodes = list()\n for each in pm.ls(assemblies=1):\n if str(each) in defaults:\n continue\n if str(each) in project_defaults:\n continue\n errorNodes.append(str(each))\n self.setStatus('OK')\n if len(errorNodes) > 0:\n self.setStatus('WARNING')\n self.errorNodes = errorNodes\n self.errorMessage = '%s numbers of extra root nodes found in the scene.' % str(len(self.errorNodes))",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def test_get_node_requirements(self):\n pass",
"def test_validate_connector(self):\n connector = {'wwpns': [\"not empty\"],\n 'wwnns': [\"not empty\"]}\n self.volume.driver.validate_connector(connector)",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']",
"def test_checkLinkoStructure(self):\n self.performTestForParams()",
"def validate(self, namespace):\n pass"
] | [
"0.62888443",
"0.5970122",
"0.5792318",
"0.5749915",
"0.57480246",
"0.5558528",
"0.555058",
"0.54527915",
"0.54470193",
"0.54410964",
"0.54384977",
"0.5430039",
"0.54069996",
"0.53724563",
"0.53724563",
"0.5347799",
"0.53438234",
"0.53368545",
"0.53343976",
"0.5322387",
"0.53197026",
"0.524414",
"0.5199359",
"0.5197789",
"0.51904196",
"0.51903695",
"0.51871526",
"0.5182259",
"0.5173227",
"0.51721656"
] | 0.7963399 | 0 |
Get OneView information from the node. | def get_oneview_info(node):
try:
capabilities_dict = utils.capabilities_to_dict(
node.properties.get('capabilities', '')
)
except exception.InvalidParameterValue as e:
raise exception.OneViewInvalidNodeParameter(node_uuid=node.uuid,
error=e)
driver_info = node.driver_info
oneview_info = {
'server_hardware_uri':
driver_info.get('server_hardware_uri'),
'server_hardware_type_uri':
capabilities_dict.get('server_hardware_type_uri'),
'enclosure_group_uri':
capabilities_dict.get('enclosure_group_uri'),
'server_profile_template_uri':
capabilities_dict.get('server_profile_template_uri'),
'applied_server_profile_uri':
driver_info.get('applied_server_profile_uri'),
}
return oneview_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n",
"def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details",
"def view(self) -> 'outputs.ViewDefinitionResponse':\n return pulumi.get(self, \"view\")",
"def info(self, node_uuid):\n if node_uuid is None:\n return None\n uri = '{}/{}'.format(self.store.aroot, node_uuid)\n infos = self.store.actual.resolve(uri)\n if infos is None:\n return None\n return json.loads(infos)",
"def getView(self) -> 'NodeGraphicsView':\n return self.grScene.views()[0]",
"def GetNodeInfo(self, hvparams=None):\n return self.GetLinuxNodeInfo()",
"def detail(self):\n info = self.info()\n return info",
"def get_info(self):\n return None",
"def GetView(self):\r\n return self.model.GetView()",
"def getViews(read):\n ...",
"def get_view ( self, object ):\n return self.view",
"def show_node(self):\n if self.controller.node_id:\n self.print_object(\n 'node',\n ('uid', 'status', 'roles'),\n self.controller.get_node()\n )\n else:\n print(\"Please select node at first.\")",
"def get_info(self, key: str) -> TaskInfo:\n return self.task_graph.nodes[key][\"info\"]",
"def get(self, request, nnid, wfver, desc):\n try:\n return_data = NNCommonManager().get_nn_node_info(nnid, wfver, desc)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))",
"def view(self) -> str:\n return pulumi.get(self, \"view\")",
"def get_info(self):\n pass",
"def get_info(self):\n pass",
"def view(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"view\")",
"def info(self):\n return self.client.call('GET', self.name + 'info')",
"def get(view):\n\n vid = view.id()\n\n if vid not in __view_data:\n __view_data[vid] = ViewData()\n\n return __view_data[vid]",
"def getInfo(self):\n return self.info",
"def get_view(self):\n return self.view",
"def getInfo(self, id):\n facade = self._getFacade()\n monitor = facade.get(id)\n data = Zuul.marshal(ITreeNode(monitor))\n return DirectResponse.succeed(data=data)",
"def info(self):\n return self._info",
"def get_detail(self, request, **kwargs):\n\t\tself.method_check(request, allowed=['get'])\n\t\tdata = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\t\tdoc_node = Document.objects.get(CTS=data.get(\"CTS\"))\t\n\t\treturn self.create_response(request, data)",
"def getNodeInfo(self, node, state=None, happy_only=False):\n happy_node_info = self.getNodes()[node]\n node_info = {\"happy\": happy_node_info}\n\n # get extension state including weave\n if not happy_only:\n for i in six.iteritems(self.getExtensionState(state)):\n extState = self.getNodes(i[1])\n if extState and node in extState:\n node_info[i[0]] = extState[node]\n\n return node_info",
"def info(self):",
"def info(self):",
"def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response",
"def get_metadata(self):\n return self.parent.get_metadata_for_node(self)"
] | [
"0.65129185",
"0.60302633",
"0.60019517",
"0.59819186",
"0.5964509",
"0.5886509",
"0.57704425",
"0.5651187",
"0.5634503",
"0.5597397",
"0.5592938",
"0.55923563",
"0.5571141",
"0.55683917",
"0.55459785",
"0.55267394",
"0.55267394",
"0.54931307",
"0.54928416",
"0.5488017",
"0.5481489",
"0.54798216",
"0.54686964",
"0.54414296",
"0.5431315",
"0.5421889",
"0.5412907",
"0.5412907",
"0.54122823",
"0.5396642"
] | 0.797151 | 0 |
Validate if the node configuration is consistent with OneView. This method calls hpOneView functions to validate if the node configuration is consistent with the OneView resources it represents, including serverHardwareUri, serverHardwareTypeUri, serverGroupUri, serverProfileTemplateUri, enclosureGroupUri and node ports. If any validation fails, the driver will raise an appropriate OneViewError. | def validate_oneview_resources_compatibility(task):
ports = task.ports
oneview_client = get_hponeview_client()
oneview_info = get_oneview_info(task.node)
_validate_node_server_profile_template(oneview_client, oneview_info)
_validate_node_server_hardware_type(oneview_client, oneview_info)
_validate_node_enclosure_group(oneview_client, oneview_info)
_validate_server_profile_template_mac_type(oneview_client, oneview_info)
_validate_node_port_mac_server_hardware(
oneview_client, oneview_info, ports) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_node_server_hardware_type(oneview_client, oneview_info):\n node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')\n\n if server_hardware_sht_uri != node_server_hardware_type_uri:\n message = _(\"Node server_hardware_type_uri is inconsistent \"\n \"with OneView's server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def validate(self):\n # double-checks node/device names matches dictionary keys\n for name, node in self.nodes.iteritems():\n if name != node.name:\n raise ConfigurationNameMismatchError(name, node.name)\n self.validateName(node)\n\n # make sure system-manager alias exists\n if \"system-manager\" not in self.aliases:\n raise ConfigurationMissingSystemManagerAliasError()\n\n # make sure the node that the alias points to exists\n for alias, nodeName in self.aliases.iteritems():\n if nodeName not in self.nodes:\n raise ConfigurationMissingAliasNodeError(alias, nodeName)\n\n # make sure there is one and only one active node\n activeNodes = [node.name for node in self.nodes.values() if node.role == Roles.ACTIVE]\n if not activeNodes:\n raise ConfigurationMissingActiveNodeError()\n if len(activeNodes) > 1:\n raise ConfigurationTooManyActiveNodesError(activeNodes)",
"def _validate_node_enclosure_group(oneview_client, oneview_info):\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n sh_enclosure_group_uri = server_hardware.get('serverGroupUri')\n node_enclosure_group_uri = oneview_info['enclosure_group_uri']\n\n if node_enclosure_group_uri and (\n sh_enclosure_group_uri != node_enclosure_group_uri):\n message = _(\n \"Node enclosure_group_uri '%(node_enclosure_group_uri)s' \"\n \"is inconsistent with OneView's server hardware \"\n \"serverGroupUri '%(sh_enclosure_group_uri)s' of \"\n \"ServerHardware %(server_hardware)s\") % {\n 'node_enclosure_group_uri': node_enclosure_group_uri,\n 'sh_enclosure_group_uri': sh_enclosure_group_uri,\n 'server_hardware': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def validate(self):\n valid = True\n \n # Check that link information is valid\n for ij in self.link:\n valid = valid and self.link[ij].head in self.node\n valid = valid and self.link[ij].tail in self.node\n if not valid:\n print(\"Error: Link tail/head not found: %s %s\" % (self.link[ij].tail, self.link[ij].head))\n raise utils.BadFileFormatException\n valid = valid and self.link[ij].capacity >= 0\n valid = valid and self.link[ij].length >= 0\n valid = valid and self.link[ij].freeFlowTime >= 0\n valid = valid and self.link[ij].alpha >= 0\n valid = valid and self.link[ij].beta >= 0\n valid = valid and self.link[ij].speedLimit >= 0\n valid = valid and self.link[ij].toll >= 0\n if not valid:\n print(\"Link %s has negative parameters.\" % ij)\n \n # Then check that all OD pairs are in range\n for ODpair in self.ODpair:\n (origin, destination) = (self.ODpair[ODpair].origin, self.ODpair[ODpair].destination)\n valid = valid and origin in self.node\n valid = valid and destination in self.node\n if not valid:\n print(\"Error: Origin/destination %s not found\" % ODpair)\n raise utils.BadFileFormatException\n valid = valid and self.node[origin].isZone == True\n valid = valid and self.node[destination].isZone == True\n if not valid:\n print(\"Error: Origin/destination %s does not connect two zones\" % str(ODpair))\n raise utils.BadFileFormatException\n valid = valid and self.ODpair[ODpair].demand >= 0\n if not valid:\n print(\"Error: OD pair %s has negative demand\" % ODpair)\n raise utils.BadFileFormatException\n \n # Now error-check using metadata\n if self.numNodes != None and len(self.node) != self.numNodes:\n print(\"Warning: Number of nodes implied by network file %d different than metadata value %d\" % (len(self.node), self.numNodes))\n self.numNodes = len(self.node)\n if self.numLinks != None and len(self.link) != self.numLinks:\n print(\"Warning: Number of links given in network file %d different than metadata value %d\" % (len(self.link), self.numLinks))\n self.numLinks = len(self.link)\n if self.numZones != None and len([i for i in self.node if self.node[i].isZone == True]) != self.numZones:\n print(\"Warning: Number of zones given in network file %d different than metadata value %d\" % (len([i for i in self.node if self.node[i].isZone == True]), self.numZones))\n self.numLinks = len(self.link)\n if self.totalDemandCheck != None:\n if self.totalDemand != self.totalDemandCheck:\n print(\"Warning: Total demand is %f compared to metadata value %f\" % ( self.totalDemand, self.totalDemandCheck))",
"def check_corleone_config():\n try:\n# Checking for neo4j is obsolete because there won't be such service\n# Lionfish is taking over neo4j (no REST console)\n# neo4j_host = du.get_configuration('neo4j', 'host')\n# neo4j_port = du.get_configuration('neo4j', 'port')\n lionfish_host = du.get_configuration('lionfish', 'host')\n lionfish_port = du.get_configuration('lionfish', 'port')\n except Exception as error:\n print unicode(error)\n return False\n# Again: obsolete\n# if not neo4j_host or not neo4j_port or not lionfish_host \\\n# or not lionfish_port:\n\n if not lionfish_port or not lionfish_host:\n return False\n return True",
"def validate_config(self):\r\n c = self.config\r\n \r\n # Make sure that we have a database_path, and an image_path...\r\n assert 'database_path' in c\r\n assert 'image_path' in c\r\n # We should probably check if these paths exist and make them as well...\r\n \r\n # Set the default values.\r\n graph_draw_frequency = c['graph_draw_frequency']\r\n for period, interval in self.default_config['graph_draw_frequency'].iteritems():\r\n graph_draw_frequency.setdefault(period, interval)\r\n \r\n # A quick check to make sure that our port is an integer.\r\n c['httpd_port'] = int(c['httpd_port'])\r\n \r\n # Make sure that no duplicate IDs exist, and that the template exists as well.\r\n ids = set()\r\n for graph in c['graphs']:\r\n graph.setdefault('config', {})\r\n graph['config'].setdefault('periods', [])\r\n assert graph['id'] not in ids\r\n ids.add(graph['id'])\r\n assert(template_exists(graph['template']))",
"def validate(self, raise_on_error: bool = True) -> bool:\n cls_name = self.__class__.__name__\n status = True\n\n for edge_type, store in self._edge_store_dict.items():\n src, _, dst = edge_type\n\n num_src_nodes = self[src].num_nodes\n num_dst_nodes = self[dst].num_nodes\n if num_src_nodes is None:\n status = False\n warn_or_raise(\n f\"'num_nodes' is undefined in node type '{src}' of \"\n f\"'{cls_name}'\", raise_on_error)\n\n if num_dst_nodes is None:\n status = False\n warn_or_raise(\n f\"'num_nodes' is undefined in node type '{dst}' of \"\n f\"'{cls_name}'\", raise_on_error)\n\n if 'edge_index' in store:\n if (store.edge_index.dim() != 2\n or store.edge_index.size(0) != 2):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} needs to be \"\n f\"of shape [2, num_edges] in '{cls_name}' (found \"\n f\"{store.edge_index.size()})\", raise_on_error)\n\n if 'edge_index' in store and store.edge_index.numel() > 0:\n if store.edge_index.min() < 0:\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"negative indices in '{cls_name}' \"\n f\"(found {int(store.edge_index.min())})\",\n raise_on_error)\n\n if (num_src_nodes is not None\n and store.edge_index[0].max() >= num_src_nodes):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"larger source indices than the number of nodes \"\n f\"({num_src_nodes}) of this node type in '{cls_name}' \"\n f\"(found {int(store.edge_index[0].max())})\",\n raise_on_error)\n\n if (num_dst_nodes is not None\n and store.edge_index[1].max() >= num_dst_nodes):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"larger destination indices than the number of nodes \"\n f\"({num_dst_nodes}) of this node type in '{cls_name}' \"\n f\"(found {int(store.edge_index[1].max())})\",\n raise_on_error)\n\n return status",
"def _validate_config(self):\n pass",
"def _validate_node_port_mac_server_hardware(oneview_client,\n oneview_info, ports):\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n if not ports:\n return\n\n # NOTE(nicodemos) If hponeview client's unable to get the MAC of the Server\n # Hardware and raises an exception, the driver will try to get it from\n # the iLOrest client.\n try:\n mac = _get_server_hardware_mac(server_hardware)\n except exception.OneViewError:\n mac = _get_server_hardware_mac_from_ilo(server_hardware)\n\n incompatible_macs = []\n for port in ports:\n if port.address.lower() == mac.lower():\n return\n incompatible_macs.append(port.address)\n\n message = _(\"The ports of the node are not compatible with its \"\n \"server hardware %(server_hardware_uri)s. There are no Ironic \"\n \"port MAC's: %(port_macs)s, that matches with the \"\n \"server hardware's MAC: %(server_hardware_mac)s\") % {\n 'server_hardware_uri': server_hardware.get('uri'),\n 'port_macs': ', '.join(incompatible_macs),\n 'server_hardware_mac': mac}\n raise exception.OneViewError(message)",
"def _validate_configurations(self) -> None:\n if self.__exception:\n raise self.__exception",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def test_140_keystone_endpoint(self):\n u.log.debug('Checking keystone api endpoint data...')\n endpoints = self.keystone_v2.endpoints.list()\n admin_port = '35357'\n internal_port = public_port = '5000'\n expected = {\n 'id': u.not_null,\n 'region': 'RegionOne',\n 'adminurl': u.valid_url,\n 'internalurl': u.valid_url,\n 'publicurl': u.valid_url,\n 'service_id': u.not_null\n }\n ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,\n public_port, expected)\n if ret:\n amulet.raise_status(amulet.FAIL,\n msg='keystone endpoint: {}'.format(ret))",
"def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)",
"def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()",
"def validate(self):\n if not self.os_repos:\n raise ValueError(\"No OS repository available for OS {}\".format(\n self.operating_system.name))\n if not self.template:\n raise ValueError(\"No autoinstallation template specified\")\n if not self.installer_template:\n raise ValueError(\"No installer command line template specified\")\n if not self.system_profile._gateway:\n raise ValueError(\"No gateway interface present\")\n\n self.system_profile.hypervisor.validate()\n\n for iface in self.system_profile.ifaces:\n iface.validate()\n\n # verify gateway interface has IP address and gateways\n if not self.system_profile.list_gateway_networks():\n raise ValueError(\n \"Gateway interface {} has no IP address\"\n \" or gateway route\".format(\n self.system_profile._gateway.os_device_name\n ))\n\n # verify that total partition size is not bigger than disk size\n failing_volume_ids = []\n for volume in [volume for volume in self.system_profile.volumes\n if isinstance(volume, (self.DasdVolume,\n self.ZfcpVolume))]:\n total_part_size = sum(\n [partition.size for partition in volume.partitions])\n if total_part_size > volume.size:\n failing_volume_ids.append(str(volume))\n\n if failing_volume_ids:\n raise ValueError(\n \"Partitioning exceeds volume size for volumes {}\".format(\n failing_volume_ids))",
"def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")",
"def checkconfig(self): \n validconfig = {\n 'loglevel': lambda s: s in self.loglevels,\n 'logfilelevel': lambda s: s in self.loglevels,\n 'nodes': lambda s: isinstance(s, list),\n 'pynodes': lambda s: isinstance(s, list)\n }\n alive = True\n for key in self.config: \n if (key in validconfig and \n not validconfig[key](self.config[key])):\n logging.critical(\"Invalid configuration option {}: {}\".format(\n key, self.config[key]))\n alive = False\n return alive",
"def check_port_validity(self):\n # Check if ports provided are already present in VPLEX\n if self.ports:\n LOG.info(\"Validating the ports\")\n for port in self.ports:\n obj = None\n try:\n obj = self.storageview.get_port(self.cl_name, port)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get port {0} details in {1} due to\"\n err_msg = msg.format(port, self.cl_name) + \" error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n if obj is None:\n msg = (\"Could not get port {0} details in {1}\"\n .format(port, self.cl_name))\n LOG.error(msg)\n self.module.fail_json(msg=msg)",
"def validate(self, node_uuid):\n # check if node exists\n node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)\n return pecan.request.rpcapi.validate_driver_interfaces(\n pecan.request.context, node.uuid)",
"def validate_pool_settings(ns):\n if not ns.json_file and not ns.template:\n if ns.node_agent_sku_id and not ns.image:\n raise ValueError(\"Missing required argument: --image\")\n if not ns.id:\n raise ValueError(\"id is required\")\n if not ns.vm_size:\n raise ValueError(\"The --vm-size is required\")\n\n validate_mutually_exclusive(ns, False, 'target_dedicated_nodes', 'auto_scale_formula')\n validate_mutually_exclusive(ns, True, 'os_family', 'image')",
"def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def _validate_host_config(self, addr, cfg):\n err_prefix = f\"Host {addr}\"\n assert isinstance(cfg, dict) and len(cfg) >= len(HOST_CONFIG_KEYS), \\\n (f\"{err_prefix} configurations must be a dict of length >= \"\n f\"{len(HOST_CONFIG_KEYS)}. {cfg} is invalid\")\n\n for k in HOST_CONFIG_KEYS:\n assert k in cfg, f\"{err_prefix} configuration missing key: {k}\"\n\n host_services = cfg[u.HOST_SERVICES]\n for service in host_services:\n assert service in self.services, \\\n (f\"{err_prefix} Invalid service in configuration services \"\n f\"list: {service}\")\n\n assert len(host_services) == len(set(host_services)), \\\n (f\"{err_prefix} configuration services list cannot contain \"\n \"duplicates\")\n\n host_processes = cfg[u.HOST_PROCESSES]\n for process in host_processes:\n assert process in self.processes, \\\n (f\"{err_prefix} invalid process in configuration processes\"\n f\" list: {process}\")\n\n assert len(host_processes) == len(set(host_processes)), \\\n (f\"{err_prefix} configuation processes list cannot contain \"\n \"duplicates\")\n\n host_os = cfg[u.HOST_OS]\n assert host_os in self.os, \\\n f\"{err_prefix} invalid os in configuration: {host_os}\"\n\n fw_err_prefix = f\"{err_prefix} {u.HOST_FIREWALL}\"\n if u.HOST_FIREWALL in cfg:\n firewall = cfg[u.HOST_FIREWALL]\n assert isinstance(firewall, dict), \\\n (f\"{fw_err_prefix} must be a dictionary, with host \"\n \"addresses as keys and a list of denied services as values. \"\n f\"{firewall} is invalid.\")\n for addr, srv_list in firewall.items():\n addr = self._validate_host_address(addr, err_prefix)\n assert self._is_valid_firewall_setting(srv_list), \\\n (f\"{fw_err_prefix} setting must be a list, contain only \"\n f\"valid services and contain no duplicates: {srv_list}\"\n \" is not valid\")\n else:\n cfg[u.HOST_FIREWALL] = dict()\n\n v_err_prefix = f\"{err_prefix} {u.HOST_VALUE}\"\n if u.HOST_VALUE in cfg:\n host_value = cfg[u.HOST_VALUE]\n assert isinstance(host_value, (int, float)), \\\n (f\"{v_err_prefix} must be an integer or float value. \"\n f\"{host_value} is invalid\")\n\n if addr in self.sensitive_hosts:\n sh_value = self.sensitive_hosts[addr]\n assert math.isclose(host_value, sh_value), \\\n (f\"{v_err_prefix} for a sensitive host must either match \"\n f\"the value specified in the {u.SENSITIVE_HOSTS} section \"\n f\"or be excluded the host config. The value {host_value} \"\n f\"is invalid as it does not match value {sh_value}.\")",
"def _validate_config(self):\n # Simulation ID\n empty_string_check(self._config_dict['@id'])\n \n # Output\n empty_string_check(self._config_dict['output']['@baseDirectory'])\n self._config_dict['output']['@saveInteractionLog'] = parse_boolean(self._config_dict['output']['@saveInteractionLog'])\n self._config_dict['output']['@saveRelevanceJudgments'] = parse_boolean(self._config_dict['output']['@saveRelevanceJudgments'])\n self._config_dict['output']['@trec_eval'] = parse_boolean(self._config_dict['output']['@trec_eval'])\n \n # Topics\n def check_topic(t):\n \"\"\"\n Checks a given topic, t. Looks for a topic ID and a valid topic description file.\n \"\"\"\n empty_string_check(t['@id'])\n filesystem_exists_check(t['@filename'])\n filesystem_exists_check(t['@qrelsFilename'])\n \n if '@backgroundFilename' in t: # A background file was specified.\n filesystem_exists_check(t['@backgroundFilename'])\n else:\n t['@backgroundFilename'] = None # No background file was specified.\n \n topics = self._config_dict['topics']['topic']\n \n if type(topics) == list:\n for topic in topics:\n check_topic(topic)\n else:\n check_topic(topics)\n \n # Users\n users = self._config_dict['users']['user']\n \n if type(users) == list:\n for user in users:\n filesystem_exists_check(user['@configurationFile'])\n else:\n filesystem_exists_check(users['@configurationFile'])\n \n # Search Interface\n empty_string_check(self._config_dict['searchInterface']['@class'])\n check_attributes(self._config_dict['searchInterface'])",
"def validate(self):\n\n r = requests.get(self.config.data_path,\n headers=self.config.headers[\"get\"])\n version = r.json()[\"neo4j_version\"]\n print \"Connected to Neo4j-server OK, version= {0}\".format(version)",
"def check(self):\n self.isNodes = True\n self.isFixable = False\n defaults = ['persp', 'top', 'front', 'side']\n project_defaults = ['__SUBSET__', '__SET__', '__CAMERA__', '__CHARS__', '__PROPS__']\n\n errorNodes = list()\n for each in pm.ls(assemblies=1):\n if str(each) in defaults:\n continue\n if str(each) in project_defaults:\n continue\n errorNodes.append(str(each))\n self.setStatus('OK')\n if len(errorNodes) > 0:\n self.setStatus('WARNING')\n self.errorNodes = errorNodes\n self.errorMessage = '%s numbers of extra root nodes found in the scene.' % str(len(self.errorNodes))",
"def _validate_node_properties(node_properties):\n prefix = \"node_properties\"\n\n node_config = [\n {\n 'field_name': 'num_nodes',\n 'field_value': node_properties.get('num_nodes'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n {\n 'field_name': 'main_node',\n 'field_value': node_properties.get('main_node'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n {\n 'field_name': 'node_range_properties',\n 'field_value': node_properties.get('node_range_properties'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type,\n _validate_required_field\n ]\n },\n ]\n _process_config(node_config)\n\n node_range_properties = node_properties.get('node_range_properties')\n\n node_range_prefix = prefix + \"__node_range_properties\"\n for node in node_range_properties:\n container_properties = node.get('container')\n _validate_required_field(\n field_name='container',\n field_value=container_properties,\n prefix=node_range_prefix\n )\n _validate_field_type(\n field_name='container',\n field_value=node.get('container'),\n prefix=node_range_prefix,\n required_type=dict,\n )\n\n container_prefix = node_range_prefix + '__container'\n _validate_container_properties(container_properties, prefix=container_prefix)",
"def validate(self, config_json):\n pass"
] | [
"0.64884377",
"0.626606",
"0.6191199",
"0.61666006",
"0.56702393",
"0.5576689",
"0.5527156",
"0.5473364",
"0.53926665",
"0.53686446",
"0.5361794",
"0.53390706",
"0.53390706",
"0.5296695",
"0.5285889",
"0.521166",
"0.52000636",
"0.5153872",
"0.5126247",
"0.5111564",
"0.50935376",
"0.50878394",
"0.50655735",
"0.50549746",
"0.50308895",
"0.5025623",
"0.50220513",
"0.5015604",
"0.50080603",
"0.49931446"
] | 0.70742106 | 0 |
Verify if info_required is present in node_namespace. | def _verify_node_info(node_namespace, node_info_dict, info_required):
    missing_keys = set(info_required) - set(node_info_dict)
    if missing_keys:
        raise exception.MissingParameterValue(
            _("Missing the keys for the following OneView data in node's "
              "%(namespace)s: %(missing_keys)s.") %
            {'namespace': node_namespace,
             'missing_keys': ', '.join(missing_keys)
             }
        )
    # False and 0 can still be considered as valid values
    missing_values_keys = [k for k in info_required
                           if node_info_dict[k] in ('', None)]
    if missing_values_keys:
        missing_keys = ["%s:%s" % (node_namespace, k)
                        for k in missing_values_keys]
        raise exception.MissingParameterValue(
            _("Missing parameter value for: '%s'") % "', '".join(missing_keys)
        ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_namespace_attrs(self, node):\n for cls in node.classes:\n for var in cls.variables:\n self.check_var_attrs(cls, var)\n for func in cls.functions:\n self.check_fcn_attrs(func)\n\n for func in node.functions:\n self.check_fcn_attrs(func)\n\n for ns in node.namespaces:\n self.verify_namespace_attrs(ns)",
"def verify_node_info(node):\n capabilities_dict = utils.capabilities_to_dict(\n node.properties.get('capabilities', '')\n )\n driver_info = node.driver_info\n\n _verify_node_info('properties/capabilities', capabilities_dict,\n REQUIRED_ON_PROPERTIES)\n\n _verify_node_info('driver_info', driver_info,\n REQUIRED_ON_DRIVER_INFO)",
"def verify_attrs(self):\n self.verify_namespace_attrs(self.newlibrary.wrap_namespace)",
"def test_get_node_requirements(self):\n pass",
"def IsExtraRequire(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if namespace in self._ignored_extra_namespaces:\n return False\n\n if token in self._duplicate_require_tokens:\n return True\n\n if namespace in self._suppressed_requires:\n return False\n\n # If the namespace contains a component that is initial caps, then that\n # must be the last component of the namespace.\n parts = namespace.split('.')\n if len(parts) > 1 and parts[-2][0].isupper():\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for ns in self._used_namespaces:\n if (not ns.alias_definition and (\n namespace == ns.namespace or namespace == ns.identifier)):\n return False\n\n return True",
"def _check_required_if_provider(self):\n return",
"def _check_required_section_found(self, docstring: PetscDocStringImpl) -> None:\n if not self and self.required:\n diag = self.diags.section_header_missing\n mess = f'Required section \\'{self.titles[0]}\\' not found'\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, docstring.extent, highlight=False\n )\n return",
"def __verify_requirements(self):\n if self.major[1] not in self.data[self.root] or self.data[self.root][self.major[1]] is None:\n self.data[self.root][self.major[1]] = {\"Requirement\": []}\n elif \"Requirement\" not in self.data[self.root][self.major[1]] or self.data[self.root][self.major[1]][\"Requirement\"] is None:\n self.data[self.root][self.major[1]][\"Requirement\"] = []\n elif not isinstance(self.data[self.root][self.major[1]][\"Requirement\"], list):\n self.data[self.root][self.major[1]][\"Requirement\"] = [self.data[self.root][self.major[1]][\"Requirement\"]]",
"def check_requirement(self):\n raise NotImplementedError",
"def hasRequiredAttributes(self):\n return _libsbml.Unit_hasRequiredAttributes(self)",
"def check_package_part(self, node, ecosystem, package):\n package_node = node[\"package\"]\n # check the ecosystem and name attributes that are required for a package\n self.check_ecosystem(package_node)\n self.check_name(package_node)\n\n # compare with expected values\n e = package_node[\"ecosystem\"][0]\n p = package_node[\"name\"][0]\n self.compare_ecosystems(e, ecosystem)\n self.compare_packages(p, package)",
"def hasRequiredElements(self):\n return _libsbml.UnitDefinition_hasRequiredElements(self)",
"def _entry_has_required_features(entry: _LexiconEntry) -> None:\n features = _features_of(entry)\n tag = _tag_of(entry)\n required = tags.REQUIRED_FEATURES[tag]\n\n if features == \"~\" and required:\n raise InvalidLexiconEntryError(\"Entry is missing required features.\")",
"def hasRequiredAttributes(self):\n return _libsbml.UnitDefinition_hasRequiredAttributes(self)",
"def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks",
"def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)",
"def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))",
"def ShouldRequireNamespace(namespace, identifier):\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)",
"def hasRequiredAttributes(self):\n return _libsbml.Port_hasRequiredAttributes(self)",
"def check_ecosystem(self, node):\n assert \"ecosystem\" in node, \"Package node does not contain attribute 'ecosystem'\"\n assert len(node[\"ecosystem\"]) >= 1, \"Expecting at least one 'ecosystem' value\"\n # TODO: add more thorough checks",
"def testIgnoredExtraNamespaces(self):\n token = self._GetRequireTokens('package.Something')\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n closurized_namespaces=['package'],\n ignored_extra_namespaces=['package.Something'])\n\n self.assertFalse(namespaces_info.IsExtraRequire(token),\n 'Should be valid since it is in ignored namespaces.')\n\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n ['package'], [])\n\n self.assertTrue(namespaces_info.IsExtraRequire(token),\n 'Should be invalid since it is not in ignored namespaces.')",
"def _check_required_fields(self):\n assert self.title\n assert self.format",
"def check(self):\n BadNamespaces = list()\n\n for namespace in pm.listNamespaces():\n BadNamespaces.append(namespace)\n\n if not BadNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = namespace\n for namespace in BadNamespaces:\n self.addError(\"namespace %s exist\" % namespace)\n self.errorMessage = \"%s namespace\" % (len(BadNamespaces))",
"def hasRequiredElements(self):\n return _libsbml.SBase_hasRequiredElements(self)",
"def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))",
"def check(self, node):\n # do the necessary setup/arguments and call self.visit (node, args)\n self.visit(node, defined=set())",
"def hasRequiredElements(self):\n return _libsbml.InitialAssignment_hasRequiredElements(self)",
"def hasRequiredElements(self):\n return _libsbml.StoichiometryMath_hasRequiredElements(self)",
"def check_pname(self, node):\n assert \"pname\" in node, \"Version node does not contain attribute 'pname'\"\n assert len(node[\"pname\"]) >= 1, \"Expecting at least one 'pname' value\"\n # TODO: add more thorough checks",
"def hasRequiredAttributes(self):\n return _libsbml.SpeciesTypeComponentMapInProduct_hasRequiredAttributes(self)"
] | [
"0.58488464",
"0.58250725",
"0.5789654",
"0.5767724",
"0.576109",
"0.5756803",
"0.57413286",
"0.56746936",
"0.55888873",
"0.55871767",
"0.55615854",
"0.55192447",
"0.54995584",
"0.5496556",
"0.54898274",
"0.5455332",
"0.54527485",
"0.54376256",
"0.5391571",
"0.5388521",
"0.53861755",
"0.53758454",
"0.53694713",
"0.5362287",
"0.53573656",
"0.5355142",
"0.53436804",
"0.5335267",
"0.5327005",
"0.5318667"
] | 0.7387368 | 0 |
Checks if the node's Server Hardware has a Server Profile associated. Decorator to execute before the function execution to check if the Server Profile is applied to the Server Hardware. | def node_has_server_profile(func):
    def inner(self, *args, **kwargs):
        task = args[0]
        has_server_profile(task)
        return func(self, *args, **kwargs)
    return inner | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_server_profile(task):\n oneview_client = get_hponeview_client()\n try:\n profile = task.node.driver_info.get('applied_server_profile_uri')\n oneview_client.server_profiles.get(profile)\n except client_exception.HPOneViewException as exc:\n LOG.error(\n \"Failed to get server profile from OneView appliance for\"\n \" node %(node)s. Error: %(message)s\",\n {\"node\": task.node.uuid, \"message\": exc}\n )\n raise exception.OneViewError(error=exc)",
"def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()",
"def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False",
"def create_simple_server_profile_by_server_hardware(profile_name, server_name, return_true_if_exists=False):\n logger.info(\"--> creating a server profile with name '%s' ...\" % profile_name)\n # checking if the profile is already existing\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n if VerifyServerProfile.verify_server_profile_not_exist(profile_name, fail_if_false=False) is False:\n logger.warn(\"server profile '%s' already exists\" % profile_name)\n return return_true_if_exists\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_HARDWARE, time_for_loading=5)\n if VerifyHardware.verify_server_hardware_exist(server_name=server_name, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' does not exist\" % server_name)\n return False\n\n CommonOperationServerHardware.click_server_hardware(server_name=server_name, timeout=5, time_for_loading=5)\n FusionUIBase.select_view_by_name(view_name='Hardware', timeout=5, fail_if_false=False)\n if VerifyHardware.is_create_profile_link_available() is False:\n logger.warn(\"server hardware '%s' does NOT have 'Create profile' link to perform creating profile\" % server_name)\n return False\n\n CommonOperationServerHardware.click_create_profile_link(server_name=server_name)\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(name=profile_name)\n # CreateServerProfile.input_description(description=description)\n\n if VerifyServerProfile.is_power_on_error_visible_when_create_server_profile(server_name=server_name, timeout=5, fail_if_false=False) is True:\n if CreateServerProfile.click_power_off_link_from_powered_on_error(server_name=server_name, timeout=5, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' is powered on but failed to power it off, creating simple server profile will FAIL\" % server_name)\n return False\n\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(server_name)\n\n if sht_selected[:2:] == 'BL':\n # maybe other needs according to SHT in the future\n pass\n\n CreateServerProfile.click_create_button()\n err_msg_boot_mode = CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode()\n if err_msg_boot_mode is not None:\n logger.warn(\"error message: ['%s'] when creating profile '%s'\" % (err_msg_boot_mode, profile_name))\n if 'select a boot mode' in err_msg_boot_mode.strip().lower():\n logger.debug(\"trying to set 'Boot mode' as 'Legacy BIOS' to remove this error message ...\")\n CommonOperationServerProfile.BootSettings.select_boot_mode_legacy_bios()\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unknown error message, cannot continue to create simple server profile\")\n return False\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n return False\n # ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if FusionUIBase.wait_activity_action_ok(profile_name, 'Create', timeout=720, fail_if_false=True) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, 
timeout=180, fail_if_false=True) is False:\n return False\n logger.info(\"created simple server profile '%s' successfully\" % profile_name)\n return True",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def power_on_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n already_on_or_not_exists = 0\n powered_on = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"powering on a server profile named '%s'\" % profile.name)\n # check if server profile exists\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n already_on_or_not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=4)\n # check if already powered on\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_general_server_power(expect_value='Off', timeout=7, fail_if_false=False) is False:\n logger.warn(\"power state of server profile '%s' is not 'Off', 'POWER ON' action is unavailable.\" % profile.name)\n already_on_or_not_exists += 1\n else:\n if power_on_server_profile_by_name(profile.name) is False:\n logger.warn(\"server profile '%s' is NOT powered on successfully\" % profile.name)\n continue\n else:\n powered_on += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_on_or_not_exists == 0:\n # logger.warn(\"no server profile to power on! all %s server profile(s) is NOT applicable to power on (already powered on, or not existing), test is considered PASS\" % already_on_or_not_exists)\n logger.warn(\"no server profile to power on! all %s server profile(s) is NOT applicable to power on (already powered on, or not existing), keyword '%s' returns a 'False'\" % (already_on_or_not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if powered_on < total:\n logger.warn(\"not all of the server profile(s) is successfully powered on - %s out of %s powered on \" % (powered_on, total))\n if powered_on + already_on_or_not_exists == total:\n # logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, test is considered PASS\" % already_on_or_not_exists)\n logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, keyword '%s' returns a 'False'\" % (already_on_or_not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, \"\n \"%s server profile(s) left is failed being powered on \" % (already_on_or_not_exists, total - powered_on - already_on_or_not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully powered on - %s out of %s \" % (powered_on, total))\n return True",
"def verify_server_profile_status(expectedserverstatus, *profile_obj):\n\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n # if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n # else:\n # logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n # return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n # else:\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n # if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % serverhardware, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._warn(\"Server Hardware : \" + serverhardware + \" is not present in the ServerList of the Server Profile page\")\n # return False\n # else:\n # logger._log_to_console_and_log_file(\"Server Hardware : \" + serverhardware + \" is present in the ServerList and Hence verifying for the status..\")\n\n for profile in profile_obj:\n server_hardware = profile.server\n\n logger._log_to_console_and_log_file(\"Verifying status for profile %s\" % profile.name)\n\n if server_hardware == 'unassigned':\n logger._log_to_console_and_log_file(\"Server hardware is unassigned and cannot verify the server's power status\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(2) # wait for fields to load\n\n # ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n if expectedserverstatus == 'OK':\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'ERROR':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n 
ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'WARNING':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def verify_can_edit_server_profile_general_info_when_server_power_on(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile.name)\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n # Server hardware must be \"power\" on status\n if not VerifyServerProfile.is_power_on_error_visible_when_edit_server_profile(profile.server, 10):\n logger.warn(\"Server hardware '%s' is not 'Powered on, please power on it\" % profile.server)\n continue\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=False)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n logger.warn(\"Only connection name is allowed to modification\")\n # add connections\n CommonOperationServerProfile.Connection().set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 
'SANStorage', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.warn(\"Modify the 'Advanced' section will return error when server power on, so ignore this setting\")\n\n EditServerProfile.click_ok_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def verify_server_status(server_hardware):\n\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n else:\n logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Server Hardware : \" + server_hardware + \" is not present in the ServerList of the Server Profile page\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Server Hardware : \" + server_hardware + \" is present in the ServerList and Hence verifying for the status..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK'\")\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n return True",
"def bak_power_on_server_profile(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n logger._log_to_console_and_log_file(\"\")\n error = 0\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n valid_profiles = []\n excluded_profiles = []\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Powering on server profiles '%s'\" % profile.name)\n\n # Validate server profiles\n logger._log_to_console_and_log_file(\"Validating Server Profiles\")\n profile_names = _split_profile_names(profile.name)\n for profile_name in profile_names:\n profile_attributes = get_server_profile_attributes(profile_name, None)\n if profile_attributes is None:\n logger._warn(\"Server Profile '%s' does not exist\" % profile_name)\n selenium2lib.capture_page_screenshot()\n return False\n elif profile_attributes[\"server hardware\"] == \"unassigned\":\n logger._warn(\"Cannot power on Server Profile '%s' due to unassigned server hardware\" % profile_name)\n excluded_profiles.append(profile_name)\n elif profile_attributes[\"server power\"] == \"On\":\n logger._warn(\"Server Profile '%s' is already powered on\" % profile_name)\n excluded_profiles.append(profile_name)\n else:\n valid_profiles.append(profile_name)\n\n if len(valid_profiles) == 0:\n logger._warn(\"All specified Server Profiles are already powered on.\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select the profile from the left side table\n logger._log_to_console_and_log_file(\"Powering on Server Profiles\")\n if not select_server_profile(profile.name):\n logger._warn(\"Failed to select server profiles\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select Power off option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if selenium2lib._is_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWERON):\n logger._log_to_console_and_log_file(\"Powering on selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWERON)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"On\", PerfConstants.PROFILE_POWER_VALIDATION)\n BuiltIn().sleep(10)\n logger._log_to_console_and_log_file(\"Successfully powered on Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n logger._log_to_console_and_log_file(\"Power on option is not available in the Actions menu\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Build Activity Message\n args = {}\n args[\"activity\"] = \"Power On\"\n args[\"entity\"] = get_server_profile_attributes(profile_names[0], \"server hardware\") if len(profile_names) == 1 else \"%d server hardware\" % len(profile_names)\n args[\"multiple\"] = len(profile_names) - 1\n if args[\"multiple\"]:\n args[\"completed\"] = valid_profiles if len(valid_profiles) > 1 else [valid_profiles[0]]\n if len(excluded_profiles) > 0:\n args[\"excluded\"] = excluded_profiles if len(excluded_profiles) > 1 else [excluded_profiles[0]]\n\n # Verify Activity\n if not _verify_activity(**args):\n logger._warn(\"Failed to verify Power On Activity\")\n selenium2lib.capture_page_screenshot()\n error += 1\n else:\n logger._log_to_console_and_log_file(\"Successfully verified Power On Activity for Powering On Profile(s): '%s'\" % 
profile.name)\n\n if error > 0:\n return False\n return True",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def _validate_server_profile_template_manage_boot(server_profile_template):\n manage_boot = server_profile_template.get('boot', {}).get('manageBoot')\n\n if not manage_boot:\n message = _(\"Server Profile Template: %s, does not allow to manage \"\n \"boot order.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def is_vendor_profile_page_loaded_properly(self):\n return self.is_element_present(self.save_vendor_profile_locator)",
"def is_vendor_profile_present(self):\n return self.is_element_present(self.vendor_profile_locator)",
"def bak_verify_server_profile_general_info(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n for profile in profile_obj:\n server = profile.server\n hardwaretype = profile.hardwareType\n enclosuregroup = profile.enclgroup\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(5) # wait for fields to load\n\n logger.info(\"Verifying server hardware for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_SERVER, server, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_SERVER)\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, server))\n selenium2lib.capture_page_screenshot()\n return False\n\n logger.info(\"Verifying server hardware type for profile %s\" % profile.name)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_HARDWARE, PerfConstants.DEFAULT_SYNC_TIME, fail_if_false=False) is True:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_HARDWARE)\n if txt.find(hardwaretype) == -1:\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, hardwaretype))\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger.warn(\"Failed to wait server hardware type field display\")\n return False\n\n logger.info(\"Verifying enclosure group for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP, enclosuregroup, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP)\n logger.info(\"Enclosure group of server : %s is not as expected [%s]\" % (txt, enclosuregroup))\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def is_shared_profile_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsSharedProfileEnabled', self.handle))",
"def is_valid_profile(profile):\n\n return profile.metadata.get('os', 'unknown') == 'windows'",
"def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))",
"def verify_server_profile_boot_settings_info(*profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for _, profile in enumerate(profile_obj):\n logger.info(\"verifying Boot Settings info of a server profile named '%s'\" % profile.name)\n # check if server profile exists\n VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=True)\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=10)\n FusionUIBase.select_view_by_name(view_name='Boot Settings', timeout=5, fail_if_false=True)\n\n if profile.BootSettings.bootMode.lower() == 'legacy bios':\n VerifyServerProfile.verify_legacy_boot_settings(profile, timeout=10, fail_if_false=True)\n else:\n VerifyServerProfile.verify_non_legacy_boot_settings(profile, timeout=10, fail_if_false=True)",
"def verify_required_fields_for_iscsi_boot(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"Creating Server Profile for server | %s | ...\" % profile.name)\n\n # checking if the profile already exists\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n ui_lib.fail_test(\"Server profile | %s | already exists\" % profile.name)\n\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(\n profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not 
None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n\n if hasattr(profile, 'Connections'):\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n\n # add connections with blank iSCSI boot data and verify required field error messages\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start adding connections ...\")\n\n for n, connection in enumerate(profile.Connections):\n logger.info(\"--- <connections> ---: {2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile.Connections), '-' * 14))\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n logger.debug(\"test data for connection '<%s>' is found: '<%s>'\" % (connection.name, connection), also_console=False)\n\n # Verify the connection does not exist\n CommonOperationServerProfile.Connection.verify_connection_not_exist(connection.name, fail_if_false=True)\n\n # Add the connection\n CommonOperationServerProfile.Connection.click_add_connection_button()\n CommonOperationServerProfile.Connection.wait_add_connection_dialog_shown()\n\n CommonOperationServerProfile.Connection.input_name(connection.name)\n CommonOperationServerProfile.Connection.select_function_type_by_text(connection.FunctionType, fail_if_false=True)\n CommonOperationServerProfile.Connection.input_select_network(connection.network)\n CommonOperationServerProfile.Connection.input_select_port(connection.port)\n CommonOperationServerProfile.Connection.input_requested_bandwidth(connection.RequestedBandwidth)\n CommonOperationServerProfile.Connection.select_boot_by_text(connection.boot, fail_if_false=True)\n\n # Input information for the iSCSI boot connection. Data file should have blanks for all fields except secondIp.\n if connection.boot == 'iSCSI primary' or connection.boot == 'iSCSI secondary':\n CommonOperationServerProfile.Connection.set_iscsi_boot_options(connection)\n\n # Click \"Add\" button\n CommonOperationServerProfile.Connection.click_add_button()\n\n # Verify error messages\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_subnet_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_gateway_error_message(\"\")\n\n if hasattr(connection, \"vlanId\"):\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"\")\n\n CommonOperationServerProfile.Connection.verify_iscsi_target_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_lun_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"This field is required.\")\n\n if getattr(connection, \"secondIp\", \"\") is not \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n 
CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"\")\n\n if hasattr(connection, \"chapLvl\"):\n if connection.chapLvl == \"None\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"Mutual CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"This field is required.\")\n\n # Click \"Cancel\" button\n CommonOperationServerProfile.Connection.click_cancel_button()\n else:\n ui_lib.fail_test(\"Connections object not present in data file for profile with name | %s |\" % profile.name)\n\n CreateServerProfile.click_cancel_button()",
"def cold_boot_server_profiles(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n off_or_unsupported = 0\n not_exists = 0\n done_cold_boot = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"cold boot a server profile named '%s'\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=2)\n if VerifyServerProfile.verify_general_server_power(expect_value='On', timeout=5, fail_if_false=False) is False:\n logger.warn(\"Power state of server profile '%s' is not 'On', 'RESET -> COLD BOOT' action is unavailable.\" % profile.name)\n off_or_unsupported += 1\n else:\n if cold_boot_server_profile_by_name(profile.name) is False:\n logger.warn(\"server profile '%s' is NOT done cold boot successfully\" % profile.name)\n continue\n else:\n done_cold_boot += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - off_or_unsupported - not_exists == 0:\n logger.warn(\"no server profile to cold boot! all %s server profile(s) is NOT applicable to cold boot (already powered off/unsupported/not existing), test is considered PASS\" % off_or_unsupported)\n return True\n else:\n if done_cold_boot < total:\n logger.warn(\"not all of these server profile(s) is successfully done cold boot - %s out of %s done cold boot \" % (done_cold_boot, total))\n if done_cold_boot + off_or_unsupported + not_exists == total:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, test is considered PASS\" % (off_or_unsupported, not_exists))\n return True\n else:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, \"\n \"%s left is failed to cold boot \" % (off_or_unsupported, not_exists, total - done_cold_boot - off_or_unsupported - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully done cold boot - %s out of %s \" % (done_cold_boot, total))\n return True",
"def check_toolserver(env):\n try:\n blueprint = cli.get_env_blueprint(env)\n if blueprint == 'toolserver':\n return True\n else:\n return False\n except SystemExit:\n return False",
"def verify_server_profile_power_status(expectedpowerstatus, *profile_obj):\n\n # logger._log_to_console_and_log_file(\"\")\n # logger._log_to_console_and_log_file(\"Verifying the power status of list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n error = 0\n\n for profile in profile_obj:\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying power status for profile %s\" % profile.name)\n\n profile_names = _split_profile_names(profile.name)\n for profile_name in profile_names:\n status = get_server_profile_attributes(profile_name)\n\n if status[\"server hardware\"] == 'unassigned':\n logger._warn(\"Server profile '%s' has unassigned server hardware and cannot verify the server's power status, skip\" % profile_name)\n selenium2lib.capture_page_screenshot()\n # error += 1\n continue\n\n if status[\"server power\"].lower() == expectedpowerstatus.lower():\n logger._log_to_console_and_log_file(\"Successfully verified Server Profile '%s' power status: %s\" % (profile_name, status[\"server power\"]))\n continue\n else:\n logger._warn(\"Failed to verify Server Profile '%s' power status: %s, expect: %s\" % (profile_name, status[\"server power\"].lower(), expectedpowerstatus))\n selenium2lib.capture_page_screenshot()\n error += 1\n\n if error > 0:\n return False\n return True",
"def profile_checking(ipydir, profile):\n try:\n profile = ProfileDir.find_profile_dir_by_name(ipydir, profile)\n return True\n except ProfileDirError:\n return False",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def is_profile_device(cls, device: UpnpDevice) -> bool:\n try:\n profile_device = find_device_of_type(device, cls.DEVICE_TYPES)\n except UpnpError:\n return False\n\n # Check that every service required by the subclass is declared by the device\n device_service_ids = {\n service.service_id for service in profile_device.services.values()\n }\n\n if not cls.SERVICE_IDS.issubset(device_service_ids):\n return False\n\n return True",
"def validate_server_profile_task_step(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n not_exists = 0\n verified = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"Validate server profile <%s> task contains <%s>\" % (profile.name, profile.method))\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n\n CommonOperationServerProfile.click_server_profile(profile.name)\n FusionUIBase.select_view_by_name(view_name='Activity', timeout=5, fail_if_false=False)\n CommonOperationServerProfile.click_activity_collapser(profile.task)\n timeout = int(getattr(profile, 'validate_timeout', '5'))\n ret = VerifyServerProfile.verify_activity_contains_text(profile.method, timeout=timeout, fail_if_false=False)\n # Verify method text not exist in steps\n if getattr(profile, 'exist', '').lower() == 'false':\n if ret is True:\n ui_lib.fail_test(\"%s should not exist in task steps\" % profile.method)\n elif ret is False:\n ui_lib.fail_test(\"%s should exist in task steps\" % profile.method)\n\n logger.info(\"Server profile '%s' got the correct task method\" % profile.name)\n verified += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to view! all %s server profile(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if verified < total:\n logger.warn(\"not all of task for the server profile(s) is successfully verified - %s out of %s verified \" % (verified, total))\n if verified + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped, %s profile(s) left is failed being verified \" % (not_exists, total - verified - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully verified - %s out of %s \" % (verified, total))\n return True",
"def test_update_hyperflex_node_profile(self):\n pass"
] | [
"0.64625174",
"0.6329892",
"0.6047561",
"0.59693646",
"0.5888459",
"0.5794492",
"0.57879764",
"0.5742285",
"0.57172704",
"0.55384105",
"0.5498647",
"0.54914945",
"0.5441861",
"0.5415404",
"0.5377407",
"0.5358442",
"0.53436404",
"0.5326051",
"0.5309744",
"0.530064",
"0.52882934",
"0.5251811",
"0.52235824",
"0.5207874",
"0.5192738",
"0.51791227",
"0.5170811",
"0.5169286",
"0.51311636",
"0.5098314"
] | 0.6996314 | 0 |
Checks if the node's Server Hardware has a Server Profile associated. Function to check if the Server Profile is applied to the Server Hardware. | def has_server_profile(task):
oneview_client = get_hponeview_client()
try:
profile = task.node.driver_info.get('applied_server_profile_uri')
oneview_client.server_profiles.get(profile)
except client_exception.HPOneViewException as exc:
LOG.error(
"Failed to get server profile from OneView appliance for"
" node %(node)s. Error: %(message)s",
{"node": task.node.uuid, "message": exc}
)
raise exception.OneViewError(error=exc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_simple_server_profile_by_server_hardware(profile_name, server_name, return_true_if_exists=False):\n logger.info(\"--> creating a server profile with name '%s' ...\" % profile_name)\n # checking if the profile is already existing\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n if VerifyServerProfile.verify_server_profile_not_exist(profile_name, fail_if_false=False) is False:\n logger.warn(\"server profile '%s' already exists\" % profile_name)\n return return_true_if_exists\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_HARDWARE, time_for_loading=5)\n if VerifyHardware.verify_server_hardware_exist(server_name=server_name, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' does not exist\" % server_name)\n return False\n\n CommonOperationServerHardware.click_server_hardware(server_name=server_name, timeout=5, time_for_loading=5)\n FusionUIBase.select_view_by_name(view_name='Hardware', timeout=5, fail_if_false=False)\n if VerifyHardware.is_create_profile_link_available() is False:\n logger.warn(\"server hardware '%s' does NOT have 'Create profile' link to perform creating profile\" % server_name)\n return False\n\n CommonOperationServerHardware.click_create_profile_link(server_name=server_name)\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(name=profile_name)\n # CreateServerProfile.input_description(description=description)\n\n if VerifyServerProfile.is_power_on_error_visible_when_create_server_profile(server_name=server_name, timeout=5, fail_if_false=False) is True:\n if CreateServerProfile.click_power_off_link_from_powered_on_error(server_name=server_name, timeout=5, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' is powered on but failed to power it off, creating simple server profile will FAIL\" % server_name)\n return False\n\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(server_name)\n\n if sht_selected[:2:] == 'BL':\n # maybe other needs according to SHT in the future\n pass\n\n CreateServerProfile.click_create_button()\n err_msg_boot_mode = CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode()\n if err_msg_boot_mode is not None:\n logger.warn(\"error message: ['%s'] when creating profile '%s'\" % (err_msg_boot_mode, profile_name))\n if 'select a boot mode' in err_msg_boot_mode.strip().lower():\n logger.debug(\"trying to set 'Boot mode' as 'Legacy BIOS' to remove this error message ...\")\n CommonOperationServerProfile.BootSettings.select_boot_mode_legacy_bios()\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unknown error message, cannot continue to create simple server profile\")\n return False\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n return False\n # ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if FusionUIBase.wait_activity_action_ok(profile_name, 'Create', timeout=720, fail_if_false=True) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, 
timeout=180, fail_if_false=True) is False:\n return False\n logger.info(\"created simple server profile '%s' successfully\" % profile_name)\n return True",
"def node_has_server_profile(func):\n def inner(self, *args, **kwargs):\n task = args[0]\n has_server_profile(task)\n return func(self, *args, **kwargs)\n return inner",
"def is_shared_profile_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsSharedProfileEnabled', self.handle))",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def _get_assigned_server_for_profile():\n selenium2lib = ui_lib.get_s2l()\n serverprofiledict = {}\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n\n for profobj in profile_list:\n if not select_server_profile(profobj):\n ui_lib.fail_test(\"Exiting function get assigned server, Not selected profile %s\" % profobj)\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_SELECTOR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_DROPDOWN_SELECT % 'Overview')\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_HARDWARE)\n strhardware = selenium2lib._get_text(FusionServerProfilesPage.ID_SERVER_HARDWARE)\n if strhardware != 'unassigned' and ('empty' not in strhardware):\n serverprofiledict[profobj] = strhardware\n return serverprofiledict",
"def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()",
"def verify_server_profile_boot_settings_info(*profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for _, profile in enumerate(profile_obj):\n logger.info(\"verifying Boot Settings info of a server profile named '%s'\" % profile.name)\n # check if server profile exists\n VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=True)\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=10)\n FusionUIBase.select_view_by_name(view_name='Boot Settings', timeout=5, fail_if_false=True)\n\n if profile.BootSettings.bootMode.lower() == 'legacy bios':\n VerifyServerProfile.verify_legacy_boot_settings(profile, timeout=10, fail_if_false=True)\n else:\n VerifyServerProfile.verify_non_legacy_boot_settings(profile, timeout=10, fail_if_false=True)",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def verify_can_edit_server_profile_general_info_when_server_power_on(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile.name)\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n # Server hardware must be \"power\" on status\n if not VerifyServerProfile.is_power_on_error_visible_when_edit_server_profile(profile.server, 10):\n logger.warn(\"Server hardware '%s' is not 'Powered on, please power on it\" % profile.server)\n continue\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=False)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n logger.warn(\"Only connection name is allowed to modification\")\n # add connections\n CommonOperationServerProfile.Connection().set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 
'SANStorage', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.warn(\"Modify the 'Advanced' section will return error when server power on, so ignore this setting\")\n\n EditServerProfile.click_ok_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def validate_server_profile_consistency_state(profile_obj):\n count = 0\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for _, profile in enumerate(profile_obj):\n rc = select_server_profile(profile.name)\n if not rc:\n logger.warn(\"Failed to select server profile '%s'\" % profile.name)\n continue\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_server_profile_consistency_status(profile.expected_state, timeout=5, fail_if_false=False):\n count += 1\n\n if count == len(profile_obj):\n logger.info(\"All of the given SPs passes consistency check.\")\n return True\n else:\n logger.warn(\"%s out of %s - the given SPs passes consistency check.\" % (count, len(profile_obj)))\n return False",
"def power_on_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n already_on_or_not_exists = 0\n powered_on = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"powering on a server profile named '%s'\" % profile.name)\n # check if server profile exists\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n already_on_or_not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=4)\n # check if already powered on\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_general_server_power(expect_value='Off', timeout=7, fail_if_false=False) is False:\n logger.warn(\"power state of server profile '%s' is not 'Off', 'POWER ON' action is unavailable.\" % profile.name)\n already_on_or_not_exists += 1\n else:\n if power_on_server_profile_by_name(profile.name) is False:\n logger.warn(\"server profile '%s' is NOT powered on successfully\" % profile.name)\n continue\n else:\n powered_on += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_on_or_not_exists == 0:\n # logger.warn(\"no server profile to power on! all %s server profile(s) is NOT applicable to power on (already powered on, or not existing), test is considered PASS\" % already_on_or_not_exists)\n logger.warn(\"no server profile to power on! all %s server profile(s) is NOT applicable to power on (already powered on, or not existing), keyword '%s' returns a 'False'\" % (already_on_or_not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if powered_on < total:\n logger.warn(\"not all of the server profile(s) is successfully powered on - %s out of %s powered on \" % (powered_on, total))\n if powered_on + already_on_or_not_exists == total:\n # logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, test is considered PASS\" % already_on_or_not_exists)\n logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, keyword '%s' returns a 'False'\" % (already_on_or_not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s already-on-or-not-existing server profile(s) is skipped being powered on, \"\n \"%s server profile(s) left is failed being powered on \" % (already_on_or_not_exists, total - powered_on - already_on_or_not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully powered on - %s out of %s \" % (powered_on, total))\n return True",
"def is_valid_profile(profile):\n\n return profile.metadata.get('os', 'unknown') == 'windows'",
"def verify_server_profile_status(expectedserverstatus, *profile_obj):\n\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n # if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n # else:\n # logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n # return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n # else:\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n # if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % serverhardware, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._warn(\"Server Hardware : \" + serverhardware + \" is not present in the ServerList of the Server Profile page\")\n # return False\n # else:\n # logger._log_to_console_and_log_file(\"Server Hardware : \" + serverhardware + \" is present in the ServerList and Hence verifying for the status..\")\n\n for profile in profile_obj:\n server_hardware = profile.server\n\n logger._log_to_console_and_log_file(\"Verifying status for profile %s\" % profile.name)\n\n if server_hardware == 'unassigned':\n logger._log_to_console_and_log_file(\"Server hardware is unassigned and cannot verify the server's power status\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(2) # wait for fields to load\n\n # ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n if expectedserverstatus == 'OK':\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'ERROR':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n 
ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'WARNING':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def verify_server_profile_bios_settings_info(*profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for _, profile in enumerate(profile_obj):\n logger.info(\"verifying server_profile_bios named '%s'\" % profile.name)\n # check if server profile exists\n VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=True)\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=10)\n FusionUIBase.select_view_by_name(view_name='BIOS Settings', timeout=10, fail_if_false=True)\n if hasattr(profile.BIOSSettings.Verify, 'ServerAssetInformation'):\n logger.info(\"verifying server_profile_bios expected values before power on named '%s'\" % profile.name)\n VerifyServerProfile.verify_server_asset_info(profile.name, profile.BIOSSettings.Verify.ServerAssetInformation)",
"def check_toolserver(env):\n try:\n blueprint = cli.get_env_blueprint(env)\n if blueprint == 'toolserver':\n return True\n else:\n return False\n except SystemExit:\n return False",
"def has(self, server):\n return (server in self.servers)",
"def verify_server_status(server_hardware):\n\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n else:\n logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Server Hardware : \" + server_hardware + \" is not present in the ServerList of the Server Profile page\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Server Hardware : \" + server_hardware + \" is present in the ServerList and Hence verifying for the status..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK'\")\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n return True",
"def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def bak_power_on_server_profile(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n logger._log_to_console_and_log_file(\"\")\n error = 0\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n valid_profiles = []\n excluded_profiles = []\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Powering on server profiles '%s'\" % profile.name)\n\n # Validate server profiles\n logger._log_to_console_and_log_file(\"Validating Server Profiles\")\n profile_names = _split_profile_names(profile.name)\n for profile_name in profile_names:\n profile_attributes = get_server_profile_attributes(profile_name, None)\n if profile_attributes is None:\n logger._warn(\"Server Profile '%s' does not exist\" % profile_name)\n selenium2lib.capture_page_screenshot()\n return False\n elif profile_attributes[\"server hardware\"] == \"unassigned\":\n logger._warn(\"Cannot power on Server Profile '%s' due to unassigned server hardware\" % profile_name)\n excluded_profiles.append(profile_name)\n elif profile_attributes[\"server power\"] == \"On\":\n logger._warn(\"Server Profile '%s' is already powered on\" % profile_name)\n excluded_profiles.append(profile_name)\n else:\n valid_profiles.append(profile_name)\n\n if len(valid_profiles) == 0:\n logger._warn(\"All specified Server Profiles are already powered on.\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select the profile from the left side table\n logger._log_to_console_and_log_file(\"Powering on Server Profiles\")\n if not select_server_profile(profile.name):\n logger._warn(\"Failed to select server profiles\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select Power off option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if selenium2lib._is_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWERON):\n logger._log_to_console_and_log_file(\"Powering on selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWERON)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"On\", PerfConstants.PROFILE_POWER_VALIDATION)\n BuiltIn().sleep(10)\n logger._log_to_console_and_log_file(\"Successfully powered on Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n logger._log_to_console_and_log_file(\"Power on option is not available in the Actions menu\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Build Activity Message\n args = {}\n args[\"activity\"] = \"Power On\"\n args[\"entity\"] = get_server_profile_attributes(profile_names[0], \"server hardware\") if len(profile_names) == 1 else \"%d server hardware\" % len(profile_names)\n args[\"multiple\"] = len(profile_names) - 1\n if args[\"multiple\"]:\n args[\"completed\"] = valid_profiles if len(valid_profiles) > 1 else [valid_profiles[0]]\n if len(excluded_profiles) > 0:\n args[\"excluded\"] = excluded_profiles if len(excluded_profiles) > 1 else [excluded_profiles[0]]\n\n # Verify Activity\n if not _verify_activity(**args):\n logger._warn(\"Failed to verify Power On Activity\")\n selenium2lib.capture_page_screenshot()\n error += 1\n else:\n logger._log_to_console_and_log_file(\"Successfully verified Power On Activity for Powering On Profile(s): '%s'\" % 
profile.name)\n\n if error > 0:\n return False\n return True",
"def is_vendor_profile_present(self):\n return self.is_element_present(self.vendor_profile_locator)",
"def verify_server_profile_connections_info(*profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n total = len(profile_obj)\n not_exists = 0\n verified_pass = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"verifying Connections info of a server profile named '%s'\" % profile.name)\n # check if server profile exists\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=4)\n # check if already powered off\n FusionUIBase.select_view_by_name(view_name='Connections', timeout=5, fail_if_false=False)\n conn_verify = profile.Connections.Verify\n\n for m, conn in enumerate(conn_verify):\n\n result = {}\n conn_num = m + 1\n\n # Expand the connection to for verification\n FusionUIBase.wait_for_element_and_click(GeneralServerProfilesElements.Connection.ID_TABLE_CONNECTION_DETAIL_INFO % conn_num, timeout=5, fail_if_false=False)\n\n if hasattr(conn, 'name'):\n if not VerifyServerProfile.verify_connections_name(expect_value=conn.name, number=conn_num, timeout=7, fail_if_false=False):\n logger.warn(\"'connect name' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.name))\n result['Connection Name'] = False\n else:\n result['Connection Name'] = True\n\n if hasattr(conn, 'port'):\n if not VerifyServerProfile.verify_connections_port(expect_value=conn.port, number=conn_num, timeout=7, fail_if_false=False):\n logger.warn(\"'connect port' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.port))\n result['Connection Port'] = False\n else:\n result['Connection Port'] = True\n\n if hasattr(conn, 'network'):\n if not VerifyServerProfile.verify_connections_network(expect_value=conn.network, number=conn_num, timeout=7, fail_if_false=False):\n logger.warn(\"'connect network' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.network))\n result['Connection Network'] = False\n else:\n result['Connection Network'] = True\n\n if hasattr(conn, 'boot'):\n if not VerifyServerProfile.verify_connections_boot(expect_value=conn.boot, number=conn_num, timeout=7, fail_if_false=False):\n logger.warn(\"'connect boot' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.boot))\n result['Connection Boot'] = False\n else:\n result['Connection Boot'] = True\n\n if hasattr(conn, 'FunctionType'):\n logger.info(\"Verifying connection '%s' is Type '%s\" % (conn.name, conn.FunctionType))\n if VerifyServerProfile.verify_connection_type(conn.FunctionType):\n logger.info(\"Connection 'Type' contains expected value '%s'\" % conn.FunctionType)\n\n if hasattr(conn, 'RequestedBandwidth'):\n if not conn.RequestedBandwidth.lower() == \"auto\":\n request_bandwidth = conn.RequestedBandwidth + ' Gb/s'\n else:\n request_bandwidth = conn.RequestedBandwidth\n if not VerifyServerProfile.verify_connections_requestedbandwidth(expect_value=request_bandwidth, timeout=7, fail_if_false=False):\n logger.warn(\"'connect RequestedBandwidth' of server profile '%s' is not '%s', verification failed.\" % (profile.name, 
request_bandwidth))\n result['Connection requestedbandwidth'] = False\n else:\n result['Connection requestedbandwidth'] = True\n\n if hasattr(conn, 'MaxBandwidth'):\n max_bandwidth = conn.MaxBandwidth + ' Gb/s'\n if not VerifyServerProfile.verify_connections_maxbandwidth(expect_value=max_bandwidth, timeout=7, fail_if_false=False):\n logger.warn(\"'connect RequestedBandwidth' of server profile '%s' is not '%s', verification failed.\" % (profile.name, max_bandwidth))\n result['Connection requestedbandwidth'] = False\n else:\n result['Connection requestedbandwidth'] = True\n\n if hasattr(conn, 'RequestedVirtualFunctions'):\n if not VerifyServerProfile.verify_connections_requested_virtual_functions_type(expect_value=conn.RequestedVirtualFunctions, timeout=15, fail_if_false=False):\n logger.warn(\"The expected value '%s' was not found from the attribute 'Requested virtual functions' of connection '%s' of server profile '%s'.\" % (conn.RequestedVirtualFunctions, conn.name, profile.name))\n result['Connection RequestedVirtualFunctions'] = False\n else:\n logger.info(\"The expected value '%s' was found from the attribute 'Requested virtual functions' of connection '%s' of server profile '%s'.\" % (conn.RequestedVirtualFunctions, conn.name, profile.name))\n result['Connection RequestedVirtualFunctions'] = True\n\n if hasattr(conn, 'Interconnect'):\n if not VerifyServerProfile.verify_connections_interconnect(expect_value=conn.Interconnect, timeout=15, fail_if_false=False):\n logger.warn(\"The expected value '%s' was not found from the attribute 'Interconnect' of connection '%s' of server profile '%s'.\" % (conn.Interconnect, conn.name, profile.name))\n result['Connection Interconnect'] = False\n else:\n logger.info(\"The expected value '%s' was found from the attribute 'Interconnect' of connection '%s' of server profile '%s'.\" % (conn.Interconnect, conn.name, profile.name))\n result['Connection Interconnect'] = True\n\n if hasattr(conn, 'IsRequestedVirtualFunctionsDisplayed'):\n if getattr(conn, 'IsRequestedVirtualFunctionsDisplayed', '').lower() == 'yes':\n if not VerifyServerProfile.verify_connections_requested_virtual_functions_visible(timeout=15):\n logger.warn(\"The attribute 'Requested virtual functions' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsRequestedVirtualFunctionsDisplayed'] = False\n else:\n logger.info(\"The attribute 'Requested virtual functions' of connection '%s' of server profile '%s' is visible.\" % (conn.name, profile.name))\n result['Connection IsRequestedVirtualFunctionsDisplayed'] = True\n if getattr(conn, 'IsRequestedVirtualFunctionsDisplayed', '').lower() == 'no':\n if not VerifyServerProfile.verify_connections_requested_virtual_functions_not_visible(timeout=15):\n logger.warn(\"The attribute 'Requested virtual functions' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsRequestedVirtualFunctionsDisplayed'] = False\n else:\n logger.info(\"The attribute 'Requested virtual functions' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsRequestedVirtualFunctionsDisplayed'] = True\n\n if hasattr(conn, 'IsAllocatedVirtualFunctionsDisplayed'):\n if getattr(conn, 'IsAllocatedVirtualFunctionsDisplayed', '').lower() == 'yes':\n if not VerifyServerProfile.verify_connections_allocated_virtual_functions_visible(timeout=15):\n logger.warn(\"The attribute 'Allocated virtual functions' of connection '%s' of 
server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = False\n else:\n logger.info(\"The attribute 'Allocated virtual functions' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = True\n if getattr(conn, 'IsAllocatedVirtualFunctionsDisplayed', '').lower() == 'no':\n if not VerifyServerProfile.verify_connections_allocated_virtual_functions_not_visible(timeout=15):\n logger.warn(\"The attribute 'Allocated virtual functions' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = False\n else:\n logger.info(\"The attribute 'Allocated virtual functions' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = True\n\n if hasattr(conn, 'IsAllocatedBandwidthDisplayed'):\n if getattr(conn, 'IsAllocatedBandwidthDisplayed', '').lower() == 'yes':\n if not VerifyServerProfile.verify_connections_allocated_bandwidth_visible(timeout=15):\n logger.warn(\"The attribute 'Allocated bandwidth' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedBandwidthDisplayed'] = False\n else:\n logger.info(\"The attribute 'Allocated bandwidth' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = True\n if getattr(conn, 'IsAllocatedVirtualFunctionsDisplayed', '').lower() == 'no':\n if not VerifyServerProfile.verify_connections_allocated_bandwidth_not_visible(timeout=15):\n logger.warn(\"The attribute 'Allocated bandwidth' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedBandwidthDisplayed'] = False\n else:\n logger.info(\"The attribute 'Allocated bandwidth' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsAllocatedVirtualFunctionsDisplayed'] = True\n\n if hasattr(conn, 'IsMaxBandwidthDisplyed'):\n if getattr(conn, 'IsMaxBandwidthDisplyed', '').lower() == 'yes':\n if not VerifyServerProfile.verify_connections_max_bandwidth_visible(timeout=15):\n logger.warn(\"The attribute 'Max bandwidth' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsMaxBandwidthDisplyed'] = False\n else:\n logger.info(\"The attribute 'Max bandwidth' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsMaxBandwidthDisplyed'] = True\n if getattr(conn, 'IsMaxBandwidthDisplyed', '').lower() == 'no':\n if not VerifyServerProfile.verify_connections_max_bandwidth_not_visible(timeout=15):\n logger.warn(\"The attribute 'Max bandwidth' of connection '%s' of server profile '%s' is visible\" % (conn.name, profile.name))\n result['Connection IsMaxBandwidthDisplyed'] = False\n else:\n logger.info(\"The attribute 'Max bandwidth' of connection '%s' of server profile '%s' is not visible\" % (conn.name, profile.name))\n result['Connection IsMaxBandwidthDisplyed'] = True\n\n if hasattr(conn, 'ConnectionStatus'):\n if not VerifyServerProfile.verify_connection_status(name=conn.name, expect_value=conn.ConnectionStatus, timeout=15, fail_if_false=False):\n logger.warn(\"The connection '%s' of server profile '%s' status is not 
'%s'.\" % (conn.name, profile.name, conn.ConnectionStatus))\n result['Connection ConnectionStatus'] = False\n else:\n logger.info(\"The connection '%s' of server profile '%s' status is '%s'.\" % (conn.name, profile.name, conn.ConnectionStatus))\n result['Connection ConnectionStatus'] = True\n\n if hasattr(conn, 'MACAddress'):\n\n if not VerifyServerProfile.verify_connections_macaddress_type(expect_value=conn.MACAddress, timeout=15, fail_if_false=False):\n logger.warn(\"'connect MACAddress' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.MACAddress))\n result['Connection Macaddress'] = False\n else:\n logger.info(\"success\")\n result['Connection Macaddress'] = True\n\n if hasattr(conn, 'WWPN'):\n\n if not VerifyServerProfile.verify_connections_wwpn(expect_value=conn.WWPN, timeout=15, fail_if_false=False):\n logger.warn(\"'WWPN' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.WWPN))\n result['WWPN'] = False\n else:\n logger.info(\"success\")\n result['WWPN'] = True\n\n if hasattr(conn, 'WWNN'):\n\n if not VerifyServerProfile.verify_connections_wwnn(expect_value=conn.WWNN, timeout=15, fail_if_false=False):\n logger.warn(\"'WWNN' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.WWNN))\n result['WWNN'] = False\n else:\n logger.info(\"success\")\n result['WWNN'] = True\n\n if hasattr(conn, 'BootVolume'):\n\n if not VerifyServerProfile.verify_connections_boot_volume(expect_value=conn.BootVolume, timeout=15, fail_if_false=False):\n logger.warn(\"'BOOT volume' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.BootVolume))\n result['BootVolume'] = False\n else:\n logger.info(\"success\")\n result['BootVolume'] = True\n\n if hasattr(conn, 'BootTarget'):\n\n if not VerifyServerProfile.verify_connections_boot_target(expect_value=conn.BootTarget, timeout=15, fail_if_false=False):\n logger.warn(\"'BootTarget' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.BootTarget))\n result['BootTarget'] = False\n else:\n logger.info(\"success\")\n result['BootTarget'] = True\n\n if hasattr(conn, 'BootLUN'):\n\n if not VerifyServerProfile.verify_connections_boot_lun(expect_value=conn.BootLUN, timeout=15, fail_if_false=False):\n logger.warn(\"'BootLUN' of server profile '%s' is not '%s', verification failed.\" % (profile.name, conn.BootLUN))\n result['BootLUN'] = False\n else:\n logger.info(\"success\")\n result['BootLUN'] = True\n\n if hasattr(conn, 'initiatorName'):\n logger.info(\"Verifying connection '%s' has Initiator name '%s\" % (conn.name, conn.initiatorName))\n if VerifyServerProfile.verify_connection_initiator_name(conn.initiatorName):\n logger.info(\"Connection 'Initiator name' contains expected value '%s'\" % conn.initiatorName)\n\n if hasattr(conn, 'initiatorIpv4'):\n logger.info(\"Verifying connection '%s' has Initiator IP address '%s\" % (conn.name, conn.initiatorIpv4))\n if VerifyServerProfile.verify_connection_initiator_ip(conn.initiatorIpv4):\n logger.info(\"Connection 'Initiator IP address' contains expected value '%s'\" % conn.initiatorIpv4)\n\n if hasattr(conn, 'subnetMask'):\n logger.info(\"Verifying connection '%s' has Initiator subnet mask '%s\" % (conn.name, conn.subnetMask))\n if VerifyServerProfile.verify_connection_initiator_subnet_mask(conn.subnetMask):\n logger.info(\"Connection 'Initiator subnet mask' contains expected value '%s'\" % conn.subnetMask)\n\n if hasattr(conn, 'gateway'):\n logger.info(\"Verifying connection '%s' has 
Initiator gateway '%s\" % (conn.name, conn.gateway))\n if VerifyServerProfile.verify_connection_initiator_gateway(conn.gateway):\n logger.info(\"Connection 'Initiator gateway' contains expected value '%s'\" % conn.gateway)\n\n if hasattr(conn, 'targetName'):\n logger.info(\"Verifying connection '%s' has Target name '%s\" % (conn.name, conn.targetName))\n if VerifyServerProfile.verify_connection_target_name(conn.targetName):\n logger.info(\"Connection 'Target name' contains expected value '%s'\" % conn.targetName)\n\n if hasattr(conn, 'targetLun'):\n logger.info(\"Verifying connection '%s' has Target LUN '%s\" % (conn.name, conn.targetLun))\n if VerifyServerProfile.verify_connection_target_lun(conn.targetLun):\n logger.info(\"Connection 'Target LUN' contains expected value '%s'\" % conn.targetLun)\n\n if hasattr(conn, 'targetIp'):\n target_ip = ':'.join([conn.targetIp, conn.targetPort])\n logger.info(\"Verifying connection '%s' has Target IP address '%s'\" % (conn.name, target_ip))\n if VerifyServerProfile.verify_connection_target_ip(target_ip):\n logger.info(\"Connection 'Target IP address' contains expected value '%s'\" % target_ip)\n\n if hasattr(conn, 'secondIp'):\n second_ip = ':'.join([conn.secondIp, conn.secondPort])\n logger.info(\"Verifying connection '%s' has Second IP address '%s\" % (conn.name, second_ip))\n if VerifyServerProfile.verify_connection_second_ip(second_ip):\n logger.info(\"Connection 'Second IP address' contains expected value '%s'\" % second_ip)\n\n if hasattr(conn, 'chapLvl'):\n if conn.chapLvl == 'None':\n VerifyServerProfile.verify_connection_chap_name(\"not set\")\n VerifyServerProfile.verify_connection_mchap_name_not_visible()\n elif conn.chapLvl == 'CHAP':\n VerifyServerProfile.verify_connection_chap_name(conn.chapName)\n VerifyServerProfile.verify_connection_mchap_name_not_visible()\n elif conn.chapLvl == 'Mutual CHAP':\n VerifyServerProfile.verify_connection_chap_name(conn.chapName)\n VerifyServerProfile.verify_connection_mchap_name(conn.mchapName)\n\n if all(result.values()) is not True:\n logger.warn(\"server profile '%s' is FAIL for connections info verification\" % profile.name)\n return False\n else:\n logger.info(\"server profile '%s' is PASS for connections info verification\" % profile.name)\n\n # Collapse the connection after verification\n FusionUIBase.wait_for_element_and_click(GeneralServerProfilesElements.Connection.ID_TABLE_CONNECTION_DETAIL_INFO_EXPAND % conn_num, timeout=5, fail_if_false=False)\n\n verified_pass += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to verify connections info against! 
all %s server profile(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if verified_pass < total:\n logger.warn(\"not all of the server profile(s) is successfully verified PASS - %s out of %s passed \" % (verified_pass, total))\n if verified_pass + not_exists == total:\n # logger.warn(\"%s not-existing server profile(s) is skipped, test is considered FAIL\" % not_exists)\n logger.warn(\"%s not-existing server profile(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped, \"\n \"%s server profile(s) left is failed being verified PASS \" % (not_exists, total - verified_pass - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully verified PASS - %s out of %s \" % (verified_pass, total))\n return True",
"def verify_server_profile_power_status(expectedpowerstatus, *profile_obj):\n\n # logger._log_to_console_and_log_file(\"\")\n # logger._log_to_console_and_log_file(\"Verifying the power status of list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n error = 0\n\n for profile in profile_obj:\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying power status for profile %s\" % profile.name)\n\n profile_names = _split_profile_names(profile.name)\n for profile_name in profile_names:\n status = get_server_profile_attributes(profile_name)\n\n if status[\"server hardware\"] == 'unassigned':\n logger._warn(\"Server profile '%s' has unassigned server hardware and cannot verify the server's power status, skip\" % profile_name)\n selenium2lib.capture_page_screenshot()\n # error += 1\n continue\n\n if status[\"server power\"].lower() == expectedpowerstatus.lower():\n logger._log_to_console_and_log_file(\"Successfully verified Server Profile '%s' power status: %s\" % (profile_name, status[\"server power\"]))\n continue\n else:\n logger._warn(\"Failed to verify Server Profile '%s' power status: %s, expect: %s\" % (profile_name, status[\"server power\"].lower(), expectedpowerstatus))\n selenium2lib.capture_page_screenshot()\n error += 1\n\n if error > 0:\n return False\n return True",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def cold_boot_server_profiles(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n off_or_unsupported = 0\n not_exists = 0\n done_cold_boot = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"cold boot a server profile named '%s'\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=2)\n if VerifyServerProfile.verify_general_server_power(expect_value='On', timeout=5, fail_if_false=False) is False:\n logger.warn(\"Power state of server profile '%s' is not 'On', 'RESET -> COLD BOOT' action is unavailable.\" % profile.name)\n off_or_unsupported += 1\n else:\n if cold_boot_server_profile_by_name(profile.name) is False:\n logger.warn(\"server profile '%s' is NOT done cold boot successfully\" % profile.name)\n continue\n else:\n done_cold_boot += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - off_or_unsupported - not_exists == 0:\n logger.warn(\"no server profile to cold boot! all %s server profile(s) is NOT applicable to cold boot (already powered off/unsupported/not existing), test is considered PASS\" % off_or_unsupported)\n return True\n else:\n if done_cold_boot < total:\n logger.warn(\"not all of these server profile(s) is successfully done cold boot - %s out of %s done cold boot \" % (done_cold_boot, total))\n if done_cold_boot + off_or_unsupported + not_exists == total:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, test is considered PASS\" % (off_or_unsupported, not_exists))\n return True\n else:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, \"\n \"%s left is failed to cold boot \" % (off_or_unsupported, not_exists, total - done_cold_boot - off_or_unsupported - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully done cold boot - %s out of %s \" % (done_cold_boot, total))\n return True",
"def profile_exists(profile):\n if os.path.isfile(AWS_CREDENTIALS_FILE):\n boto.config.load_credential_file(AWS_CREDENTIALS_FILE)\n if boto.config.get(profile, 'region'):\n return True\n else:\n return False\n return False",
"def _validate_server_profile_template_manage_boot(server_profile_template):\n manage_boot = server_profile_template.get('boot', {}).get('manageBoot')\n\n if not manage_boot:\n message = _(\"Server Profile Template: %s, does not allow to manage \"\n \"boot order.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def bak_verify_server_profile_general_info(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n for profile in profile_obj:\n server = profile.server\n hardwaretype = profile.hardwareType\n enclosuregroup = profile.enclgroup\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(5) # wait for fields to load\n\n logger.info(\"Verifying server hardware for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_SERVER, server, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_SERVER)\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, server))\n selenium2lib.capture_page_screenshot()\n return False\n\n logger.info(\"Verifying server hardware type for profile %s\" % profile.name)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_HARDWARE, PerfConstants.DEFAULT_SYNC_TIME, fail_if_false=False) is True:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_HARDWARE)\n if txt.find(hardwaretype) == -1:\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, hardwaretype))\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger.warn(\"Failed to wait server hardware type field display\")\n return False\n\n logger.info(\"Verifying enclosure group for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP, enclosuregroup, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP)\n logger.info(\"Enclosure group of server : %s is not as expected [%s]\" % (txt, enclosuregroup))\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def profile_checking(ipydir, profile):\n try:\n profile = ProfileDir.find_profile_dir_by_name(ipydir, profile)\n return True\n except ProfileDirError:\n return False"
] | [
"0.6275056",
"0.61501735",
"0.61437684",
"0.6112742",
"0.6090006",
"0.60501087",
"0.60271287",
"0.599908",
"0.59340584",
"0.5925858",
"0.5887888",
"0.58697414",
"0.58384514",
"0.582165",
"0.57998717",
"0.5766464",
"0.5734024",
"0.57302743",
"0.5717543",
"0.5691349",
"0.5642978",
"0.5634335",
"0.5619797",
"0.55718887",
"0.55689955",
"0.55447894",
"0.5531784",
"0.5522516",
"0.55131304",
"0.54777575"
] | 0.7312112 | 0 |
Validate if the Server Profile Template is consistent. | def _validate_node_server_profile_template(oneview_client, oneview_info):
server_profile_template = oneview_client.server_profile_templates.get(
oneview_info['server_profile_template_uri'])
server_hardware = oneview_client.server_hardware.get(
oneview_info['server_hardware_uri'])
_validate_server_profile_template_server_hardware_type(
server_profile_template, server_hardware)
_validate_spt_enclosure_group(server_profile_template, server_hardware)
_validate_server_profile_template_manage_boot(server_profile_template) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_server_profile_template_manage_boot(server_profile_template):\n manage_boot = server_profile_template.get('boot', {}).get('manageBoot')\n\n if not manage_boot:\n message = _(\"Server Profile Template: %s, does not allow to manage \"\n \"boot order.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def _template_isvalid(template_body: str, region: str, profile: str = None) -> bool:\n logger.debug(f\"checking if template is valid in region {region}\")\n cfn_client = _get_cfn_client(region=region, profile=profile)\n try:\n cfn_client.validate_template(TemplateBody=template_body)\n except Exception as e:\n if 'Template format error' in e.__str__():\n logger.warning(e)\n return False\n else:\n raise e\n logger.debug(f\"template is valid\")\n return True",
"def validate_template_config(template_config):\n return template_config_schema.validate(template_config)",
"def validate_server_profile_consistency_state(profile_obj):\n count = 0\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for _, profile in enumerate(profile_obj):\n rc = select_server_profile(profile.name)\n if not rc:\n logger.warn(\"Failed to select server profile '%s'\" % profile.name)\n continue\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_server_profile_consistency_status(profile.expected_state, timeout=5, fail_if_false=False):\n count += 1\n\n if count == len(profile_obj):\n logger.info(\"All of the given SPs passes consistency check.\")\n return True\n else:\n logger.warn(\"%s out of %s - the given SPs passes consistency check.\" % (count, len(profile_obj)))\n return False",
"def spt_verify_required_fields_for_iscsi_boot(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"Creating Server Profile Template | %s | ...\" % profile.name)\n\n # checking if the profile already exists\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile.name, fail_if_false=True):\n ui_lib.fail_test(\"Server Profile Template | %s | already exists\" % profile.name)\n\n # open Create SP dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile.name)\n\n if hasattr(profile, 'ref_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.ref_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.ref_server, hardware_type))\n CreateServerProfileTemplate.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfileTemplate.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfileTemplate.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n\n if hasattr(profile, 'Connections'):\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n\n # add connections with blank iSCSI boot data and verify required field error messages\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start adding connections ...\")\n\n for n, connection in enumerate(profile.Connections):\n logger.info(\"--- <connections> ---: {2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile.Connections), '-' * 14))\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n logger.debug(\"test data for connection '<%s>' is found: '<%s>'\" % (connection.name, connection), also_console=False)\n\n # Verify the connection does not exist\n CommonOperationServerProfileTemplate.Connection.verify_connection_not_exist(connection.name, fail_if_false=True)\n\n # Add the connection\n CommonOperationServerProfileTemplate.Connection.click_add_connection_button()\n CommonOperationServerProfileTemplate.Connection.wait_add_connection_dialog_shown()\n\n CommonOperationServerProfileTemplate.Connection.input_name(connection.name)\n CommonOperationServerProfileTemplate.Connection.select_function_type_by_text(connection.FunctionType, fail_if_false=True)\n CommonOperationServerProfileTemplate.Connection.input_select_network(connection.network)\n CommonOperationServerProfileTemplate.Connection.input_select_port(connection.port)\n CommonOperationServerProfileTemplate.Connection.input_requested_bandwidth(connection.RequestedBandwidth)\n CommonOperationServerProfileTemplate.Connection.select_boot_by_text(connection.boot, fail_if_false=True)\n\n # Input information for the iSCSI boot connection\n if connection.boot == 'iSCSI primary' or connection.boot == 'iSCSI secondary':\n CommonOperationServerProfileTemplate.Connection.set_iscsi_boot_options(connection)\n\n # Click \"Add\" button\n CommonOperationServerProfileTemplate.Connection.click_add_button()\n\n # Verify error messages & text field visibility\n 
CommonOperationServerProfileTemplate.Connection.verify_iscsi_initiator_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_initiator_ip_not_visible()\n CommonOperationServerProfile.Connection.verify_iscsi_subnet_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_gateway_error_message(\"\")\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_vlan_id_not_visible()\n\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_target_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_target_lun_not_visible()\n CommonOperationServerProfile.Connection.verify_iscsi_target_ip_error_message(\"\")\n\n if getattr(connection, \"targetIp\", \"\") is not \"\" and getattr(connection, \"targetPort\", \"\") is \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"\")\n\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n\n if getattr(connection, \"secondIp\", \"\") is not \"\" and getattr(connection, \"secondPort\", \"\") is \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"\")\n\n if hasattr(connection, \"chapLvl\"):\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_chap_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_chap_secret_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_mchap_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_mchap_secret_not_visible()\n\n # Click \"Cancel\" button\n CommonOperationServerProfileTemplate.Connection.click_cancel_button()\n else:\n ui_lib.fail_test(\"Connections object not present in data file for profile template with name | %s |\" % profile.name)\n\n CreateServerProfileTemplate.click_cancel_button()",
"def validate_template(self, contents):\n try:\n self.conn.validate_template(template_body=contents)\n return True\n except BotoServerError as e:\n print contents\n print e.message\n raise",
"def delete_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n deleted = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template.name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n else:\n if delete_server_profile_template_by_name(profile_template.name) is False:\n logger.warn(\"server profile template '%s' is NOT deleted successfully, or 'Delete' action is not found in right-side-bar list.\" % profile_template.name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def is_valid_profile(profile):\n\n return profile.metadata.get('os', 'unknown') == 'windows'",
"async def _validate_template(self, template):\n try:\n templater.Template(template, self.hass).async_render()\n return True\n except Exception as exception: # pylint: disable=broad-except\n _LOGGER.error(exception)\n pass\n return False",
"def is_template_valid(template: JSONDict) -> JSONDict:\n\n errors = _rec_is_template_valid(template)\n errors.extend(_check_cyclic_defaults(template))\n\n if errors:\n msg = collate_errors(when=\"checking the template\", errors=errors)\n raise ParselglossyError(msg)\n\n return _reorder_template(template)",
"def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def check_process_ready(project, profile):\n errors = []\n input_templates = InputTemplate.objects.filter(\n corresponding_profile=profile\n )\n for template in input_templates:\n file_settings_amount = FileSetting.objects.filter(\n file__project=project, input_template=template\n ).count()\n if template.optional and template.unique and file_settings_amount > 1:\n errors.append(\n \"Template '{} ({})' requires a unique file but multiple were specified.\".format(\n template.template_id, template.label\n )\n )\n elif (\n not template.optional\n and template.unique\n and file_settings_amount != 1\n ):\n errors.append(\n \"Template '{} ({})' requires a unique file but {} were specified.\".format(\n template.template_id, template.label, file_settings_amount\n )\n )\n elif (\n not template.optional\n and not template.unique\n and file_settings_amount < 1\n ):\n errors.append(\n \"Template '{} ({})' requires a file but none were specified\".format(\n template.template_id, template.label\n )\n )\n\n for parameter in profile.script.variable_parameters:\n try:\n ParameterSetting.objects.get(\n project=project, base_parameter=parameter\n )\n except ParameterSetting.DoesNotExist:\n errors.append(\n \"Parameter '{}' requires a value but none is given.\".format(\n parameter\n )\n )\n return errors",
"def delete_all_appliance_server_profile_templates():\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n profile_template_name_list = CommonOperationServerProfileTemplate.get_server_profile_template_list()\n\n total = len(profile_template_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_template_name in enumerate(profile_template_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template_name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template_name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template_name)\n not_exists += 1\n else:\n if not delete_server_profile_template_by_name(profile_template_name):\n logger.warn(\"server profile template '%s' is NOT deleted successfully.\" % profile_template_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def copy_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=8)\n\n total = len(profile_template_obj)\n source_not_exists = 0\n target_already_exists = 0\n copied = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile template with name '%s' ...\" % profile_template.source)\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.source, fail_if_false=False) is False:\n logger.warn(\"source server profile template '%s' does not exist\" % profile_template.source)\n source_not_exists += 1\n continue\n\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False) is False:\n logger.warn(\"target server profile template '%s' already exists!\" % profile_template.name)\n target_already_exists += 1\n continue\n\n # open Copy SP dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.source)\n\n CopyServerProfileTemplate.select_action_copy()\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfileTemplate.input_name(profile_template.name)\n CopyServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = CopyServerProfileTemplate.get_selected_server_hardware_type(profile_template.name)\n # if profile_template.hardwareType not in sht_selected:\n # logger.warn(\"server hardware type '%s' of server profile template '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile_template.name, profile_template.hardwareType))\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n CopyServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfileTemplate.Advanced.set(profile_template)\n\n CopyServerProfileTemplate.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile_template.source, profile_template.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - source_not_exists - target_already_exists == 0:\n logger.warn(\"no server profile template to copy! 
all %s server profile template(s) is either source-NOT-existing or target-ALREADY-existing, test is considered FAILED\" % (source_not_exists + target_already_exists))\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile template(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + source_not_exists + target_already_exists == total:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, test is considered FAILED\" % (source_not_exists, target_already_exists))\n return False\n else:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, %s template(s) left is failed being copied \" % (source_not_exists, target_already_exists, total - copied - source_not_exists - target_already_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def _check_template_name(self, template):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), template, '__init__.ini')\n if self._check_file_exists(filename) and self._check_access(filename, os.R_OK):\n return True\n else:\n return False",
"def _verfify_auth_and_profiles_data (self, data):\n if type(data.get('profiles')) == dict:\n if len(str(data.get('authURL', ''))) > 10 and len(str(data.get('authURL', ''))) < 50:\n return True\n return False",
"def is_template(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTemplate', self.handle))",
"def create_profile_from_template(*template_profile_obj):\n\n logger._log_to_console_and_log_file(\"Navigating to server profile template page...\")\n if not navigate():\n return False\n\n if isinstance(template_profile_obj, test_data.DataObj):\n template_profile_obj = [template_profile_obj]\n elif isinstance(template_profile_obj, tuple):\n template_profile_obj = list(template_profile_obj[0])\n\n for prof in template_profile_obj:\n\n \"\"\" Selecting profile template \"\"\"\n if not select_profile_template(prof.templ_name):\n ui_lib.fail_test(\"profile template is not present in template list\")\n\n logger._log_to_console_and_log_file(\"verifying for profile existence before proceeding to create\")\n if prof.has_property(\"prof_name\") and prof.prof_name.strip() != \"\":\n if serverprofiles.select_server_profile(prof.prof_name):\n ui_lib.fail_test(\"FAIL: Server profile '{0}' is already present\".format(prof.prof_name))\n else:\n ui_lib.fail_test(\"'prof_name' is a mandatory field and should not be empty\")\n\n logger._log_to_console_and_log_file(\"Powering of server '{0}\".format(prof.server))\n if prof.server.strip() != \"unassigned\" and not (serverhardware.power_off_server(prof.server)):\n ui_lib.fail_test(\"Can't proceed with server profile creation on server %s\" % prof.server)\n\n if not ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_PAGE_LABEL):\n if not navigate():\n ui_lib.fail_test(\"FAIL: failed to navigate profile template page\")\n\n logger._log_to_console_and_log_file(\"Selecting Create server profile option from Actions menu\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE)\n\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME)\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME, prof.prof_name)\n\n if prof.has_property(\"prof_description\") and prof.prof_description.strip() != \"\":\n logger._log_to_console_and_log_file(\"Entering profile description: '{0}'\".format(prof.prof_description))\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_DESCRIPTION, prof.prof_description)\n\n if prof.has_property(\"server\") and prof.server.strip() != \"\":\n logger._log_to_console_and_log_file(\"Selecting sever '{0}' to create profile\".format(prof.server))\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server):\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server)\n logger._log_to_console_and_log_file(\"Selected valid server hardware\")\n else:\n ui_lib.fail_test(\"Provided server '{0}' is not a valid\".format(prof.server))\n else:\n ui_lib.fail_test(\"'server' name is a mandatory field and should not be empty\")\n\n if prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'false':\n logger._log_to_console_and_log_file(\"Creating server profile from template without overriding template\")\n elif prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'true':\n logger._log_to_console_and_log_file(\"Creating 
server profile from template with overriding template\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_CHECKBOX_OVERRIDE_TEMPALTE)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE, PerfConstants.SELECT_ENCLOSURE * 3)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.SELECT_ENCLOSURE)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.WAIT_UNTIL_CONSTANT):\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n else:\n ui_lib.fail_test(ui_lib.get_text(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR))\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % prof.prof_name, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.ignore_staleElementRefException(\"_is_visible\", FusionServerProfilesPage.ID_PROFILE_CHANGING)\n logger._log_to_console_and_log_file(\"Waiting for profile creation to complete..\")\n\n logger._log_to_console_and_log_file(\"Validating profile %s\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ELEMENT_ACTIVITY % prof.prof_name):\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_OK, PerfConstants.CREATE_SERVER_PROFILE_TIME):\n logger._log_to_console_and_log_file(\"Profile template %s created\" % prof.prof_name)\n elif ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_WARNING):\n logger._warn(\"Profile %s created with warning\" % prof.prof_name)\n else:\n logger._warn(\"Failed to create server profile %s\" % prof.prof_name)\n return False\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n\n return True",
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def check_zone_template(template):\n if not os.path.isfile(os.path.join(ZONE_TMPL_DIR, template +\n ZONE_TMPL_SUFFIX)):\n raise ZoneException(\"Template %s does not exist.\" % (template))",
"def testProfileCreation(self):\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\n \n self.assertEqual(small_tree1_equality, True)\n self.assertEqual(small_tree2_equality, True)\n self.assertEqual(known_tree1_equality, True)\n self.assertEqual(known_tree2_equality, True)",
"def testProfileCreation(self):\r\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\r\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\r\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\r\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\r\n\r\n self.assertEqual(small_tree1_equality, True)\r\n self.assertEqual(small_tree2_equality, True)\r\n self.assertEqual(known_tree1_equality, True)\r\n self.assertEqual(known_tree2_equality, True)",
"def profile_exists(profile):\n if os.path.isfile(AWS_CREDENTIALS_FILE):\n boto.config.load_credential_file(AWS_CREDENTIALS_FILE)\n if boto.config.get(profile, 'region'):\n return True\n else:\n return False\n return False",
"def is_vendor_profile_page_loaded_properly(self):\n return self.is_element_present(self.save_vendor_profile_locator)",
"def _verify_profile(self, account_id: str, profile_id: str) -> str:\n profile = self.__ingest_profile\n if profile_id and self.__ip.ProfileExists(account_id=account_id, profile_id=profile_id):\n profile = profile_id\n elif self.__ingest_profile=='':\n response = self.__ip.GetDefaultProfile(account_id=account_id)\n if response.status_code in DynamicIngest.success_responses:\n profile = response.json().get('default_profile_id')\n return profile",
"def validate_server_profile_task_step(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n not_exists = 0\n verified = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"Validate server profile <%s> task contains <%s>\" % (profile.name, profile.method))\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n\n CommonOperationServerProfile.click_server_profile(profile.name)\n FusionUIBase.select_view_by_name(view_name='Activity', timeout=5, fail_if_false=False)\n CommonOperationServerProfile.click_activity_collapser(profile.task)\n timeout = int(getattr(profile, 'validate_timeout', '5'))\n ret = VerifyServerProfile.verify_activity_contains_text(profile.method, timeout=timeout, fail_if_false=False)\n # Verify method text not exist in steps\n if getattr(profile, 'exist', '').lower() == 'false':\n if ret is True:\n ui_lib.fail_test(\"%s should not exist in task steps\" % profile.method)\n elif ret is False:\n ui_lib.fail_test(\"%s should exist in task steps\" % profile.method)\n\n logger.info(\"Server profile '%s' got the correct task method\" % profile.name)\n verified += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to view! all %s server profile(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if verified < total:\n logger.warn(\"not all of task for the server profile(s) is successfully verified - %s out of %s verified \" % (verified, total))\n if verified + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped, %s profile(s) left is failed being verified \" % (not_exists, total - verified - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully verified - %s out of %s \" % (verified, total))\n return True",
"def validate_template(template):\n if not isinstance(template, Template):\n raise TypeError(\"%s is not a template\" % template)"
] | [
"0.7069994",
"0.68792826",
"0.66662836",
"0.6568979",
"0.623397",
"0.6207681",
"0.6184548",
"0.6091524",
"0.6008042",
"0.5996494",
"0.59735686",
"0.5967848",
"0.5924284",
"0.5894213",
"0.58178085",
"0.5812006",
"0.57749254",
"0.5769697",
"0.5671442",
"0.56670874",
"0.5555338",
"0.5547055",
"0.5524649",
"0.5462689",
"0.5451946",
"0.5450486",
"0.54311395",
"0.5413361",
"0.5406482",
"0.5404834"
] | 0.7610132 | 0 |
Validate that the Server Profile Template and the Server Hardware have the same Server Hardware Type | def _validate_server_profile_template_server_hardware_type(
server_profile_template, server_hardware):
spt_server_hardware_type_uri = (
server_profile_template.get('serverHardwareTypeUri')
)
sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')
if spt_server_hardware_type_uri != sh_server_hardware_type_uri:
message = _(
"Server profile template %(spt_uri)s serverHardwareTypeUri is "
"inconsistent with server hardware %(server_hardware_uri)s "
"serverHardwareTypeUri.") % {
'spt_uri': server_profile_template.get('uri'),
'server_hardware_uri': server_hardware.get('uri')}
raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_node_server_hardware_type(oneview_client, oneview_info):\n node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')\n\n if server_hardware_sht_uri != node_server_hardware_type_uri:\n message = _(\"Node server_hardware_type_uri is inconsistent \"\n \"with OneView's server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def _validate_server_profile_template_manage_boot(server_profile_template):\n manage_boot = server_profile_template.get('boot', {}).get('manageBoot')\n\n if not manage_boot:\n message = _(\"Server Profile Template: %s, does not allow to manage \"\n \"boot order.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def verify_server_status(server_hardware):\n\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n else:\n logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Server Hardware : \" + server_hardware + \" is not present in the ServerList of the Server Profile page\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Server Hardware : \" + server_hardware + \" is present in the ServerList and Hence verifying for the status..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK'\")\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n return True",
"def _validate_node_port_mac_server_hardware(oneview_client,\n oneview_info, ports):\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n if not ports:\n return\n\n # NOTE(nicodemos) If hponeview client's unable to get the MAC of the Server\n # Hardware and raises an exception, the driver will try to get it from\n # the iLOrest client.\n try:\n mac = _get_server_hardware_mac(server_hardware)\n except exception.OneViewError:\n mac = _get_server_hardware_mac_from_ilo(server_hardware)\n\n incompatible_macs = []\n for port in ports:\n if port.address.lower() == mac.lower():\n return\n incompatible_macs.append(port.address)\n\n message = _(\"The ports of the node are not compatible with its \"\n \"server hardware %(server_hardware_uri)s. There are no Ironic \"\n \"port MAC's: %(port_macs)s, that matches with the \"\n \"server hardware's MAC: %(server_hardware_mac)s\") % {\n 'server_hardware_uri': server_hardware.get('uri'),\n 'port_macs': ', '.join(incompatible_macs),\n 'server_hardware_mac': mac}\n raise exception.OneViewError(message)",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES\n self.check_error(StratisCliInUseSameTierError, command_line, _ERROR)",
"def test_parameters(self):\n # Try to create a machine without an image.\n status = self.proxy.server.create(PROVIDER_ID)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine without a flavor.\n status = self.proxy.server.create(PROVIDER_ID, IMAGE)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong image format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong flavor format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong machine_numbers.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"flavor=flavor\"], -1\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong userdata.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"userdata\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong scheduler_hints.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n \"scheduler_hints\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"meta\"]\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with reserved meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"mysql-fabric=True\"]\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Create a machine.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"name=meta\"]\n )\n self.check_xmlrpc_command_result(status)\n\n # TODO: Test other parameters that were included with database.",
"def verify_required_fields_for_iscsi_boot(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"Creating Server Profile for server | %s | ...\" % profile.name)\n\n # checking if the profile already exists\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n ui_lib.fail_test(\"Server profile | %s | already exists\" % profile.name)\n\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(\n profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not 
None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n\n if hasattr(profile, 'Connections'):\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n\n # add connections with blank iSCSI boot data and verify required field error messages\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start adding connections ...\")\n\n for n, connection in enumerate(profile.Connections):\n logger.info(\"--- <connections> ---: {2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile.Connections), '-' * 14))\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n logger.debug(\"test data for connection '<%s>' is found: '<%s>'\" % (connection.name, connection), also_console=False)\n\n # Verify the connection does not exist\n CommonOperationServerProfile.Connection.verify_connection_not_exist(connection.name, fail_if_false=True)\n\n # Add the connection\n CommonOperationServerProfile.Connection.click_add_connection_button()\n CommonOperationServerProfile.Connection.wait_add_connection_dialog_shown()\n\n CommonOperationServerProfile.Connection.input_name(connection.name)\n CommonOperationServerProfile.Connection.select_function_type_by_text(connection.FunctionType, fail_if_false=True)\n CommonOperationServerProfile.Connection.input_select_network(connection.network)\n CommonOperationServerProfile.Connection.input_select_port(connection.port)\n CommonOperationServerProfile.Connection.input_requested_bandwidth(connection.RequestedBandwidth)\n CommonOperationServerProfile.Connection.select_boot_by_text(connection.boot, fail_if_false=True)\n\n # Input information for the iSCSI boot connection. Data file should have blanks for all fields except secondIp.\n if connection.boot == 'iSCSI primary' or connection.boot == 'iSCSI secondary':\n CommonOperationServerProfile.Connection.set_iscsi_boot_options(connection)\n\n # Click \"Add\" button\n CommonOperationServerProfile.Connection.click_add_button()\n\n # Verify error messages\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_subnet_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_gateway_error_message(\"\")\n\n if hasattr(connection, \"vlanId\"):\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"\")\n\n CommonOperationServerProfile.Connection.verify_iscsi_target_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_lun_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"This field is required.\")\n\n if getattr(connection, \"secondIp\", \"\") is not \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n 
CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"\")\n\n if hasattr(connection, \"chapLvl\"):\n if connection.chapLvl == \"None\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"Mutual CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"This field is required.\")\n\n # Click \"Cancel\" button\n CommonOperationServerProfile.Connection.click_cancel_button()\n else:\n ui_lib.fail_test(\"Connections object not present in data file for profile with name | %s |\" % profile.name)\n\n CreateServerProfile.click_cancel_button()",
"def test_check_ess_settings(self):\n ess_settings1 = {'gaussian': [self.servers[0]], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': [self.servers[0]]}\n ess_settings2 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings3 = {'gaussian': self.servers[0], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': self.servers[0]}\n ess_settings4 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings5 = {'gaussian': 'local', 'molpro': self.servers[1], 'qchem': self.servers[0]}\n\n ess_settings1 = check_ess_settings(ess_settings1)\n ess_settings2 = check_ess_settings(ess_settings2)\n ess_settings3 = check_ess_settings(ess_settings3)\n ess_settings4 = check_ess_settings(ess_settings4)\n ess_settings5 = check_ess_settings(ess_settings5)\n\n ess_list = [ess_settings1, ess_settings2, ess_settings3, ess_settings4, ess_settings5]\n\n for ess in ess_list:\n for soft, server_list in ess.items():\n self.assertTrue(soft in ['gaussian', 'molpro', 'qchem'])\n self.assertIsInstance(server_list, list)\n\n with self.assertRaises(SettingsError):\n ess_settings6 = {'nosoft': ['server1']}\n check_ess_settings(ess_settings6)\n with self.assertRaises(SettingsError):\n ess_settings7 = {'gaussian': ['noserver']}\n check_ess_settings(ess_settings7)",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME] + self.devices\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def __compare_types_instances(self, policies, instance_type):\n zones = availabilityZones()\n types_ins = zones.get_typevm_zones()\n\n if ( types_ins[instance_type]['cpu'] == policies['cpu'] and\n types_ins[instance_type]['ram'] == policies['ram'] and\n types_ins[instance_type]['disk']== policies['disk'] ):\n return 1\n return 0",
"def test_create_hyperflex_server_firmware_version(self):\n pass",
"def server_type(self):\n ...",
"def _validate_spt_enclosure_group(server_profile_template, server_hardware):\n spt_enclosure_group_uri = server_profile_template.get('enclosureGroupUri')\n sh_enclosure_group_uri = server_hardware.get('serverGroupUri')\n\n if spt_enclosure_group_uri != sh_enclosure_group_uri:\n message = _(\"Server profile template %(spt_uri)s enclosureGroupUri is \"\n \"inconsistent with server hardware %(sh_uri)s \"\n \"serverGroupUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'sh_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def sameHardware(self, other):\n\n return (self.vendorId == other.vendorId and \\\n self.deviceId == other.deviceId and \\\n self.physicalMemory == other.physicalMemory and \\\n self.osInfo == other.osInfo and \\\n self.cpuSpeed[0] == other.cpuSpeed[0])",
"def verify_server_profile_status(expectedserverstatus, *profile_obj):\n\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n # if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n # else:\n # logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n # return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n # else:\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n # if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % serverhardware, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._warn(\"Server Hardware : \" + serverhardware + \" is not present in the ServerList of the Server Profile page\")\n # return False\n # else:\n # logger._log_to_console_and_log_file(\"Server Hardware : \" + serverhardware + \" is present in the ServerList and Hence verifying for the status..\")\n\n for profile in profile_obj:\n server_hardware = profile.server\n\n logger._log_to_console_and_log_file(\"Verifying status for profile %s\" % profile.name)\n\n if server_hardware == 'unassigned':\n logger._log_to_console_and_log_file(\"Server hardware is unassigned and cannot verify the server's power status\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(2) # wait for fields to load\n\n # ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n if expectedserverstatus == 'OK':\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'ERROR':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n 
ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'WARNING':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def validate_server_profile_consistency_state(profile_obj):\n count = 0\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for _, profile in enumerate(profile_obj):\n rc = select_server_profile(profile.name)\n if not rc:\n logger.warn(\"Failed to select server profile '%s'\" % profile.name)\n continue\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_server_profile_consistency_status(profile.expected_state, timeout=5, fail_if_false=False):\n count += 1\n\n if count == len(profile_obj):\n logger.info(\"All of the given SPs passes consistency check.\")\n return True\n else:\n logger.warn(\"%s out of %s - the given SPs passes consistency check.\" % (count, len(profile_obj)))\n return False",
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def validate(cls, templates):\n super(Product, cls).validate(templates)\n\n for template in templates:\n template.check_type_and_mode()\n\n template.check_gc_min_max()",
"def create_simple_server_profile_by_server_hardware(profile_name, server_name, return_true_if_exists=False):\n logger.info(\"--> creating a server profile with name '%s' ...\" % profile_name)\n # checking if the profile is already existing\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n if VerifyServerProfile.verify_server_profile_not_exist(profile_name, fail_if_false=False) is False:\n logger.warn(\"server profile '%s' already exists\" % profile_name)\n return return_true_if_exists\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_HARDWARE, time_for_loading=5)\n if VerifyHardware.verify_server_hardware_exist(server_name=server_name, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' does not exist\" % server_name)\n return False\n\n CommonOperationServerHardware.click_server_hardware(server_name=server_name, timeout=5, time_for_loading=5)\n FusionUIBase.select_view_by_name(view_name='Hardware', timeout=5, fail_if_false=False)\n if VerifyHardware.is_create_profile_link_available() is False:\n logger.warn(\"server hardware '%s' does NOT have 'Create profile' link to perform creating profile\" % server_name)\n return False\n\n CommonOperationServerHardware.click_create_profile_link(server_name=server_name)\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(name=profile_name)\n # CreateServerProfile.input_description(description=description)\n\n if VerifyServerProfile.is_power_on_error_visible_when_create_server_profile(server_name=server_name, timeout=5, fail_if_false=False) is True:\n if CreateServerProfile.click_power_off_link_from_powered_on_error(server_name=server_name, timeout=5, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' is powered on but failed to power it off, creating simple server profile will FAIL\" % server_name)\n return False\n\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(server_name)\n\n if sht_selected[:2:] == 'BL':\n # maybe other needs according to SHT in the future\n pass\n\n CreateServerProfile.click_create_button()\n err_msg_boot_mode = CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode()\n if err_msg_boot_mode is not None:\n logger.warn(\"error message: ['%s'] when creating profile '%s'\" % (err_msg_boot_mode, profile_name))\n if 'select a boot mode' in err_msg_boot_mode.strip().lower():\n logger.debug(\"trying to set 'Boot mode' as 'Legacy BIOS' to remove this error message ...\")\n CommonOperationServerProfile.BootSettings.select_boot_mode_legacy_bios()\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unknown error message, cannot continue to create simple server profile\")\n return False\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n return False\n # ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if FusionUIBase.wait_activity_action_ok(profile_name, 'Create', timeout=720, fail_if_false=True) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, 
timeout=180, fail_if_false=True) is False:\n return False\n logger.info(\"created simple server profile '%s' successfully\" % profile_name)\n return True",
"def _validate_submodels(self, type_promax, type_ms):\n return type_promax in self._submodels and \\\n type_ms in self._submodels and \\\n len(self._submodels[type_promax]) > 0 and \\\n len(self._submodels[type_promax]) == len(self._submodels[type_ms])",
"def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True",
"def validate(self, server):\n self.validate_type(server)\n self.validate_name(server.name)\n self.validate_username(server.username)\n self.validate_ip(server.ip)",
"def test_create_different_devices(self):\n command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def bak_verify_server_profile_general_info(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n for profile in profile_obj:\n server = profile.server\n hardwaretype = profile.hardwareType\n enclosuregroup = profile.enclgroup\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(5) # wait for fields to load\n\n logger.info(\"Verifying server hardware for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_SERVER, server, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_SERVER)\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, server))\n selenium2lib.capture_page_screenshot()\n return False\n\n logger.info(\"Verifying server hardware type for profile %s\" % profile.name)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_HARDWARE, PerfConstants.DEFAULT_SYNC_TIME, fail_if_false=False) is True:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_HARDWARE)\n if txt.find(hardwaretype) == -1:\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, hardwaretype))\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger.warn(\"Failed to wait server hardware type field display\")\n return False\n\n logger.info(\"Verifying enclosure group for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP, enclosuregroup, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP)\n logger.info(\"Enclosure group of server : %s is not as expected [%s]\" % (txt, enclosuregroup))\n selenium2lib.capture_page_screenshot()\n return False\n\n return True",
"def test_message_type_uniqueness(self):\n message_type_characters = map(lambda x: x.message_type.data_type.payload_base_set,\n KNOWN_MESSAGE_TYPES)\n for message_type_character in message_type_characters:\n if self.is_verbose:\n print 'Checking uniqueness of message type {0}'.format(message_type_character)\n self.assertEqual(1, len(filter(lambda x : x == message_type_character, message_type_characters)))",
"def check_type_and_mode(self):\n if not self.is_gift_card:\n return\n\n if (\n self.gift_card_delivery_mode == 'virtual' and\n self.type != 'service'\n ) or (\n self.gift_card_delivery_mode in ['physical', 'combined'] and\n self.type != 'goods'\n ):\n self.raise_user_error(\n \"inappropriate_product\", (\n self.rec_name, self.gift_card_delivery_mode\n )\n )",
"def delete_all_appliance_server_profile_templates():\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n profile_template_name_list = CommonOperationServerProfileTemplate.get_server_profile_template_list()\n\n total = len(profile_template_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_template_name in enumerate(profile_template_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template_name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template_name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template_name)\n not_exists += 1\n else:\n if not delete_server_profile_template_by_name(profile_template_name):\n logger.warn(\"server profile template '%s' is NOT deleted successfully.\" % profile_template_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True"
] | [
"0.72816855",
"0.7089422",
"0.6603946",
"0.5662966",
"0.5636588",
"0.5465046",
"0.5454747",
"0.5449822",
"0.5423557",
"0.53436553",
"0.5322769",
"0.530526",
"0.5304127",
"0.52597606",
"0.52190274",
"0.5214245",
"0.51793396",
"0.5171909",
"0.51624525",
"0.51224446",
"0.5110577",
"0.50759244",
"0.50673276",
"0.506419",
"0.50429",
"0.5041568",
"0.5039212",
"0.50364256",
"0.5035361",
"0.503"
] | 0.797853 | 0 |
Validate that the Server Profile Template allows managing the boot order. | def _validate_server_profile_template_manage_boot(server_profile_template):
manage_boot = server_profile_template.get('boot', {}).get('manageBoot')
if not manage_boot:
message = _("Server Profile Template: %s, does not allow to manage "
"boot order.") % server_profile_template.get('uri')
raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def spt_verify_required_fields_for_iscsi_boot(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"Creating Server Profile Template | %s | ...\" % profile.name)\n\n # checking if the profile already exists\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile.name, fail_if_false=True):\n ui_lib.fail_test(\"Server Profile Template | %s | already exists\" % profile.name)\n\n # open Create SP dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile.name)\n\n if hasattr(profile, 'ref_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.ref_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.ref_server, hardware_type))\n CreateServerProfileTemplate.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfileTemplate.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfileTemplate.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n\n if hasattr(profile, 'Connections'):\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n\n # add connections with blank iSCSI boot data and verify required field error messages\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start adding connections ...\")\n\n for n, connection in enumerate(profile.Connections):\n logger.info(\"--- <connections> ---: {2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile.Connections), '-' * 14))\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n logger.debug(\"test data for connection '<%s>' is found: '<%s>'\" % (connection.name, connection), also_console=False)\n\n # Verify the connection does not exist\n CommonOperationServerProfileTemplate.Connection.verify_connection_not_exist(connection.name, fail_if_false=True)\n\n # Add the connection\n CommonOperationServerProfileTemplate.Connection.click_add_connection_button()\n CommonOperationServerProfileTemplate.Connection.wait_add_connection_dialog_shown()\n\n CommonOperationServerProfileTemplate.Connection.input_name(connection.name)\n CommonOperationServerProfileTemplate.Connection.select_function_type_by_text(connection.FunctionType, fail_if_false=True)\n CommonOperationServerProfileTemplate.Connection.input_select_network(connection.network)\n CommonOperationServerProfileTemplate.Connection.input_select_port(connection.port)\n CommonOperationServerProfileTemplate.Connection.input_requested_bandwidth(connection.RequestedBandwidth)\n CommonOperationServerProfileTemplate.Connection.select_boot_by_text(connection.boot, fail_if_false=True)\n\n # Input information for the iSCSI boot connection\n if connection.boot == 'iSCSI primary' or connection.boot == 'iSCSI secondary':\n CommonOperationServerProfileTemplate.Connection.set_iscsi_boot_options(connection)\n\n # Click \"Add\" button\n CommonOperationServerProfileTemplate.Connection.click_add_button()\n\n # Verify error messages & text field visibility\n 
CommonOperationServerProfileTemplate.Connection.verify_iscsi_initiator_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_initiator_ip_not_visible()\n CommonOperationServerProfile.Connection.verify_iscsi_subnet_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_gateway_error_message(\"\")\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_vlan_id_not_visible()\n\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_target_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_target_lun_not_visible()\n CommonOperationServerProfile.Connection.verify_iscsi_target_ip_error_message(\"\")\n\n if getattr(connection, \"targetIp\", \"\") is not \"\" and getattr(connection, \"targetPort\", \"\") is \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"\")\n\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n\n if getattr(connection, \"secondIp\", \"\") is not \"\" and getattr(connection, \"secondPort\", \"\") is \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"\")\n\n if hasattr(connection, \"chapLvl\"):\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_chap_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_chap_secret_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_mchap_name_not_visible()\n CommonOperationServerProfileTemplate.Connection.verify_iscsi_mchap_secret_not_visible()\n\n # Click \"Cancel\" button\n CommonOperationServerProfileTemplate.Connection.click_cancel_button()\n else:\n ui_lib.fail_test(\"Connections object not present in data file for profile template with name | %s |\" % profile.name)\n\n CreateServerProfileTemplate.click_cancel_button()",
"def pilotIsBootValid (self):\n return self.isBootValid()",
"def pilotValidateBoot (self):\n return self.validateBoot()",
"def verify_required_fields_for_iscsi_boot(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"Creating Server Profile for server | %s | ...\" % profile.name)\n\n # checking if the profile already exists\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n ui_lib.fail_test(\"Server profile | %s | already exists\" % profile.name)\n\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(\n profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not 
None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n\n if hasattr(profile, 'Connections'):\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n\n # add connections with blank iSCSI boot data and verify required field error messages\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start adding connections ...\")\n\n for n, connection in enumerate(profile.Connections):\n logger.info(\"--- <connections> ---: {2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile.Connections), '-' * 14))\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n logger.debug(\"test data for connection '<%s>' is found: '<%s>'\" % (connection.name, connection), also_console=False)\n\n # Verify the connection does not exist\n CommonOperationServerProfile.Connection.verify_connection_not_exist(connection.name, fail_if_false=True)\n\n # Add the connection\n CommonOperationServerProfile.Connection.click_add_connection_button()\n CommonOperationServerProfile.Connection.wait_add_connection_dialog_shown()\n\n CommonOperationServerProfile.Connection.input_name(connection.name)\n CommonOperationServerProfile.Connection.select_function_type_by_text(connection.FunctionType, fail_if_false=True)\n CommonOperationServerProfile.Connection.input_select_network(connection.network)\n CommonOperationServerProfile.Connection.input_select_port(connection.port)\n CommonOperationServerProfile.Connection.input_requested_bandwidth(connection.RequestedBandwidth)\n CommonOperationServerProfile.Connection.select_boot_by_text(connection.boot, fail_if_false=True)\n\n # Input information for the iSCSI boot connection. Data file should have blanks for all fields except secondIp.\n if connection.boot == 'iSCSI primary' or connection.boot == 'iSCSI secondary':\n CommonOperationServerProfile.Connection.set_iscsi_boot_options(connection)\n\n # Click \"Add\" button\n CommonOperationServerProfile.Connection.click_add_button()\n\n # Verify error messages\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_initiator_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_subnet_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_gateway_error_message(\"\")\n\n if hasattr(connection, \"vlanId\"):\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_vlan_id_error_message(\"\")\n\n CommonOperationServerProfile.Connection.verify_iscsi_target_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_lun_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_ip_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_target_port_error_message(\"This field is required.\")\n\n if getattr(connection, \"secondIp\", \"\") is not \"\":\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"This field is required.\")\n else:\n CommonOperationServerProfile.Connection.verify_iscsi_second_ip_error_message(\"\")\n 
CommonOperationServerProfile.Connection.verify_iscsi_second_port_error_message(\"\")\n\n if hasattr(connection, \"chapLvl\"):\n if connection.chapLvl == \"None\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"\")\n elif connection.chapLvl == \"Mutual CHAP\":\n CommonOperationServerProfile.Connection.verify_iscsi_chap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_chap_secret_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_name_error_message(\"This field is required.\")\n CommonOperationServerProfile.Connection.verify_iscsi_mchap_secret_error_message(\"This field is required.\")\n\n # Click \"Cancel\" button\n CommonOperationServerProfile.Connection.click_cancel_button()\n else:\n ui_lib.fail_test(\"Connections object not present in data file for profile with name | %s |\" % profile.name)\n\n CreateServerProfile.click_cancel_button()",
"def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")",
"def __validate_boot_settings_properties_in_xml_file(profile):\n # TODO: Create a validation for <bootorder> values\n INVALID_ATTRIBUTE_ERROR_MESSAGE = \"Invalid value for %s attribute. Valid values are: %s\"\n\n if profile.has_property(XML_MANAGE_BOOT_MODE_ATTRIBUTE):\n if profile.manageBoot not in XML_BOOLEAN_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_MANAGE_BOOT_MODE_ATTRIBUTE, XML_BOOLEAN_LIST), False)\n elif profile.has_property(XML_BOOT_MODE_ATTRIBUTE):\n if profile.bootMode not in PROFILE_BOOT_MODE_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_BOOT_MODE_ATTRIBUTE, PROFILE_BOOT_MODE_LIST), False)\n elif profile.bootMode == CONSTANT_UEFI or profile.bootMode == CONSTANT_UEFI_OPTIMIZED:\n if profile.has_property(XML_BOOT_POLICY_ATTRIBUTE):\n if profile.bootPolicy not in PROFILE_BOOT_POLICY_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_BOOT_POLICY_ATTRIBUTE, PROFILE_BOOT_POLICY_LIST), False)\n elif profile.has_property(XML_MANAGE_BOOT_ORDER_ATTRIBUTE):\n if profile.manageBootOrder not in XML_BOOLEAN_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_MANAGE_BOOT_ORDER_ATTRIBUTE, XML_BOOLEAN_LIST), False)\n elif profile.has_property(XML_PRIMARY_BOOT_DEVICE):\n if profile.primaryBootDevice not in PROFILE_PRIMARY_BOOT_DEVICE_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_PRIMARY_BOOT_DEVICE, PROFILE_PRIMARY_BOOT_DEVICE_LIST), False)",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def is_valid_profile(profile):\n\n return profile.metadata.get('os', 'unknown') == 'windows'",
"def is_bootstrapped(self):\n\n # Attempt to bootstrap without providing any of the required fields, and inspect the exception\n try:\n response = self._connection.post(\"/deployment/new\", json={})\n raise TransportError(\"POST {} to /deployment/new should have raised an exception, but didn't\", response)\n except ValueError as e:\n if e.args[0] == 400:\n # The server is willing to accept correct field values to bootstrap with, so isn't bootstrapped yet.\n return False\n if e.args[0] == 403:\n # The server is no longer willing to accept POSTs to /deployment/new, because it's already bootstrapped.\n return True\n raise\n raise TransportError(response)",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))",
"def is_vendor_profile_page_loaded_properly(self):\n return self.is_element_present(self.save_vendor_profile_locator)",
"def isBootValid (self):\n if not self._wasSdIdentified:\n self._log(\"is-boot-valid\").notice(\"secure-digital was not identified, its boot partition is not valid.\")\n return False\n\n if not self.isBootPartitionExist():\n self._log(\"is-boot-valid\").notice(\"the secure-digital boot partition does not exist (not valid).\")\n return False\n\n try:\n self.mountBootPartition()\n except:\n self._log(\"is-boot-valid\").exception(\"failed mounting partition, partition is invalid\")\n return False\n\n stateFile = self._getBootInstallationFilePath()\n isValid = os.path.exists(stateFile)\n if isValid:\n self._log(\"is-boot-valid\").notice(\"secure-digital boot partition's state file %s exists, the boot partitions is valid.\", stateFile)\n else:\n self._log(\"is-boot-valid\").notice(\"secure-digital boot partition's state file %s does not exist, the boot partitions is invalid.\", stateFile)\n\n return isValid",
"def check_process_ready(project, profile):\n errors = []\n input_templates = InputTemplate.objects.filter(\n corresponding_profile=profile\n )\n for template in input_templates:\n file_settings_amount = FileSetting.objects.filter(\n file__project=project, input_template=template\n ).count()\n if template.optional and template.unique and file_settings_amount > 1:\n errors.append(\n \"Template '{} ({})' requires a unique file but multiple were specified.\".format(\n template.template_id, template.label\n )\n )\n elif (\n not template.optional\n and template.unique\n and file_settings_amount != 1\n ):\n errors.append(\n \"Template '{} ({})' requires a unique file but {} were specified.\".format(\n template.template_id, template.label, file_settings_amount\n )\n )\n elif (\n not template.optional\n and not template.unique\n and file_settings_amount < 1\n ):\n errors.append(\n \"Template '{} ({})' requires a file but none were specified\".format(\n template.template_id, template.label\n )\n )\n\n for parameter in profile.script.variable_parameters:\n try:\n ParameterSetting.objects.get(\n project=project, base_parameter=parameter\n )\n except ParameterSetting.DoesNotExist:\n errors.append(\n \"Parameter '{}' requires a value but none is given.\".format(\n parameter\n )\n )\n return errors",
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _check_v2(self, start_here: bool = False) -> bool:\n if start_here:\n self.console.info(\"Validating configuration data...\")\n\n self.data[\"compose_files\"] = self._check_for_compose_file()\n\n ret = True\n\n compose_override_list = [\n file for file in self.data[\"compose_files\"] if \"override\" in file\n ]\n if len(compose_override_list) > 1:\n self.console.error(\n \"You must inform only one docker-compose.override.yml file\"\n )\n ret = False\n\n if self.data.get(\"layout\") and self.data.get(\"layout\") not in [\n \"horizontal\",\n \"vertical\",\n ]:\n self.console.error(\"Layout must be vertical or horizontal\")\n ret = False\n\n if (\n self.data.get(\"background_color\")\n and self.data.get(\"background_color\") not in BoxColor.__members__\n ):\n self.console.error(\n \"Valid background colors are: {}\".format(\n \", \".join(BoxColor.available_colors())\n )\n )\n ret = False\n\n if not self.data.get(\"compose_files\"):\n self.console.error(\"You must inform at least one Docker-Compose file path.\")\n ret = False\n elif not isinstance(self.data.get(\"compose_files\"), list):\n self.console.error(\"Docker-Compose files must be a list\")\n ret = False\n\n if self.data.get(\"ignore_services\") is not None and not isinstance(\n self.data.get(\"ignore_services\"), list\n ):\n self.console.error(\"Ignore Services must be a list\")\n ret = False\n\n if self.data.get(\"boxes\"):\n # Check for more than one main box\n main_box_count = [\n box_name\n for box_name in self.data[\"boxes\"]\n if self.data[\"boxes\"].get(box_name).get(\"main\")\n ]\n if len(main_box_count) > 1:\n self.console.error('Only one box must have the \"main\" parameter')\n ret = False\n if len(main_box_count) == 0:\n self.console.error('No box have the \"main\" parameter')\n ret = False\n if len(main_box_count) == 1:\n main_box = self.data[\"boxes\"][main_box_count[0]]\n if main_box.get(\"includes\") is not None:\n self.console.error(\n 'Box with \"main\" parameter must do not contain \"includes\"'\n )\n ret = False\n\n for box_name in self.data.get(\"boxes\", {}):\n data_in_box = self.data[\"boxes\"][box_name]\n if data_in_box.get(\"size\") and data_in_box.get(\"size\") not in [\n \"big\",\n \"small\",\n ]:\n self.console.error(\n 'Size for Box \"{}\" must be \"big\" or \"small\"'.format(box_name)\n )\n ret = False\n if data_in_box.get(\"port_view\") and data_in_box.get(\"port_view\") not in [\n \"column\",\n \"name\",\n \"status\",\n ]:\n self.console.error(\n 'Port View in Box \"{}\" must be \"column\", '\n '\"name\" or \"status\". 
Value is: {}'.format(\n box_name, data_in_box[\"port_view\"]\n )\n )\n ret = False\n if data_in_box.get(\"port_detail\") and data_in_box.get(\n \"port_detail\"\n ) not in [\"external\", \"internal\", \"both\"]:\n self.console.error(\n 'Port Detail in Box \"{}\" must be '\n '\"external\", \"internal\" or \"both\".'.format(box_name)\n )\n ret = False\n if data_in_box.get(\"includes\") is not None and not isinstance(\n data_in_box.get(\"includes\"), list\n ):\n self.console.error(\n 'Include in Box \"{}\" must be a list'.format(box_name)\n )\n ret = False\n if data_in_box.get(\"categories\") is not None and not isinstance(\n data_in_box.get(\"categories\"), list\n ):\n self.console.error(\n 'Categories in Box \"{}\" must be a list'.format(box_name)\n )\n ret = False\n if self.data.get(\"watch_for_build_using_files\") is not None:\n if not isinstance(self.data.get(\"watch_for_build_using_files\"), list):\n self.console.error(\n \"Watch for Build using Files Check must be a list\"\n )\n ret = False\n if self.data.get(\"watch_for_build_using_git\") is not None:\n if not isinstance(self.data.get(\"watch_for_build_using_git\"), list):\n self.console.error(\"Watch for Build using Git Check must be a list\")\n ret = False\n\n return ret",
"def verify_server_profile_boot_settings_info(*profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for _, profile in enumerate(profile_obj):\n logger.info(\"verifying Boot Settings info of a server profile named '%s'\" % profile.name)\n # check if server profile exists\n VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=True)\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=10)\n FusionUIBase.select_view_by_name(view_name='Boot Settings', timeout=5, fail_if_false=True)\n\n if profile.BootSettings.bootMode.lower() == 'legacy bios':\n VerifyServerProfile.verify_legacy_boot_settings(profile, timeout=10, fail_if_false=True)\n else:\n VerifyServerProfile.verify_non_legacy_boot_settings(profile, timeout=10, fail_if_false=True)",
"def _check_required_if_provider(self):\n return",
"def check_config_mode(self):\n return False",
"def check(self, context):\r\n return context.config.preset is not None",
"def _check_config(self):",
"def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False",
"def validate_server_profile_consistency_state(profile_obj):\n count = 0\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n for _, profile in enumerate(profile_obj):\n rc = select_server_profile(profile.name)\n if not rc:\n logger.warn(\"Failed to select server profile '%s'\" % profile.name)\n continue\n FusionUIBase.select_view_by_name(view_name='General', timeout=5, fail_if_false=False)\n if VerifyServerProfile.verify_server_profile_consistency_status(profile.expected_state, timeout=5, fail_if_false=False):\n count += 1\n\n if count == len(profile_obj):\n logger.info(\"All of the given SPs passes consistency check.\")\n return True\n else:\n logger.warn(\"%s out of %s - the given SPs passes consistency check.\" % (count, len(profile_obj)))\n return False",
"def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()",
"def check_settings(self):\n pass",
"def check_settings(self):\r\n pass",
"def validate(self):\n if not self.os_repos:\n raise ValueError(\"No OS repository available for OS {}\".format(\n self.operating_system.name))\n if not self.template:\n raise ValueError(\"No autoinstallation template specified\")\n if not self.installer_template:\n raise ValueError(\"No installer command line template specified\")\n if not self.system_profile._gateway:\n raise ValueError(\"No gateway interface present\")\n\n self.system_profile.hypervisor.validate()\n\n for iface in self.system_profile.ifaces:\n iface.validate()\n\n # verify gateway interface has IP address and gateways\n if not self.system_profile.list_gateway_networks():\n raise ValueError(\n \"Gateway interface {} has no IP address\"\n \" or gateway route\".format(\n self.system_profile._gateway.os_device_name\n ))\n\n # verify that total partition size is not bigger than disk size\n failing_volume_ids = []\n for volume in [volume for volume in self.system_profile.volumes\n if isinstance(volume, (self.DasdVolume,\n self.ZfcpVolume))]:\n total_part_size = sum(\n [partition.size for partition in volume.partitions])\n if total_part_size > volume.size:\n failing_volume_ids.append(str(volume))\n\n if failing_volume_ids:\n raise ValueError(\n \"Partitioning exceeds volume size for volumes {}\".format(\n failing_volume_ids))",
"def check_for_setup_error(self):\r\n self.helper._check_conf_file()\r\n self.helper._check_service()",
"def check_toolserver(env):\n try:\n blueprint = cli.get_env_blueprint(env)\n if blueprint == 'toolserver':\n return True\n else:\n return False\n except SystemExit:\n return False"
] | [
"0.67219007",
"0.6499709",
"0.6323258",
"0.6316359",
"0.6255033",
"0.6118853",
"0.6111678",
"0.6087322",
"0.6000576",
"0.58550376",
"0.5843932",
"0.57918465",
"0.5738527",
"0.57050484",
"0.56965",
"0.5672641",
"0.56639814",
"0.56551486",
"0.564075",
"0.5627896",
"0.56241286",
"0.56198007",
"0.55892915",
"0.5585865",
"0.55783486",
"0.5542934",
"0.55227834",
"0.5518511",
"0.55029505",
"0.5494869"
] | 0.8403261 | 0 |
Validate if the node's Server Hardware Type matches Server Hardware's. | def _validate_node_server_hardware_type(oneview_client, oneview_info):
    node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']
    server_hardware = oneview_client.server_hardware.get(
        oneview_info['server_hardware_uri'])
    server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')
    if server_hardware_sht_uri != node_server_hardware_type_uri:
        message = _("Node server_hardware_type_uri is inconsistent "
                    "with OneView's server hardware %(server_hardware_uri)s "
                    "serverHardwareTypeUri.") % {
            'server_hardware_uri': server_hardware.get('uri')}
        raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_node_port_mac_server_hardware(oneview_client,\n oneview_info, ports):\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n if not ports:\n return\n\n # NOTE(nicodemos) If hponeview client's unable to get the MAC of the Server\n # Hardware and raises an exception, the driver will try to get it from\n # the iLOrest client.\n try:\n mac = _get_server_hardware_mac(server_hardware)\n except exception.OneViewError:\n mac = _get_server_hardware_mac_from_ilo(server_hardware)\n\n incompatible_macs = []\n for port in ports:\n if port.address.lower() == mac.lower():\n return\n incompatible_macs.append(port.address)\n\n message = _(\"The ports of the node are not compatible with its \"\n \"server hardware %(server_hardware_uri)s. There are no Ironic \"\n \"port MAC's: %(port_macs)s, that matches with the \"\n \"server hardware's MAC: %(server_hardware_mac)s\") % {\n 'server_hardware_uri': server_hardware.get('uri'),\n 'port_macs': ', '.join(incompatible_macs),\n 'server_hardware_mac': mac}\n raise exception.OneViewError(message)",
"def server_type(self):\n ...",
"def _validate_server_profile_template_mac_type(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri']\n )\n if server_profile_template.get('macType') != 'Physical':\n message = _(\"The server profile template %s is not set to use \"\n \"physical MAC.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def test_get_node_hardware(self):\n pass",
"def validate(self, server):\n self.validate_type(server)\n self.validate_name(server.name)\n self.validate_username(server.username)\n self.validate_ip(server.ip)",
"def is_vserver_kernel():\n\n kinfo = commands.getoutput('/bin/uname -a').split()[2]\n return '-vs' in kinfo",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def server_type(self, im_self):\n parameter = im_self.instance_manager.config\n server_type = parameter.get(\"server_type\")\n if not server_type:\n server_type = im_self.instance_manager.instance_type\n return server_type",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def check_parameter_server(self, controller):\n for name in get_rosparam_controller_names(\"/\"):\n if name == controller:\n return True\n return False",
"def server_type(self):\n return self._server_type",
"def test_os_node(self):\n self.assertEqual(self.settings.OS_NODE, platform.node())",
"def _do_has_slave(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n try:\r\n if bus_type == 'rtu':\r\n self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].get_slave(slave_id)\r\n except Exception:\r\n return \"0\"\r\n return \"1\"",
"def test_create_hyperflex_server_firmware_version(self):\n pass",
"def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True",
"def check_device_type(device_type):\n\n if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):\n raise ValueError('Invalid device_type \"%s\"'%device_type)",
"def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False",
"def verify_server_status(server_hardware):\n\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n else:\n logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Server Hardware : \" + server_hardware + \" is not present in the ServerList of the Server Profile page\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Server Hardware : \" + server_hardware + \" is present in the ServerList and Hence verifying for the status..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK'\")\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n return True",
"def is_entity_domain_supported(self, source_entity: SourceEntity) -> bool:\n entity_entry = source_entity.entity_entry\n if (\n self.device_type == DeviceType.SMART_SWITCH\n and entity_entry\n and entity_entry.platform in [\"hue\"]\n ): # see https://github.com/bramstroker/homeassistant-powercalc/issues/1491\n return True\n return DEVICE_DOMAINS[self.device_type] == source_entity.domain",
"def is_valid_network_data(server):\n # good ip?\n try:\n good_ip = ipaddress.ip_address(server['ip'])\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid IP: \"{}\"'.format(server['ip']), file=sys.stderr)\n return False\n\n # good gateway ip?\n try:\n good_gateway_ip = ipaddress.ip_address(server['gateway'])\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid gateway IP: \"{}\"'.format(server['gateway']), file=sys.stderr)\n return False\n\n # good netmask?\n try:\n good_ip_network = ipaddress.ip_network('{}/{}'.format(server['ip'], server['netmask']), strict=False)\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid netmask: \"{}\"'.format(server['netmask']), file=sys.stderr)\n return False\n\n # gateway is in network?\n if good_gateway_ip in good_ip_network:\n return True\n else:\n print(server, file=sys.stderr)\n print('invalid: gateway {} not in {} network'.format(good_gateway_ip, good_ip_network), file=sys.stderr)\n return False",
"def is_nvme(self):\n if self.server_params[-1].bdev_class.value == \"nvme\":\n return True\n return False",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def is_logical(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsLogical', self.handle))",
"def get_host_os_type(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsType', self.handle)",
"def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False",
"def is_valid(self):\n for server_name in self.server_names.values():\n if not server_name.is_valid:\n return False\n return True",
"def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True",
"def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS",
"def _get_server_hardware_mac(server_hardware):\n sh_physical_port = None\n\n if server_hardware.get('portMap'):\n for device in server_hardware.get(\n 'portMap', {}).get('deviceSlots', ()):\n for physical_port in device.get('physicalPorts', ()):\n if physical_port.get('type') == 'Ethernet':\n sh_physical_port = physical_port\n break\n if sh_physical_port:\n for virtual_port in sh_physical_port.get('virtualPorts', ()):\n # NOTE(nicodemos): Ironic oneview drivers needs to use a\n # port that type is Ethernet and function identifier 'a' for\n # this FlexNIC to be able to make a deploy using PXE.\n if virtual_port.get('portFunction') == 'a':\n return virtual_port.get('mac', ()).lower()\n raise exception.OneViewError(\n _(\"There is no Ethernet port on the Server Hardware: %s\") %\n server_hardware.get('uri'))\n else:\n raise exception.OneViewError(\n _(\"The Server Hardware: %s doesn't have a list of adapters/slots, \"\n \"their ports and attributes. This information is available only \"\n \"for blade servers. Is this a rack server?\") %\n server_hardware.get('uri'))"
] | [
"0.67908597",
"0.62367",
"0.5962622",
"0.5756479",
"0.56312937",
"0.55868",
"0.5542178",
"0.5457781",
"0.5441118",
"0.542971",
"0.54071677",
"0.53862107",
"0.5355715",
"0.5339024",
"0.53239506",
"0.528533",
"0.5275171",
"0.5271481",
"0.52409774",
"0.52346915",
"0.51931256",
"0.5187244",
"0.5179794",
"0.5155095",
"0.5152764",
"0.51495314",
"0.5124496",
"0.5119427",
"0.5103907",
"0.51019925"
] | 0.8271434 | 0 |
Validate if the node's Enclosure Group matches the Server Hardware's. | def _validate_node_enclosure_group(oneview_client, oneview_info):
    server_hardware = oneview_client.server_hardware.get(
        oneview_info['server_hardware_uri'])
    sh_enclosure_group_uri = server_hardware.get('serverGroupUri')
    node_enclosure_group_uri = oneview_info['enclosure_group_uri']
    if node_enclosure_group_uri and (
            sh_enclosure_group_uri != node_enclosure_group_uri):
        message = _(
            "Node enclosure_group_uri '%(node_enclosure_group_uri)s' "
            "is inconsistent with OneView's server hardware "
            "serverGroupUri '%(sh_enclosure_group_uri)s' of "
            "ServerHardware %(server_hardware)s") % {
                'node_enclosure_group_uri': node_enclosure_group_uri,
                'sh_enclosure_group_uri': sh_enclosure_group_uri,
                'server_hardware': server_hardware.get('uri')}
        raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_node_server_hardware_type(oneview_client, oneview_info):\n node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')\n\n if server_hardware_sht_uri != node_server_hardware_type_uri:\n message = _(\"Node server_hardware_type_uri is inconsistent \"\n \"with OneView's server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_spt_enclosure_group(server_profile_template, server_hardware):\n spt_enclosure_group_uri = server_profile_template.get('enclosureGroupUri')\n sh_enclosure_group_uri = server_hardware.get('serverGroupUri')\n\n if spt_enclosure_group_uri != sh_enclosure_group_uri:\n message = _(\"Server profile template %(spt_uri)s enclosureGroupUri is \"\n \"inconsistent with server hardware %(sh_uri)s \"\n \"serverGroupUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'sh_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def check_security_group(self):\n return True",
"def _check_groups_kvm():\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')",
"def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False",
"def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")",
"def poll(cls, context):\r\n return hasattr(bpy.types.Object, \"BlenderNEURON_node\") and \\\r\n bpy.types.Object.BlenderNEURON_node is not None and \\\r\n bpy.types.Object.BlenderNEURON_node.client is not None and \\\r\n AbstractBlenderNEURONPanel.group_count(context) > 1",
"def prod_load_balancer_sg_valid(self) -> None:\n if self.prod_env:\n sg_name = 'saints-xctf-prod-server-elb-security-group'\n else:\n sg_name = 'saints-xctf-dev-server-elb-security-group'\n\n response = self.ec2.describe_security_groups(Filters=[\n {\n 'Name': 'group-name',\n 'Values': [sg_name]\n }\n ])\n\n security_group = response.get('SecurityGroups')[0]\n\n self.assertTrue(all([\n security_group.get('GroupName') == sg_name,\n self.validate_load_balancer_sg_rules(\n security_group.get('IpPermissions'),\n security_group.get('IpPermissionsEgress')\n )\n ]))",
"def is_valid(self):\n for server_name in self.server_names.values():\n if not server_name.is_valid:\n return False\n return True",
"def is_instance_in_reg_elements(nsr_id, group_name, instance_id):\n for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):\n elem_nsr_id = nsr_id_from_keyspec(keyspec)\n elem_group_name = group_name_from_keyspec(keyspec)\n\n if elem_nsr_id != nsr_id or group_name != elem_group_name:\n continue\n\n if instance_cfg.id == instance_id:\n return True\n\n return False",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def validate(self):\n\n # Check if motherboard record exists\n motherboard_record_exists = False\n board_info_records = self.groups[constants.RecordType.BASEBOARD_RECORD]\n for handle_id in board_info_records:\n record = self.records[handle_id]\n if 'Type' in record.props and record.props['Type'].val == 'Motherboard':\n motherboard_record_exists = True\n break\n if not motherboard_record_exists:\n self.err_msgs['Motherboard SMBIOS record is missing.'] = (\n 'There should be at least one structure defining the motherboard '\n '(Board Type: 0xA).')\n\n return self.err_msgs",
"def test_get_node_hardware(self):\n pass",
"def has_node_groups(self, namespace=None):\n try:\n return bool(self._source(namespace).reverse_upcall)\n except GroupResolverSourceError:\n return False",
"def check_group_pack(self, cr, uid, context=None):\n return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')",
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")",
"def __check_registered(self, source_address: Address) -> bool:\n source_ip, source_port = source_address\n source_node = SemiNode(source_ip, source_port)\n return source_node in self.registered",
"def is_entity_domain_supported(self, source_entity: SourceEntity) -> bool:\n entity_entry = source_entity.entity_entry\n if (\n self.device_type == DeviceType.SMART_SWITCH\n and entity_entry\n and entity_entry.platform in [\"hue\"]\n ): # see https://github.com/bramstroker/homeassistant-powercalc/issues/1491\n return True\n return DEVICE_DOMAINS[self.device_type] == source_entity.domain",
"def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))",
"def validate(self, node_uuid):\n # check if node exists\n node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)\n return pecan.request.rpcapi.validate_driver_interfaces(\n pecan.request.context, node.uuid)",
"def at_least_a_group(exp, mesh, mod):\n is_valid = True\n if not exp.find_groups(mesh):\n mess = \"At least a group needs to be defined on the selected object\"\n mod.launch(GC.ERROR, mess)\n is_valid = False\n return is_valid",
"def test_has_group_address(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", value_type=\"temperature\", group_address_state=\"1/2/3\"\n )\n self.assertTrue(sensor.has_group_address(GroupAddress(\"1/2/3\")))\n self.assertFalse(sensor.has_group_address(GroupAddress(\"1/2/4\")))",
"def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''",
"def test_has_group_address(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n value_type='temperature',\n group_address_state='1/2/3')\n self.assertTrue(sensor.has_group_address(GroupAddress('1/2/3')))\n self.assertFalse(sensor.has_group_address(GroupAddress('1/2/4')))",
"def check_encapsulated(obj_type, first_obj, second_obj, db):\n if obj_type == 'network':\n # the indexing is to get the list of networks out of the tuple[1] and\n # list[0] returned by get_nets\n first = get_nets([first_obj], db)[0][1]\n second = get_nets([second_obj], db)[0][1]\n\n elif obj_type == 'service':\n first = get_ports([first_obj], db)[0][1]\n second = get_ports([second_obj], db)[0][1]\n else:\n raise ValueError(\"check_encapsulated() currently only supports \"\n \"'network' and 'service' for the obj_type parameter\")\n # iterates over each object in the first group, and then each obj in the\n # second group, making sure each one in the first is contained\n # somewhere in the second.\n for obj in first:\n for sec_obj in second:\n if obj.version == sec_obj.version:\n if obj.subnet_of(sec_obj):\n break\n # if we got through every object in the second group, and didn't have\n # a match, then the first group is not entirely contained.\n else:\n return False\n # if we got here, then the group was fully contained.\n return True",
"def is_valid_network_data(server):\n # good ip?\n try:\n good_ip = ipaddress.ip_address(server['ip'])\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid IP: \"{}\"'.format(server['ip']), file=sys.stderr)\n return False\n\n # good gateway ip?\n try:\n good_gateway_ip = ipaddress.ip_address(server['gateway'])\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid gateway IP: \"{}\"'.format(server['gateway']), file=sys.stderr)\n return False\n\n # good netmask?\n try:\n good_ip_network = ipaddress.ip_network('{}/{}'.format(server['ip'], server['netmask']), strict=False)\n except ValueError:\n print(server, file=sys.stderr)\n print('invalid netmask: \"{}\"'.format(server['netmask']), file=sys.stderr)\n return False\n\n # gateway is in network?\n if good_gateway_ip in good_ip_network:\n return True\n else:\n print(server, file=sys.stderr)\n print('invalid: gateway {} not in {} network'.format(good_gateway_ip, good_ip_network), file=sys.stderr)\n return False",
"def test_check_numa_aware_ksm_status(self):\n self.check_host_activation(ksm_merge_across_nodes=True)",
"def on_dedicated(self):\n\n return self.is_valid_platform() and self['MODE'] == 'enterprise'",
"def is_in_use(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsInUse', self.handle))"
] | [
"0.6324016",
"0.6095127",
"0.5762267",
"0.5492249",
"0.5368418",
"0.5298437",
"0.52928054",
"0.524873",
"0.5237448",
"0.51903677",
"0.5187325",
"0.5177885",
"0.5053734",
"0.50441307",
"0.50437135",
"0.50406593",
"0.50092286",
"0.50085694",
"0.5004797",
"0.5001447",
"0.500056",
"0.49992567",
"0.49849543",
"0.49600482",
"0.49528724",
"0.4950187",
"0.49488345",
"0.49465698",
"0.49085367",
"0.49069422"
] | 0.68265074 | 0 |
Validate if a port matches the node's Server Hardware's MAC. | def _validate_node_port_mac_server_hardware(oneview_client,
                                            oneview_info, ports):
    server_hardware = oneview_client.server_hardware.get(
        oneview_info['server_hardware_uri'])
    if not ports:
        return
    # NOTE(nicodemos) If hponeview client's unable to get the MAC of the Server
    # Hardware and raises an exception, the driver will try to get it from
    # the iLOrest client.
    try:
        mac = _get_server_hardware_mac(server_hardware)
    except exception.OneViewError:
        mac = _get_server_hardware_mac_from_ilo(server_hardware)
    incompatible_macs = []
    for port in ports:
        if port.address.lower() == mac.lower():
            return
        incompatible_macs.append(port.address)
    message = _("The ports of the node are not compatible with its "
                "server hardware %(server_hardware_uri)s. There are no Ironic "
                "port MAC's: %(port_macs)s, that matches with the "
                "server hardware's MAC: %(server_hardware_mac)s") % {
        'server_hardware_uri': server_hardware.get('uri'),
        'port_macs': ', '.join(incompatible_macs),
        'server_hardware_mac': mac}
    raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def isMACCommand(self):\n return self.payload.fport == 0",
"def validate_port(port_id, serial_id):\n check_port = False\n api_uri = f\"/v1/devices/{serial_id}/switch/ports/{port_id}\"\n data = get_meraki_api_data(api_uri)\n if data:\n check_port = True\n else:\n check_port = False\n return check_port",
"def regmac(mac):\n return len(mac.split(\":\")[1]) == 12 and mac.split(\":\")[1] or None",
"def checkMac(self, mac):\n\t\tif mac in self.seenMacs:\n\t\t\treturn True\n\t\treturn False",
"def checkPort(self, port, alternatePort=None):\n raise NotImplementedError()",
"def isMac(cls, mac):\n return mac.startswith(cls.MAC_PREFIX)",
"def valid_mikettle_mac(mac, pat=re.compile(r\"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac",
"def check(interface, mac):\n\tifconfig = sp.check_output(['sudo','ifconfig',interface]).decode()\n\tregexMax = re.compile(r'(\\w\\w:){5}\\w\\w')\n\tresult = regexMax.search(ifconfig)\n\tif not result == None and result.group() == mac:\n\t\tprint('Mac changed')\n\t\tprint('[+] '+interface+' --> '+mac)\n\telse:\n\t\tprint('[[[[!]]]] Faliour',result.group())",
"def check_port(self):\r\n\t\treturn(self.connect.is_open)",
"def check_port(PORT):\n if PORT < 1024 or PORT > 64000:\n print(\"The Port number is not within specified range (1024 - 64000)\")\n return False\n return True",
"def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1",
"def check(self, ip, port):\r\n ip = struct.unpack(\">I\", socket.inet_aton(ip))[0]\r\n if (ip & self.netmask) == self.ip:\r\n if self.port_low <= port and port <= self.port_high:\r\n return self.match\r\n return -1",
"def IsRetiredMac(self, serial):\n return False",
"def is_port_taken(host, port):\n socket = socketserver.socket\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((host, port))\n s.shutdown(1)\n time.sleep(2)\n return True\n except:\n return False",
"def _get_server_hardware_mac(server_hardware):\n sh_physical_port = None\n\n if server_hardware.get('portMap'):\n for device in server_hardware.get(\n 'portMap', {}).get('deviceSlots', ()):\n for physical_port in device.get('physicalPorts', ()):\n if physical_port.get('type') == 'Ethernet':\n sh_physical_port = physical_port\n break\n if sh_physical_port:\n for virtual_port in sh_physical_port.get('virtualPorts', ()):\n # NOTE(nicodemos): Ironic oneview drivers needs to use a\n # port that type is Ethernet and function identifier 'a' for\n # this FlexNIC to be able to make a deploy using PXE.\n if virtual_port.get('portFunction') == 'a':\n return virtual_port.get('mac', ()).lower()\n raise exception.OneViewError(\n _(\"There is no Ethernet port on the Server Hardware: %s\") %\n server_hardware.get('uri'))\n else:\n raise exception.OneViewError(\n _(\"The Server Hardware: %s doesn't have a list of adapters/slots, \"\n \"their ports and attributes. This information is available only \"\n \"for blade servers. Is this a rack server?\") %\n server_hardware.get('uri'))",
"def valid_mitemp_mac(mac, pat=re.compile(r\"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac",
"def check(self, target, port):\n pass",
"def checkPort(port):\n try:\n p = int(port)\n if p >= 1 and p<= 65535:\n return True\n else:\n return False\n except ValueError:\n return False",
"def check_free_port(host, port, verbose=True):\n sock = socket.socket()\n try:\n sock.bind((host, port))\n sock.close()\n print(\"host {} on port {} is AVAIL\".format(host, port))\n return(True)\n except:\n print(\"host {} on port {} is BUSY\".format(host, port))\n sock.close()\n return(False)",
"def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0",
"def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port",
"def read_mac_address_port(self, port_num: int) -> Macs:\n raise NotImplementedError",
"def check_ethernet_network():\n default_iface = get_default_route()\n\n assert default_iface[1] == sc.conf.iface, \"incorrect sc.conf.iface\"\n iface_str = ''\n if sys.platform.startswith('win'):\n iface_info = sc.conf.iface\n iface_str = iface_info.guid\n else:\n iface_str = sc.conf.iface\n\n ifaddresses = netifaces.ifaddresses(str(iface_str))\n try:\n iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']\n except KeyError:\n return False\n return iface_mac != ''",
"def isMacAddr(string):\n return (True)",
"def test_port_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-p\", \"66666\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-p\" / \"--port\"' in invalid_res.output\n assert \"'port' is invalid in configuration\" in invalid_res.output",
"def test_port(self):\n self.assertEqual(self.gmail_case.port, None)\n self.assertEqual(self.telnet_case.port, 80)\n self.assertEqual(self.foo_case.port, 8042)",
"def _check_port_available(hostname, port):\n for config_file in config_files:\n network_config = networkConfig(config_file)\n for name, host in network_config.hostDict.items():\n if port == host.port:\n return False\n\n return _check_socket_is_free(hostname, port)",
"def check_free_port(host, port):\n import socket\n from contextlib import closing\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((host, port)) == 0:\n # Port is open, so not free\n return False\n else:\n # Port is not open, so free\n return True",
"def checkMACAddress(MACAddress):\n \n MACPattern = re.compile('^[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}$')\n MACMatch = MACPattern.match(MACAddress)\n \n return MACPattern.match(MACAddress)"
] | [
"0.7097071",
"0.674294",
"0.67142147",
"0.66719925",
"0.6523463",
"0.652277",
"0.6445301",
"0.64277035",
"0.618244",
"0.61718965",
"0.61240774",
"0.6104669",
"0.6086369",
"0.6079721",
"0.6074816",
"0.6058944",
"0.60415375",
"0.6036376",
"0.59993446",
"0.5959773",
"0.59428114",
"0.5890368",
"0.5871197",
"0.58710164",
"0.584716",
"0.5825391",
"0.5797176",
"0.57968736",
"0.57774353",
"0.57605606"
] | 0.7183407 | 0 |
Validate if the node's Server Profile Template's MAC type is physical. | def _validate_server_profile_template_mac_type(oneview_client, oneview_info):
server_profile_template = oneview_client.server_profile_templates.get(
oneview_info['server_profile_template_uri']
)
if server_profile_template.get('macType') != 'Physical':
message = _("The server profile template %s is not set to use "
"physical MAC.") % server_profile_template.get('uri')
raise exception.OneViewError(message) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware):\n spt_server_hardware_type_uri = (\n server_profile_template.get('serverHardwareTypeUri')\n )\n sh_server_hardware_type_uri = server_hardware.get('serverHardwareTypeUri')\n\n if spt_server_hardware_type_uri != sh_server_hardware_type_uri:\n message = _(\n \"Server profile template %(spt_uri)s serverHardwareTypeUri is \"\n \"inconsistent with server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'spt_uri': server_profile_template.get('uri'),\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def check_ethernet_network():\n default_iface = get_default_route()\n\n assert default_iface[1] == sc.conf.iface, \"incorrect sc.conf.iface\"\n iface_str = ''\n if sys.platform.startswith('win'):\n iface_info = sc.conf.iface\n iface_str = iface_info.guid\n else:\n iface_str = sc.conf.iface\n\n ifaddresses = netifaces.ifaddresses(str(iface_str))\n try:\n iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']\n except KeyError:\n return False\n return iface_mac != ''",
"def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1",
"def isMac(cls, mac):\n return mac.startswith(cls.MAC_PREFIX)",
"def _validate_server_profile_template_manage_boot(server_profile_template):\n manage_boot = server_profile_template.get('boot', {}).get('manageBoot')\n\n if not manage_boot:\n message = _(\"Server Profile Template: %s, does not allow to manage \"\n \"boot order.\") % server_profile_template.get('uri')\n raise exception.OneViewError(message)",
"def isMACCommand(self):\n return self.payload.fport == 0",
"def is_template(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTemplate', self.handle))",
"def hasMACCommands(self):\n return hasattr(self, 'commands') and len(self.commands) > 0",
"def isMacAddr(string):\n return (True)",
"def regmac(mac):\n return len(mac.split(\":\")[1]) == 12 and mac.split(\":\")[1] or None",
"def _validate_node_server_hardware_type(oneview_client, oneview_info):\n node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')\n\n if server_hardware_sht_uri != node_server_hardware_type_uri:\n message = _(\"Node server_hardware_type_uri is inconsistent \"\n \"with OneView's server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def checkMac(self, mac):\n\t\tif mac in self.seenMacs:\n\t\t\treturn True\n\t\treturn False",
"def IsRetiredMac(self, serial):\n return False",
"def is_valid_profile(profile):\n\n return profile.metadata.get('os', 'unknown') == 'windows'",
"def valid_mikettle_mac(mac, pat=re.compile(r\"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac",
"def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True",
"def is_logical(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsLogical', self.handle))",
"def mac(self):\n if not self.is_rule:\n raise NotRuleError(\"No 'ATTR{address}' field.\")\n\n if \"ATTR{address}\" not in self._fields:\n raise NotRule70Error(\"No 'ATTR{address}' field.\")\n\n return self._fields[\"ATTR{address}\"]",
"def is_nvme(self):\n if self.server_params[-1].bdev_class.value == \"nvme\":\n return True\n return False",
"def valid_mitemp_mac(mac, pat=re.compile(r\"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac",
"def check_device_type(device_type):\n\n if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):\n raise ValueError('Invalid device_type \"%s\"'%device_type)",
"def _validate_node_port_mac_server_hardware(oneview_client,\n oneview_info, ports):\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n if not ports:\n return\n\n # NOTE(nicodemos) If hponeview client's unable to get the MAC of the Server\n # Hardware and raises an exception, the driver will try to get it from\n # the iLOrest client.\n try:\n mac = _get_server_hardware_mac(server_hardware)\n except exception.OneViewError:\n mac = _get_server_hardware_mac_from_ilo(server_hardware)\n\n incompatible_macs = []\n for port in ports:\n if port.address.lower() == mac.lower():\n return\n incompatible_macs.append(port.address)\n\n message = _(\"The ports of the node are not compatible with its \"\n \"server hardware %(server_hardware_uri)s. There are no Ironic \"\n \"port MAC's: %(port_macs)s, that matches with the \"\n \"server hardware's MAC: %(server_hardware_mac)s\") % {\n 'server_hardware_uri': server_hardware.get('uri'),\n 'port_macs': ', '.join(incompatible_macs),\n 'server_hardware_mac': mac}\n raise exception.OneViewError(message)",
"def get_mac(self) :\n\t\ttry :\n\t\t\treturn self.p_fields.f128\n\t\texcept :\n\t\t\treturn None",
"def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False",
"def CheckKVM():\n return os.path.exists('/dev/kvm')",
"def check(interface, mac):\n\tifconfig = sp.check_output(['sudo','ifconfig',interface]).decode()\n\tregexMax = re.compile(r'(\\w\\w:){5}\\w\\w')\n\tresult = regexMax.search(ifconfig)\n\tif not result == None and result.group() == mac:\n\t\tprint('Mac changed')\n\t\tprint('[+] '+interface+' --> '+mac)\n\telse:\n\t\tprint('[[[[!]]]] Faliour',result.group())",
"def is_valid_mac(address):\n m = \"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\"\n if isinstance(address, six.string_types) and re.match(m, address.lower()):\n return True\n return False",
"def __is_adaptive_instance(self, policies, instance_type):\n zones = availabilityZones()\n typevms = zones.get_typevm_zones()\n if ( typevms[instance_type]['cpu'] >= policies['cpu_min'] and typevms[instance_type]['cpu'] <= policies['cpu_max'] and\n typevms[instance_type]['ram'] >= policies['memory_min'] and typevms[instance_type]['ram'] <= policies['memory_max'] and\n typevms[instance_type]['disk'] >= policies['disk_min'] and typevms[instance_type]['disk'] <= policies['disk_max'] ):\n return True\n return False"
] | [
"0.6238729",
"0.5995228",
"0.5811483",
"0.57653874",
"0.56162757",
"0.5592679",
"0.5545467",
"0.5504423",
"0.5461139",
"0.53143936",
"0.53029996",
"0.5298736",
"0.5189293",
"0.5159756",
"0.515517",
"0.5120909",
"0.50535005",
"0.5044812",
"0.50217086",
"0.5011732",
"0.50085557",
"0.50068104",
"0.50025445",
"0.49755007",
"0.49716318",
"0.49488273",
"0.49390912",
"0.49165547",
"0.49025434",
"0.48986176"
] | 0.83715606 | 0 |
Add an event on obj which will trigger an error on this Deferred | def add_error_event(self, obj, event, *args):
hid = obj.connect(event, self._err_emited, *args)
self.handlers_id.append(hid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _err_emited(self, *args):\n\t\tdebug(\"OnEventDeferred : err event catched\")\n\t\tself.errback(*args)\n\t\tself._clean()",
"def errback(self, f):\r\n assert self.__obj is None, 'Only one object can be registered.'\r\n assert isinstance(f, Failure), \"Failure has to be of type 'Failure'.\"\r\n self.__notify(f)",
"def error(self, func):\n self.error_handler = func\n return func",
"def _call_error_handler(self, event, err, **kwargs):\n if self._on_error_handler:\n event.error = str(err)\n event.origin_state = self.fullname\n return self._on_error_handler(event)",
"def on_error(self, callback):\n self.error_callback = callback",
"def on_error(self, event: ThreadResult):\n if self._on_error is not None:\n self._on_error(event.data)",
"def error(self, obj) -> None:\n if isinstance(obj, str) and obj in self:\n self.__err.extend(self.pop(obj))\n else:\n self.__err.append(obj)",
"def error(self, *args, **kwargs):",
"def error(self, handler):\n pass",
"def instantiateShootErrback():\n d = defer.Deferred()\n try:\n 1/0\n except:\n d.errback()\n d.addErrback(lambda x: None)",
"def error(self):\n return self._decorator_wrapper(EventName.error)",
"def setErrorDelegate(self, func):\r\n # Assign the user function to the internal callback handle\r\n self.errorDelegate = func",
"def error(self):\n ...",
"def registerDeferred(self, event, d):\n try:\n self._evq[event].schedule(d)\n except KeyError:\n raise ValueError(\"No such event type\", event)",
"def error(self, error):\n pass",
"def exception_callback(self, exception):\n self.exception_callback_value = exception",
"def add(self, obj, msg):\n self.errors.append((obj, msg))",
"def on_exception(self):\n pass",
"def error(self, error):\n\n self._error = error",
"def error(self, error):\n\n self._error = error",
"def error(self, error):\n\n self._error = error",
"def setError(self,err):\n self.error = err",
"def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)",
"def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()",
"def error(self, func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(\"The local error handler must be an async function\")\r\n self._error_handler = func\r\n return func",
"def on_failure(self, exc: BaseException) -> None:",
"def set_error_callback(self, cb_func):\n self._error_callback = cb_func",
"def handleError(self, now, failureObj):\n errorHandler = getattr(self.runnable, 'timedEventErrorHandler', None)\n if errorHandler is not None:\n self._rescheduleFromRun(errorHandler(self, failureObj))\n else:\n self._defaultErrorHandler(now, failureObj)",
"def on_failure(self):\n pass",
"def onentererror(self, event):\n print('onentererror; event: %s, %s->%s' % (event.event, event.src, event.dst))"
] | [
"0.67899597",
"0.6495937",
"0.619477",
"0.61869735",
"0.61564225",
"0.615239",
"0.59784204",
"0.59604985",
"0.58662796",
"0.5854047",
"0.5818122",
"0.5777247",
"0.5679716",
"0.5663086",
"0.5646356",
"0.56234443",
"0.5596893",
"0.5590271",
"0.5570113",
"0.5570113",
"0.5570113",
"0.5559853",
"0.55508345",
"0.55099773",
"0.5497176",
"0.5493794",
"0.548098",
"0.5474951",
"0.54661053",
"0.5449447"
] | 0.7338435 | 0 |
Returns a frozenset of variables used in given terms. Note that this returns all used variables, not just free ones. | def used_variables(*terms):
t = terms[0] if len(terms) == 1 else terms
if type(t) is Var:
return frozenset((t,))
elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,
Implies, Iff):
return union(*(used_variables(x) for x in t))
elif type(t) in (ForAll, Exists, Lambda, NamedBinder):
return union(used_variables(t.body), t.variables)
elif hasattr(t,'args'):
return union(*(used_variables(x) for x in t.args))
else:
assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def free_variables(*terms, **kwargs):\n by_name = kwargs.get('by_name', False)\n _free_variables = partial(free_variables, by_name=by_name)\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t.name if by_name else t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(_free_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return _free_variables(t.body) - _free_variables(*t.variables)\n\n elif hasattr(t,'args'):\n return union(*(_free_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def used_constants(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Const:\n return frozenset((t,))\n\n elif type(t) in (tuple, Var, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff, ForAll, Exists, Lambda, NamedBinder):\n return union(*(used_constants(x) for x in t))\n\n elif hasattr(t,'args'):\n return union(*(used_constants(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def bound_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset()\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(bound_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(bound_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(bound_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6",
"def variables(self):\n return [term.variable for term in self.terms]",
"def setOfVariables(self):\n return set(self.dictOfVariables().keys())",
"def get_variables(self):\n return set(self._head_vars)",
"def get_used_eqs_and_state_vars(eq_to_expand, equations):\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def variables(self):\n return sorted(set(self._variables))",
"def free_variables(formula):\n visitor = CollectFreeVariables()\n visitor.visit(formula)\n return [x.expr for x in visitor.free_variables] # Unpack the symrefs",
"def collect_primed_vars(t):\n g = Tree.from_recursive_ast(t)\n # (node, context)\n Q = [(t, False)]\n primed = set()\n while Q:\n u, c = Q.pop()\n if u.type == 'var' and c:\n primed.add(u.value)\n try:\n c = (u.operator == 'X') or c\n except AttributeError:\n pass\n Q.extend((v, c) for v in g.successors(u))\n return primed",
"def variables(self) -> AbstractSet[Variable]:\n return self._variables",
"def free_symbols(self):\n return ({j for i in self.args for j in i.free_symbols\n .difference(self.variables)})",
"def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def get_Term_frees(self, arg, free, non_free):\n args_vars = arg.variables() # get term's variables\n if args_vars != set(): # the set is not empty\n for var in args_vars:\n if var not in non_free and is_variable(var): # if it wasnt refrenced and is a var add it\n free.add(var)",
"def variables(self):\n # created variable from `get_variable`\n allname = [name for _, (name, t) in self._variable_info.iteritems()\n if t == 'variable']\n allvars = [v for v in K.get_all_variables() if v.name in allname]\n # related variables to all `Tensor`\n tensors = [self.get_variable(name)\n for name, (info, t) in self._variable_info.iteritems()\n if t == 'tensor']\n tensors = K.ComputationGraph(tensors).variables\n # all variables within the scope\n scope_vars = K.get_all_variables(scope=self.name)\n return sorted(set(allvars + tensors + scope_vars),\n key=lambda x: x.name)",
"def get_all_descriptor_terms(self):\n\t\tall_terms = set()\n\t\tfor ranking in self.get_descriptors(self.top_terms):\n\t\t\tall_terms = set(ranking).union(all_terms)\n\t\treturn sorted(all_terms)",
"def get_vars(scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]",
"def get_model_variables():\n g = tf.get_default_graph()\n return set(g.get_collection(tf.GraphKeys.MODEL_VARIABLES))",
"def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]",
"def variables_used (self) :\r\n\t\treturn []",
"def cnf_variables(cnf):\n variabs = set()\n\n for clause in cnf:\n for var in clause:\n var = abs(var)\n\n if var not in variabs:\n variabs.add(var)\n\n return variabs",
"def potential_values(self) -> Set[Hashable]:\n\t\treturn set(self.iter_potential_values())",
"def known(self, words):\n return set(w for w in words if w in self.word_dict)",
"def findall_var(formula, variable):\n res = []\n s = Solver()\n s.add(formula)\n while True:\n if s.check() == sat:\n m = s.model()\n res.append(m)\n value = m[variable]\n if value == None:\n return res\n s.add(variable != value)\n else:\n return res",
"def used_vars(self, values, errors, combo=None):\n var = self.equation_vars(combo)\n err = var.intersection(errors)\n val = var.intersection(values) - err\n return sorted(val), sorted(err)"
] | [
"0.7428986",
"0.6987549",
"0.69048446",
"0.6717069",
"0.6559568",
"0.63589525",
"0.62739486",
"0.6270176",
"0.61921215",
"0.6188963",
"0.61090964",
"0.5963275",
"0.5938047",
"0.5924042",
"0.589158",
"0.5881121",
"0.5876238",
"0.5858637",
"0.56572014",
"0.5639939",
"0.56199235",
"0.5619777",
"0.55976135",
"0.55348426",
"0.5501154",
"0.5484312",
"0.5473802",
"0.5453527",
"0.54531723",
"0.5442447"
] | 0.7920552 | 0 |
Returns a frozenset of variables free in given terms. | def free_variables(*terms, **kwargs):
by_name = kwargs.get('by_name', False)
_free_variables = partial(free_variables, by_name=by_name)
t = terms[0] if len(terms) == 1 else terms
if type(t) is Var:
return frozenset((t.name if by_name else t,))
elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,
Implies, Iff):
return union(*(_free_variables(x) for x in t))
elif type(t) in (ForAll, Exists, Lambda, NamedBinder):
return _free_variables(t.body) - _free_variables(*t.variables)
elif hasattr(t,'args'):
return union(*(_free_variables(x) for x in t.args))
else:
assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def bound_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset()\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(bound_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(bound_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(bound_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6",
"def used_constants(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Const:\n return frozenset((t,))\n\n elif type(t) in (tuple, Var, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff, ForAll, Exists, Lambda, NamedBinder):\n return union(*(used_constants(x) for x in t))\n\n elif hasattr(t,'args'):\n return union(*(used_constants(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def free_variables(formula):\n visitor = CollectFreeVariables()\n visitor.visit(formula)\n return [x.expr for x in visitor.free_variables] # Unpack the symrefs",
"def get_Term_frees(self, arg, free, non_free):\n args_vars = arg.variables() # get term's variables\n if args_vars != set(): # the set is not empty\n for var in args_vars:\n if var not in non_free and is_variable(var): # if it wasnt refrenced and is a var add it\n free.add(var)",
"def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))",
"def free_symbols(self):\n return ({j for i in self.args for j in i.free_symbols\n .difference(self.variables)})",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def variables(self):\n return [term.variable for term in self.terms]",
"def get_used_eqs_and_state_vars(eq_to_expand, equations):\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars",
"def setOfVariables(self):\n return set(self.dictOfVariables().keys())",
"def collect_primed_vars(t):\n g = Tree.from_recursive_ast(t)\n # (node, context)\n Q = [(t, False)]\n primed = set()\n while Q:\n u, c = Q.pop()\n if u.type == 'var' and c:\n primed.add(u.value)\n try:\n c = (u.operator == 'X') or c\n except AttributeError:\n pass\n Q.extend((v, c) for v in g.successors(u))\n return primed",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res",
"def variables(names, **kwargs):\n return symbols(names, cls=Variable, seq=True, **kwargs)",
"def variables(self):\n return sorted(set(self._variables))",
"def findall_var(formula, variable):\n res = []\n s = Solver()\n s.add(formula)\n while True:\n if s.check() == sat:\n m = s.model()\n res.append(m)\n value = m[variable]\n if value == None:\n return res\n s.add(variable != value)\n else:\n return res",
"def subset(self, names):\n vld = VarLookupDict(self._namespaces)\n new_ns = dict((name, vld[name]) for name in names)\n return EvalEnvironment([new_ns])",
"def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))",
"def get_variables(self):\n return set(self._head_vars)",
"def all_first_derivatives(self, set_of_variables=None):\n if set_of_variables is None:\n subset = self.variables\n else:\n subset = self.variables.intersection(set_of_variables)\n return {v: self.derivative(v) for v in subset}",
"def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)",
"def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)",
"def freeze(split):\n return frozenset(\n (name, frozenset(items)) for name, items in split.items()\n )",
"def _search_callable_free_vars(fn):\n node = _parse_and_analyze(fn)\n scope = anno.getanno(node, anno.Static.SCOPE)\n free_vars_all = list(scope.free_vars)\n namespace = inspect_utils.getnamespace(fn)\n filtered = []\n\n for var in free_vars_all:\n base = str(var.qn[0])\n\n if var.is_simple():\n if base in builtins.__dict__.keys():\n continue\n obj = namespace[base]\n else:\n assert var.is_composite()\n # A compositve qualified name `QN` can be either an attr or a subscript\n if var.has_subscript():\n # For free var with subscripts, both the base and full formats are\n # generated.\n # For example, if the code have `glob[idx]`, `free_vars_all` would\n # contain `glob` as well as `glob[idx]`.\n # The method only keeps the base format for simplicity.\n continue\n else:\n assert var.has_attr()\n # For free vars with multiple attributes like `f.g.h`,\n # just as the subscripts, multiple free vars (QN) are generated:\n # ['f', 'f.g', 'f.g.h']\n # If `f` is `self`, only process the first attribute `f.g`.\n # Otherwise, only process `f`.\n if not var.qn[0].is_composite() and base == \"self\":\n attr = str(var.qn[1])\n obj = getattr(fn.__self__, attr)\n else:\n continue\n\n if (inspect.ismodule(obj) or inspect.isclass(obj)):\n continue\n elif inspect.isfunction(obj) or inspect.ismethod(obj):\n while hasattr(fn, \"__wrapped__\"):\n obj = obj.__wrapped__\n if obj.__module__ != fn.__module__:\n continue\n filtered.append(FreeVar(str(var), True, obj))\n else:\n filtered.append(FreeVar(str(var), False, None))\n\n filtered = sorted(filtered, key=lambda x: x.name)\n return filtered",
"def cnf_variables(cnf):\n variabs = set()\n\n for clause in cnf:\n for var in clause:\n var = abs(var)\n\n if var not in variabs:\n variabs.add(var)\n\n return variabs",
"def variables(self) -> AbstractSet[Variable]:\n return self._variables",
"def setOfBetas(self, free=True, fixed=False):\n if free:\n return set(self.betas)\n\n return set()",
"def prism_polynomial_set_vector(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n set1d = prism_polynomial_set_1d(domain_dim, order, variables)\n return [\n VectorFunction([p if i == j else 0 for j in range(range_dim)])\n for p in set1d\n for i in range(range_dim)\n ]"
] | [
"0.7301436",
"0.6923216",
"0.66222817",
"0.6540475",
"0.6467375",
"0.63554454",
"0.6250781",
"0.61498994",
"0.5961769",
"0.58668995",
"0.5840457",
"0.5688273",
"0.5672531",
"0.5552212",
"0.5511896",
"0.55101573",
"0.54558146",
"0.5436367",
"0.5431674",
"0.54288334",
"0.54179144",
"0.5402639",
"0.53982294",
"0.53982294",
"0.53513396",
"0.53211236",
"0.52932745",
"0.5270516",
"0.521437",
"0.5207914"
] | 0.7726464 | 0 |
Returns a frozenset of variables bound in given terms. | def bound_variables(*terms):
t = terms[0] if len(terms) == 1 else terms
if type(t) is Var:
return frozenset()
elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,
Implies, Iff):
return union(*(bound_variables(x) for x in t))
elif type(t) in (ForAll, Exists, Lambda, NamedBinder):
return union(bound_variables(t.body), t.variables)
elif hasattr(t,'args'):
return union(*(bound_variables(x) for x in t.args))
else:
assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def free_variables(*terms, **kwargs):\n by_name = kwargs.get('by_name', False)\n _free_variables = partial(free_variables, by_name=by_name)\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t.name if by_name else t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(_free_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return _free_variables(t.body) - _free_variables(*t.variables)\n\n elif hasattr(t,'args'):\n return union(*(_free_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def used_constants(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Const:\n return frozenset((t,))\n\n elif type(t) in (tuple, Var, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff, ForAll, Exists, Lambda, NamedBinder):\n return union(*(used_constants(x) for x in t))\n\n elif hasattr(t,'args'):\n return union(*(used_constants(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def setOfVariables(self):\n return set(self.dictOfVariables().keys())",
"def cnf_variables(cnf):\n variabs = set()\n\n for clause in cnf:\n for var in clause:\n var = abs(var)\n\n if var not in variabs:\n variabs.add(var)\n\n return variabs",
"def subset(self, names):\n vld = VarLookupDict(self._namespaces)\n new_ns = dict((name, vld[name]) for name in names)\n return EvalEnvironment([new_ns])",
"def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6",
"def variables(self):\n return [term.variable for term in self.terms]",
"def free_variables(formula):\n visitor = CollectFreeVariables()\n visitor.visit(formula)\n return [x.expr for x in visitor.free_variables] # Unpack the symrefs",
"def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables",
"def collect_primed_vars(t):\n g = Tree.from_recursive_ast(t)\n # (node, context)\n Q = [(t, False)]\n primed = set()\n while Q:\n u, c = Q.pop()\n if u.type == 'var' and c:\n primed.add(u.value)\n try:\n c = (u.operator == 'X') or c\n except AttributeError:\n pass\n Q.extend((v, c) for v in g.successors(u))\n return primed",
"def prism_polynomial_set_vector(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n set1d = prism_polynomial_set_1d(domain_dim, order, variables)\n return [\n VectorFunction([p if i == j else 0 for j in range(range_dim)])\n for p in set1d\n for i in range(range_dim)\n ]",
"def check_occuring_variables(formula,variables_to_consider,allowed_variables) :\n variable_set=set(allowed_variables)\n for clause in formula :\n variables_in_clause = {abs(l) for l in clause if abs(l) in variables_to_consider}\n if not variables_in_clause <= variable_set:\n return False, [v for v in variables_in_clause if not v in variable_set] \n return True, []",
"def get_collections(scope_list):\n variables = [tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n for scope in scope_list]\n return list(chain(*variables))",
"def get_used_eqs_and_state_vars(eq_to_expand, equations):\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates",
"def canonical_variables(self):\n if not hasattr(self, 'bound_symbols'):\n return {}\n dums = numbered_symbols('_')\n reps = {}\n # watch out for free symbol that are not in bound symbols;\n # those that are in bound symbols are about to get changed\n bound = self.bound_symbols\n names = {i.name for i in self.free_symbols - set(bound)}\n for b in bound:\n d = next(dums)\n if b.is_Symbol:\n while d.name in names:\n d = next(dums)\n reps[b] = d\n return reps",
"def variables(self):\n return sorted(set(self._variables))",
"def get_variables(self):\n return set(self._head_vars)",
"def find_rvs_in_graph(vars: Union[Variable, Sequence[Variable]]) -> Set[Variable]:\n\n def expand(r):\n owner = r.owner\n if owner:\n inputs = list(reversed(owner.inputs))\n\n if isinstance(owner.op, HasInnerGraph):\n inputs += owner.op.inner_outputs\n\n return inputs\n\n return {\n node\n for node in walk(makeiter(vars), expand, False)\n if node.owner and isinstance(node.owner.op, (RandomVariable, MeasurableVariable))\n }",
"def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))",
"def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))",
"def all_first_derivatives(self, set_of_variables=None):\n if set_of_variables is None:\n subset = self.variables\n else:\n subset = self.variables.intersection(set_of_variables)\n return {v: self.derivative(v) for v in subset}",
"def findall_var(formula, variable):\n res = []\n s = Solver()\n s.add(formula)\n while True:\n if s.check() == sat:\n m = s.model()\n res.append(m)\n value = m[variable]\n if value == None:\n return res\n s.add(variable != value)\n else:\n return res",
"def variables(names, **kwargs):\n return symbols(names, cls=Variable, seq=True, **kwargs)",
"def variables(self) -> AbstractSet[Variable]:\n return self._variables",
"def get_vars(scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]",
"def power_set(min_a, max_a, min_b, max_b):\n terms = set()\n for a in range(min_a, max_a+1):\n for b in range(min_b, max_b+1):\n terms.add(a**b)\n return terms"
] | [
"0.72198117",
"0.70041084",
"0.6517899",
"0.59218013",
"0.5741847",
"0.57111204",
"0.5670193",
"0.5647711",
"0.5632197",
"0.5581976",
"0.55689853",
"0.55376583",
"0.54729617",
"0.5426977",
"0.5418284",
"0.5415937",
"0.5402921",
"0.53907967",
"0.5385815",
"0.53603476",
"0.53602",
"0.5352721",
"0.5350945",
"0.5327028",
"0.5316629",
"0.5300334",
"0.5297397",
"0.52944905",
"0.5228411",
"0.52199405"
] | 0.8032309 | 0 |
Returns a frozenset of constants used in given terms. | def used_constants(*terms):
t = terms[0] if len(terms) == 1 else terms
if type(t) is Const:
return frozenset((t,))
elif type(t) in (tuple, Var, Apply, Eq, Ite, Not, And, Or,
Implies, Iff, ForAll, Exists, Lambda, NamedBinder):
return union(*(used_constants(x) for x in t))
elif hasattr(t,'args'):
return union(*(used_constants(x) for x in t.args))
else:
assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_constants():\n return filter(\n lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,\n\n filter( # filter _PRIVATE variables\n lambda x: not x.startswith(\"_\"),\n globals()\n )\n )",
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res",
"def strict(cls):\n return frozenset()",
"def get_all_descriptor_terms(self):\n\t\tall_terms = set()\n\t\tfor ranking in self.get_descriptors(self.top_terms):\n\t\t\tall_terms = set(ranking).union(all_terms)\n\t\treturn sorted(all_terms)",
"def potential_values(self) -> Set[Hashable]:\n\t\treturn set(self.iter_potential_values())",
"def known(words):\r\n return set(w for w in words if w in WORDS)",
"def commonSetElementPredicate(field_set: Sequence[Any]) -> FrozenSet[str]:\n\n return frozenset(str(item) for item in field_set)",
"def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))",
"def known(self, words):\n return set(w for w in words if w in self.word_dict)",
"def all_terms(self, termset=None, phrases=True):\r\n\r\n if termset is None:\r\n termset = set()\r\n self._all_terms(termset, phrases=phrases)\r\n return termset",
"def get_type_term_set(self):\n term_set = self._term_set\n if term_set is None:\n term_set = set()\n type_tuples = self.get_type_tuples()\n for p_type in type_tuples:\n term_set.update(p_type)\n self._term_set = term_set\n return term_set",
"def bound_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset()\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(bound_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(bound_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(bound_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def power_set(min_a, max_a, min_b, max_b):\n terms = set()\n for a in range(min_a, max_a+1):\n for b in range(min_b, max_b+1):\n terms.add(a**b)\n return terms",
"def get_all_terms(self):\n return self.term.all()",
"def tactic_comps(cls) -> Set[str]:\n return set([\"mmic_autodock_vina\"])",
"def free_variables(*terms, **kwargs):\n by_name = kwargs.get('by_name', False)\n _free_variables = partial(free_variables, by_name=by_name)\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t.name if by_name else t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(_free_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return _free_variables(t.body) - _free_variables(*t.variables)\n\n elif hasattr(t,'args'):\n return union(*(_free_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def get_all_const(*ops):\n return all(map(get_const, ops))",
"def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]",
"def get_used_define_files(self):\n return set(self._used_defines.keys())",
"def freeze(split):\n return frozenset(\n (name, frozenset(items)) for name, items in split.items()\n )",
"def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts",
"def to_frozen_set( val ):\n if isinstance(val, str):\n return frozenset([ val ])\n else:\n return frozenset(val)",
"def getChemTorsions(self):\n dataDict = self.__dict__\n atomsIncluded = self.chemAtoms.issuperset\n result = frozenset(xx for xx in self.chemComp.chemTorsions if atomsIncluded(xx.chemAtoms))\n return result",
"def get_defined_constants():\n raise NotImplementedError()",
"def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]",
"def _known_in(self, words):\n return set(word for word in words if self._word_2_frequency.get(word))",
"def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp",
"def find_constants_referenced(self, text: str) -> list[str]:\n aList = sorted(set(re.findall(r\"@[A-Za-z_][-A-Za-z0-9_]*\", text)))\n # Exempt references to Leo constructs.\n for s in ('@button', '@constants', '@data', '@language'):\n if s in aList:\n aList.remove(s)\n return aList",
"def sameThreeCharStartPredicate(field: str) -> FrozenSet[str]:\n return frozenset(initials(field.replace(\" \", \"\"), 3))"
] | [
"0.63245064",
"0.5992687",
"0.59890866",
"0.59641623",
"0.5806408",
"0.56888586",
"0.56451476",
"0.55847293",
"0.5571786",
"0.55393314",
"0.55284834",
"0.5504401",
"0.54954517",
"0.54566354",
"0.54400617",
"0.5428424",
"0.54196256",
"0.5411198",
"0.53967535",
"0.53961635",
"0.539396",
"0.538466",
"0.53621495",
"0.5336895",
"0.5324716",
"0.5321782",
"0.5315656",
"0.53142464",
"0.53053576",
"0.5300554"
] | 0.80427533 | 0 |
Return the term obtained from t by simultaneous substitution given by subs. subs is either a dictionary or a mapping given by an iterable of (key, value) pairs. Both keys and values in subs can be either Var or Const. All keys in subs will be substituted by their values in subs. For variables, only free occurrences will be substituted. If the substitution would create capturing of a free variable, the substitution will fail with an error. | def substitute(t, subs):
if not isinstance(subs, dict):
subs = dict(subs)
if type(t) in (Var, Const):
if t in subs:
return subs[t]
else:
return t
elif type(t) in (Apply, Eq, Ite, Not, And, Or, Implies, Iff):
return type(t)(*(substitute(x, subs) for x in t))
elif type(t) in (ForAll, Exists, Lambda, NamedBinder):
forbidden_variables = free_variables(*subs.values())
if forbidden_variables.isdisjoint(t.variables):
return type(t)(t.variables, substitute(t.body, (
(k, v) for k, v in subs.iteritems()
if k not in t.variables
)))
else:
raise CaptureError(forbidden_variables.intersection(t.variables))
assert False, (t, subs) # capturing would be created
else:
        assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def substitute_apply(t, subs, by_name=False):\n\n if not isinstance(subs, dict):\n subs = dict(subs)\n\n _substitute_apply = partial(substitute_apply, subs=subs, by_name=by_name)\n\n if type(t) in (Var, Const):\n return t\n\n if type(t) is Apply and t.func in subs:\n terms = tuple(_substitute_apply(x) for x in t.terms)\n result = subs[t.func](*terms)\n fvr = free_variables(result, by_name=by_name)\n fvt = free_variables(*terms, by_name=by_name)\n assert fvr <= fvt, \"New free variables!? {}, {}\".format(fvr, fvt)\n return result\n\n elif type(t) in (Apply, Eq, Ite, Not, And, Or, Implies, Iff):\n return type(t)(*(_substitute_apply(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return type(t)(t.variables, _substitute_apply(t.body, subs=dict(\n (k, v) for k, v in subs.iteritems()\n if k not in t.variables\n )))\n else:\n assert False, type(e)",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n return self.subsitute_helper(copy.deepcopy(substitution_map), False)",
"def substitute(self, subs, **kwargs):\n return self",
"def substitute(self, formula, subs):\n # Check that formula is a term\n if not formula.is_term():\n raise TypeError(\"substitute() can only be used on terms.\")\n\n for (i, k) in enumerate(subs):\n v = subs[k]\n # Check that substitutions are terms\n if not k.is_term():\n raise TypeError(\n \"Only terms should be provided as substitutions.\" +\n \" Non-term '%s' found.\" % k)\n if not v.is_term():\n raise TypeError(\n \"Only terms should be provided as substitutions.\" +\n \" Non-term '%s' found.\" % v)\n # Check that substitutions belong to the current formula manager\n if k not in self.manager:\n raise TypeError(\n \"Key %d does not belong to the Formula Manager.\" % i)\n if v not in self.manager:\n raise TypeError(\n \"Value %d does not belong to the Formula Manager.\" % i)\n\n return self.walk(formula, substitutions=subs)",
"def _substitute(self, formula, subs):\n\n return subs.get(formula, formula)",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n\n if is_constant(self.root) or is_variable(self.root): # we need to to deal only with the root\n if self.root in substitution_map.keys():\n return substitution_map[self.root] # change it with it is in the map\n else:\n return Term(self.root) # else return it as is\n\n else:\n assert is_function(self.root) # we have a function\n if self.root in substitution_map.keys():\n root = substitution_map[self.root] # update the root if it is in map\n else:\n root = self.root # else, leave it as it is, without changing it to Term\n args = [] # this is our args\n for index, arg in enumerate(self.arguments): # for every arg, switch it with it's substitute\n args.append(arg.substitute(substitution_map)) # recursive call to substitute\n return Term(root, args)\n # Task 9.1",
"def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e",
"def substitute(expression, subs=None):\n if subs is None:\n subs = {}\n if isNumber(expression):\n return expression\n if isSymbol(expression):\n if expression.name in subs:\n return subs[expression.name]\n elif expression in subs:\n return subs[expression]\n else:\n return expression\n expr = expression.copy()\n # Must be an expression\n symbolDct = {s.name: s for s in expression.free_symbols}\n # Update entry in substitution to be the same as the expression\n newSubs = dict(subs)\n for key, value in subs.items():\n if key.name in symbolDct.keys():\n del newSubs[key]\n newSubs[symbolDct[key.name]] = value\n expr = expr.subs(newSubs)\n return sympy.simplify(expr)",
"def subst(s, x):\n if isinstance(x, list):\n return [subst(s, xi) for xi in x]\n elif isinstance(x, tuple):\n return tuple([subst(s, xi) for xi in x])\n elif not isinstance(x, Expr):\n return x\n elif is_var_symbol(x.op):\n return s.get(x, x)\n else:\n return Expr(x.op, *[subst(s, arg) for arg in x.args])",
"def substitute(self,s,x):\r\n\t\t\r\n\t\t# turn substitution into top line\r\n\t\ttry:\r\n\t\t\tt = Li(s)\r\n\t\t\tb = Li(1)\r\n\t\t\t\r\n\t\t# unless it is a list of lines\r\n\t\texcept:\r\n\t\t\tt = Li(s[0])\r\n\t\t\tb = Li(s[1])\r\n\t\t\r\n\t\t# split variable from power\r\n\t\th = Te._chop(x)\r\n\t\tx = h[0]\r\n\t\t\r\n\t\t# assume power of 1 for substituted variable, but revise if found in string\r\n\t\tp = 1\r\n\t\ttry:\r\n\t\t\tp = int(h[1])\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\t# exponents in each term\r\n\t\te = [i.look(x) for i in self]\r\n\t\t\r\n\t\t# adjust for power of substituted variable\r\n\t\te = [i // p for i in e]\r\n\t\t\r\n\t\t# max, min powers of substitution\r\n\t\ttry:\r\n\t\t\ta = max(e)\r\n\t\t\tm = min(e)\r\n\t\texcept:\r\n\t\t\ta = 0\r\n\t\t\tm = 0\r\n\t\t\r\n\t\t# truncate max and min powers \r\n\t\tif a < 0:\r\n\t\t\ta = 0\r\n\t\tif m > 0:\r\n\t\t\tm = 0\r\n\t\t\t\r\n\t\t# dictionaries of calculated terms for top and bottom\r\n\t\tf = {}\r\n\t\tg = {}\r\n\t\t\t\r\n\t\t# expand top and bottom to truncated max and min\r\n\t\tq,f = Li._expand(t,-m,f)\r\n\t\tr,g = Li._expand(b,a,g)\r\n\t\tq = Li(q,c=False)\r\n\t\tr = Li(r,c=False)\r\n\t\t\r\n\t\t# store results in dictionaries\r\n\t\ty = {-m: q}\r\n\t\tz = {a: r}\r\n\t\t\r\n\t\t# make denominator\r\n\t\td = q.multiply(r)\r\n\t\t\r\n\t\t# convert each term\r\n\t\tl = Li([])\r\n\t\tfor n,i in enumerate(self):\r\n\t\t\t\r\n\t\t\t# exponent of substitution\r\n\t\t\tw = e[n]\r\n\t\t\t\r\n\t\t\t# divide out variable\r\n\t\t\tv = Te({x: -w * p})\r\n\t\t\ti = i.multiply(v)\r\n\t\t\t\r\n\t\t\t# retrieve top expansion\r\n\t\t\tif (w - m) in y:\r\n\t\t\t\tu = y[w - m]\r\n\t\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tu,f = Li._expand(t,w - m,f)\r\n\t\t\t\tu = Li(u,c=False)\r\n\t\t\t\ty[w - m] = u\r\n\t\t\t\r\n\t\t\t# retrieve bottom expansion\r\n\t\t\tif (a - w) in z:\r\n\t\t\t\tc = z[a - w]\r\n\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tc,g = Li._expand(b,a - w,g)\r\n\t\t\t\tc = Li(c,c=False)\r\n\t\t\t\tz[a - w] = c\r\n\t\t\t\r\n\t\t\t# multiply and add\r\n\t\t\tu = u.multiply(c)\r\n\t\t\tu = u.multiply(i)\r\n\t\t\tl = l.add(u)\r\n\t\t\r\n\t\treturn [l,d]",
"def subs(self, *exp, **kargs):\n par = {}\n if self._has(\"p\"):\n par[\"p\"] = self._.p.subs(*exp)\n elif self._has(\"q\"):\n par[\"q\"] = self._.q.subs(*exp)\n elif self._has(\"P\"):\n par[\"P\"] = self._.P.subs(*exp)\n elif self._has(\"Q\"):\n par[\"Q\"] = self._.Q.subs(*exp)\n p, new = self._subs(exp, ASParameters(**par), kargs.get(\"seen\", {}))\n return p",
"def _substitute(self, mapping: VariableMapping) -> 'Substitution':\n return Substitution(\n # Create a new combined mapping. Later mappings override earlier\n # ones.\n mapping={\n **mapping,\n **{\n variable: term._substitute(mapping)\n for (variable, term) in self.mapping.items()\n }\n }\n )",
"def substitute(self, substitution: Dict[Terminal, \"CFG\"]) -> \"CFG\":\n idx = 0\n new_variables_d = {}\n new_vars = set()\n for variable in self._variables:\n temp = Variable(variable.value + SUBS_SUFFIX + str(idx))\n new_variables_d[variable] = temp\n new_vars.add(temp)\n idx += 1\n productions = []\n terminals = self._terminals.copy()\n final_replacement = {}\n for ter, cfg in substitution.items():\n new_variables_d_local = {}\n for variable in cfg.variables:\n temp = Variable(variable.value + SUBS_SUFFIX + str(idx))\n new_variables_d_local[variable] = temp\n new_vars.add(temp)\n idx += 1\n # Add rules of the new cfg\n for production in cfg.productions:\n body = []\n for cfgobj in production.body:\n if cfgobj in new_variables_d_local:\n body.append(new_variables_d_local[cfgobj])\n else:\n body.append(cfgobj)\n productions.append(\n Production(new_variables_d_local[production.head],\n body))\n final_replacement[ter] = new_variables_d_local[cfg.start_symbol]\n terminals = terminals.union(cfg.terminals)\n for production in self._productions:\n body = []\n for cfgobj in production.body:\n if cfgobj in new_variables_d:\n body.append(new_variables_d[cfgobj])\n elif cfgobj in final_replacement:\n body.append(final_replacement[cfgobj])\n else:\n body.append(cfgobj)\n productions.append(Production(new_variables_d[production.head],\n body))\n return CFG(new_vars, None, new_variables_d[self._start_symbol],\n set(productions))",
"def subs(self, dictin):\n\n return sum([ Dyadic( [ (v[0].subs(dictin), v[1], v[2]) ]) for v in\n self.args])",
"def do_variable_substitution(item):\n if isinstance(item, str):\n try:\n item = re_keyref.sub(getdata, item)\n except KeyError, err:\n print >> sys.stderr, (\n \"Use of undefined key in variable substitution: %s\"\n % err)\n elif isinstance(item, list):\n for index in range(len(item)):\n item[index] = do_variable_substitution(item[index])\n elif isinstance(item, dict):\n for key, value in item.iteritems():\n item[key] = do_variable_substitution(value)\n return item",
"def _subs(self, exp, p, seen):\n p, new = ASParameters._subs(self, exp, p, seen)\n if new:\n if self._has(\"theta\"):\n p._.theta = tuple(subs(th, *exp) for th in self._.theta)\n if self._has(\"omega\"):\n p._.omega = self._.omega.subs(*exp)\n return (p, new)",
"def convert_term(var_map, s, t):\n def convert(t):\n if t.head in var_map:\n if len(t.args) == 0:\n return s(Ident(to_binary(var_map[t.head])))\n elif len(t.args) == 1:\n return s(Para(Ident(to_binary(var_map[t.head])), t.arg))\n else:\n raise NotImplementedError\n elif t.is_equals():\n return Term.mk_equals(convert(t.arg1), convert(t.arg))\n elif logic.is_neg(t):\n return logic.neg(convert(t.arg))\n elif logic.is_conj(t):\n return logic.conj(convert(t.arg1), convert(t.arg))\n elif logic.is_disj(t):\n return logic.disj(convert(t.arg1), convert(t.arg))\n elif t.get_type() == boolT:\n return BoolV(t)\n elif t.get_type() == natT:\n return NatV(t)\n else:\n raise NotImplementedError\n\n return convert(t)",
"def func_subs(t, Func_expr, func, t0):\n assert(isinstance(type(Func_expr), UndefinedFunction))\n pos = Func_expr.args.index(t)\n\n def frozen(*args):\n # tuples are immutable\n L = list(args)\n L.insert(pos, t0)\n new_args = tuple(L)\n return func(*new_args)\n return frozen",
"def translator(dict):\n f = lambda match: dict.get(match.group(), match.group())\n return lambda expression: _word_pattern.sub(f,expression)",
"def variableSubstitution(d):\n variable = re.compile(r\"^(.*)\\$\\{(.*)\\}(.*)\")\n\n # translate the dictionary to lower-case keys:\n dd = {k.lower():v for k,v in d.iteritems()}\n maxIterations=4\n \n for i in range(maxIterations):\n anyChanges=False\n for k,v in dd.iteritems():\n if not isinstance(v,str):\n # Only operate on string-valued entries\n continue\n m = variable.match(v)\n if not m:\n continue\n anyChanges = True\n vout = str(v)\n while m:\n key = m.group(2).lower()\n if key not in dd.keys():\n print \"ERROR: variable substitution asks for nonexistent Attribute\", key, \"in\", v\n sys.exit(1)\n if key==k:\n print \"ERROR: self-reference to Attribute\", key, \"in\", v\n vv = dd[key]\n if not isinstance(vv,str):\n print \"ERROR: variable substitution using non-string-valued Attribute\",key\n sys.exit(1)\n vout = m.expand(r\"\\g<1>\"+vv+r\"\\g<3>\")\n m = variable.match(vout)\n dd[k] = vout\n if not anyChanges:\n break # Done\n if i==maxIterations:\n print \"ERROR: Too many iterations in variableSubstitution\"\n sys.exit(1)\n # restore case of original dictionary\n for k in d.keys():\n d[k] = dd[k.lower()]\n return",
"def variable_subs(self, variable, newexpr):\n cls = type(self)\n newexpr = cls(newexpr)\n try:\n index = list(self.variables).index(variable)\n except ValueError:\n index = None\n if index is not None:\n head, data = self.pair\n result = cls.Number(0)\n variables = cls.variables\n for exps, coeff in data.iteritems():\n term = cls.Number(1)\n for i,exp in enumerate(exps):\n if exp:\n if i==index:\n term *= newexpr**exp\n else:\n term *= cls.Symbol(variables[i])**exp\n result += term * cls.Number(coeff)\n return result\n raise NotImplementedError(`self.variables, variable, index`)",
"def _subs(self, exp, p, seen):\n if id(self) in seen:\n return (seen[id(self)], False)\n seen[id(self)] = p\n if self._has(\"p\") and not p._has(\"p\"):\n p._.p = self._.p.subs(*exp)\n if self._has(\"q\") and not p._has(\"q\"):\n p._.q = self._.q.subs(*exp)\n if self._has(\"P\") and not p._has(\"P\"):\n p._.P = self._.P.subs(*exp)\n if self._has(\"Q\") and not p._has(\"Q\"):\n p._.Q = self._.Q.subs(*exp)\n for k, v in self._.triple.items():\n p._.triple[k] = v.subs(*exp)\n for k, v in self._.quadruple.items():\n p._.quadruple[k] = v.subs(*exp)\n for par, part in self._.subschemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for par, part in self._.fusion_schemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for h, s in enumerate(self._.subconstituents):\n if s is None:\n continue\n s, refs = s\n name = self._subconstituent_name(h)\n try:\n p._.subconstituents[h] = (p.add_subscheme(\n s.subs(*exp, seen=seen), name), refs)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=name)\n if self._has(\"complement\") and not p._has(\"complement\"):\n try:\n p._.complement = self._.complement.subs(*exp, seen=seen)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=\"complement\")\n return (p, True)",
"def subs(self, dictin):\n\n ov = 0\n for i, v in enumerate(self.args):\n ov += Vector([(v[0].subs(dictin), v[1])])\n return ov",
"def substitute_elements(self, subs, **kwargs):\n return self",
"def substituteEquation(equation, substitutedVariable, substitutionEquation):\n# print(equation)\n# print(substitutionEquation)\n if substitutedVariable not in equation.keys():\n return equation\n \n sub_var_coefficient = substitutionEquation[substitutedVariable]\n coeff = equation[substitutedVariable]\n\n for i in substitutionEquation.keys():\n substitutionEquation[i] = substitutionEquation[i]/(sub_var_coefficient)\n# print(substitutionEquation)\n for i in substitutionEquation.keys() :\n\n if i not in equation.keys():\n equation[i] = 0\n equation[i] += -coeff*substitutionEquation[i]\n if equation[i] == 0:\n del equation[i]\n return equation",
"def variableSub(self, subMap):\n\t\t#create a copy of our Statement\n\t\treturned = copy.deepcopy(self)\n\t\t\n\t\t#for every variable specified in the input map\n\t\tfor variable in subMap.keys():\n\t\t\t# get all the themes it corresponds to\n\t\t\tif variable in returned.VariableMap.keys():\n\t\t\t\tthemes = returned.VariableMap[variable]\n\t\t\t\t#set all of the themes to the variable specificed\n\t\t\t\tfor theme in themes:\n\t\t\t\t\tif theme in returned.ArgDict.keys():\n\t\t\t\t\t\treturned[theme] = subMap[variable]\n\t\treturn returned",
"def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)",
"def substitute(x, c_name):\n if params.substitution[c_name].get(x) is not None:\n return params.substitution[c_name][x]\n else:\n return x",
"def subs(self, subs):\n if subs is None:\n raise ValueError(\"Invalid value for `subs`, must not be `None`\") # noqa: E501\n\n self._subs = subs",
"def fn_sub(self, value):\n\n if isinstance(value, list):\n value, variables = value\n else:\n # only template parameter names, resource logical IDs, and resource attributes, will be parsed\n value, variables = value, {}\n\n for name, target in variables.items():\n value = value.replace('${{{}}}'.format(name), target)\n\n return Functions.SUB_VARIABLE_PATTERN.sub(self._sub_variable, value)"
] | [
"0.6979903",
"0.6809032",
"0.68054605",
"0.67548573",
"0.6693763",
"0.6661133",
"0.65522754",
"0.65254176",
"0.6471985",
"0.62673676",
"0.6261165",
"0.6179068",
"0.6160233",
"0.61346424",
"0.6046451",
"0.5994784",
"0.5992433",
"0.5826811",
"0.58200824",
"0.5808683",
"0.57407844",
"0.565298",
"0.5639479",
"0.560589",
"0.55435",
"0.5516989",
"0.55031025",
"0.5460717",
"0.54464823",
"0.5396748"
] | 0.80016816 | 0 |
Return the term obtained from t by simultaneous substitution given by subs. subs is either a dictionary or a mapping given by an iterable of (key, value) pairs. Both keys and values in subs can be either Var or Const, but must be 2nd order. For any key, value in subs, Apply(key, terms) is substituted by value(terms'), so the values in subs should be Python functions (terms' is the result of recursively substituting terms according to subs). For variables, only free occurrences will be substituted. To avoid capturing, the free variables of value(terms) should be a subset of the free variables of terms, for any value in subs. This is checked, and an error occurs if this is not the case, even if it does not lead to capturing. If by_name=True, then this check is done by name and not by the Var object. Note that non-application occurrences of keys from subs in t are not substituted (these should generally not occur). | def substitute_apply(t, subs, by_name=False):
if not isinstance(subs, dict):
subs = dict(subs)
_substitute_apply = partial(substitute_apply, subs=subs, by_name=by_name)
if type(t) in (Var, Const):
return t
if type(t) is Apply and t.func in subs:
terms = tuple(_substitute_apply(x) for x in t.terms)
result = subs[t.func](*terms)
fvr = free_variables(result, by_name=by_name)
fvt = free_variables(*terms, by_name=by_name)
assert fvr <= fvt, "New free variables!? {}, {}".format(fvr, fvt)
return result
elif type(t) in (Apply, Eq, Ite, Not, And, Or, Implies, Iff):
return type(t)(*(_substitute_apply(x) for x in t))
elif type(t) in (ForAll, Exists, Lambda, NamedBinder):
return type(t)(t.variables, _substitute_apply(t.body, subs=dict(
(k, v) for k, v in subs.iteritems()
if k not in t.variables
)))
else:
        assert False, type(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def substitute(t, subs):\n\n if not isinstance(subs, dict):\n subs = dict(subs)\n\n if type(t) in (Var, Const):\n if t in subs:\n return subs[t]\n else:\n return t\n\n elif type(t) in (Apply, Eq, Ite, Not, And, Or, Implies, Iff):\n return type(t)(*(substitute(x, subs) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n forbidden_variables = free_variables(*subs.values())\n if forbidden_variables.isdisjoint(t.variables):\n return type(t)(t.variables, substitute(t.body, (\n (k, v) for k, v in subs.iteritems()\n if k not in t.variables\n )))\n else:\n raise CaptureError(forbidden_variables.intersection(t.variables))\n assert False, (t, subs) # capturing would be created\n\n else:\n assert False, type(e)",
"def substitute(self, formula, subs):\n # Check that formula is a term\n if not formula.is_term():\n raise TypeError(\"substitute() can only be used on terms.\")\n\n for (i, k) in enumerate(subs):\n v = subs[k]\n # Check that substitutions are terms\n if not k.is_term():\n raise TypeError(\n \"Only terms should be provided as substitutions.\" +\n \" Non-term '%s' found.\" % k)\n if not v.is_term():\n raise TypeError(\n \"Only terms should be provided as substitutions.\" +\n \" Non-term '%s' found.\" % v)\n # Check that substitutions belong to the current formula manager\n if k not in self.manager:\n raise TypeError(\n \"Key %d does not belong to the Formula Manager.\" % i)\n if v not in self.manager:\n raise TypeError(\n \"Value %d does not belong to the Formula Manager.\" % i)\n\n return self.walk(formula, substitutions=subs)",
"def _substitute(self, formula, subs):\n\n return subs.get(formula, formula)",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n\n if is_constant(self.root) or is_variable(self.root): # we need to to deal only with the root\n if self.root in substitution_map.keys():\n return substitution_map[self.root] # change it with it is in the map\n else:\n return Term(self.root) # else return it as is\n\n else:\n assert is_function(self.root) # we have a function\n if self.root in substitution_map.keys():\n root = substitution_map[self.root] # update the root if it is in map\n else:\n root = self.root # else, leave it as it is, without changing it to Term\n args = [] # this is our args\n for index, arg in enumerate(self.arguments): # for every arg, switch it with it's substitute\n args.append(arg.substitute(substitution_map)) # recursive call to substitute\n return Term(root, args)\n # Task 9.1",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n return self.subsitute_helper(copy.deepcopy(substitution_map), False)",
"def subs(self, *exp, **kargs):\n par = {}\n if self._has(\"p\"):\n par[\"p\"] = self._.p.subs(*exp)\n elif self._has(\"q\"):\n par[\"q\"] = self._.q.subs(*exp)\n elif self._has(\"P\"):\n par[\"P\"] = self._.P.subs(*exp)\n elif self._has(\"Q\"):\n par[\"Q\"] = self._.Q.subs(*exp)\n p, new = self._subs(exp, ASParameters(**par), kargs.get(\"seen\", {}))\n return p",
"def subs(self, dictin):\n\n return sum([ Dyadic( [ (v[0].subs(dictin), v[1], v[2]) ]) for v in\n self.args])",
"def substitute(expression, subs=None):\n if subs is None:\n subs = {}\n if isNumber(expression):\n return expression\n if isSymbol(expression):\n if expression.name in subs:\n return subs[expression.name]\n elif expression in subs:\n return subs[expression]\n else:\n return expression\n expr = expression.copy()\n # Must be an expression\n symbolDct = {s.name: s for s in expression.free_symbols}\n # Update entry in substitution to be the same as the expression\n newSubs = dict(subs)\n for key, value in subs.items():\n if key.name in symbolDct.keys():\n del newSubs[key]\n newSubs[symbolDct[key.name]] = value\n expr = expr.subs(newSubs)\n return sympy.simplify(expr)",
"def _subs(self, exp, p, seen):\n if id(self) in seen:\n return (seen[id(self)], False)\n seen[id(self)] = p\n if self._has(\"p\") and not p._has(\"p\"):\n p._.p = self._.p.subs(*exp)\n if self._has(\"q\") and not p._has(\"q\"):\n p._.q = self._.q.subs(*exp)\n if self._has(\"P\") and not p._has(\"P\"):\n p._.P = self._.P.subs(*exp)\n if self._has(\"Q\") and not p._has(\"Q\"):\n p._.Q = self._.Q.subs(*exp)\n for k, v in self._.triple.items():\n p._.triple[k] = v.subs(*exp)\n for k, v in self._.quadruple.items():\n p._.quadruple[k] = v.subs(*exp)\n for par, part in self._.subschemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for par, part in self._.fusion_schemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for h, s in enumerate(self._.subconstituents):\n if s is None:\n continue\n s, refs = s\n name = self._subconstituent_name(h)\n try:\n p._.subconstituents[h] = (p.add_subscheme(\n s.subs(*exp, seen=seen), name), refs)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=name)\n if self._has(\"complement\") and not p._has(\"complement\"):\n try:\n p._.complement = self._.complement.subs(*exp, seen=seen)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=\"complement\")\n return (p, True)",
"def substitute(self, subs, **kwargs):\n return self",
"def subst(s, x):\n if isinstance(x, list):\n return [subst(s, xi) for xi in x]\n elif isinstance(x, tuple):\n return tuple([subst(s, xi) for xi in x])\n elif not isinstance(x, Expr):\n return x\n elif is_var_symbol(x.op):\n return s.get(x, x)\n else:\n return Expr(x.op, *[subst(s, arg) for arg in x.args])",
"def _subs(self, exp, p, seen):\n p, new = ASParameters._subs(self, exp, p, seen)\n if new:\n if self._has(\"theta\"):\n p._.theta = tuple(subs(th, *exp) for th in self._.theta)\n if self._has(\"omega\"):\n p._.omega = self._.omega.subs(*exp)\n return (p, new)",
"def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e",
"def convert_term(var_map, s, t):\n def convert(t):\n if t.head in var_map:\n if len(t.args) == 0:\n return s(Ident(to_binary(var_map[t.head])))\n elif len(t.args) == 1:\n return s(Para(Ident(to_binary(var_map[t.head])), t.arg))\n else:\n raise NotImplementedError\n elif t.is_equals():\n return Term.mk_equals(convert(t.arg1), convert(t.arg))\n elif logic.is_neg(t):\n return logic.neg(convert(t.arg))\n elif logic.is_conj(t):\n return logic.conj(convert(t.arg1), convert(t.arg))\n elif logic.is_disj(t):\n return logic.disj(convert(t.arg1), convert(t.arg))\n elif t.get_type() == boolT:\n return BoolV(t)\n elif t.get_type() == natT:\n return NatV(t)\n else:\n raise NotImplementedError\n\n return convert(t)",
"def free_variables(*terms, **kwargs):\n by_name = kwargs.get('by_name', False)\n _free_variables = partial(free_variables, by_name=by_name)\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t.name if by_name else t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(_free_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return _free_variables(t.body) - _free_variables(*t.variables)\n\n elif hasattr(t,'args'):\n return union(*(_free_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def subs(self, dictin):\n\n ov = 0\n for i, v in enumerate(self.args):\n ov += Vector([(v[0].subs(dictin), v[1])])\n return ov",
"def substitute(self,s,x):\r\n\t\t\r\n\t\t# turn substitution into top line\r\n\t\ttry:\r\n\t\t\tt = Li(s)\r\n\t\t\tb = Li(1)\r\n\t\t\t\r\n\t\t# unless it is a list of lines\r\n\t\texcept:\r\n\t\t\tt = Li(s[0])\r\n\t\t\tb = Li(s[1])\r\n\t\t\r\n\t\t# split variable from power\r\n\t\th = Te._chop(x)\r\n\t\tx = h[0]\r\n\t\t\r\n\t\t# assume power of 1 for substituted variable, but revise if found in string\r\n\t\tp = 1\r\n\t\ttry:\r\n\t\t\tp = int(h[1])\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\t# exponents in each term\r\n\t\te = [i.look(x) for i in self]\r\n\t\t\r\n\t\t# adjust for power of substituted variable\r\n\t\te = [i // p for i in e]\r\n\t\t\r\n\t\t# max, min powers of substitution\r\n\t\ttry:\r\n\t\t\ta = max(e)\r\n\t\t\tm = min(e)\r\n\t\texcept:\r\n\t\t\ta = 0\r\n\t\t\tm = 0\r\n\t\t\r\n\t\t# truncate max and min powers \r\n\t\tif a < 0:\r\n\t\t\ta = 0\r\n\t\tif m > 0:\r\n\t\t\tm = 0\r\n\t\t\t\r\n\t\t# dictionaries of calculated terms for top and bottom\r\n\t\tf = {}\r\n\t\tg = {}\r\n\t\t\t\r\n\t\t# expand top and bottom to truncated max and min\r\n\t\tq,f = Li._expand(t,-m,f)\r\n\t\tr,g = Li._expand(b,a,g)\r\n\t\tq = Li(q,c=False)\r\n\t\tr = Li(r,c=False)\r\n\t\t\r\n\t\t# store results in dictionaries\r\n\t\ty = {-m: q}\r\n\t\tz = {a: r}\r\n\t\t\r\n\t\t# make denominator\r\n\t\td = q.multiply(r)\r\n\t\t\r\n\t\t# convert each term\r\n\t\tl = Li([])\r\n\t\tfor n,i in enumerate(self):\r\n\t\t\t\r\n\t\t\t# exponent of substitution\r\n\t\t\tw = e[n]\r\n\t\t\t\r\n\t\t\t# divide out variable\r\n\t\t\tv = Te({x: -w * p})\r\n\t\t\ti = i.multiply(v)\r\n\t\t\t\r\n\t\t\t# retrieve top expansion\r\n\t\t\tif (w - m) in y:\r\n\t\t\t\tu = y[w - m]\r\n\t\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tu,f = Li._expand(t,w - m,f)\r\n\t\t\t\tu = Li(u,c=False)\r\n\t\t\t\ty[w - m] = u\r\n\t\t\t\r\n\t\t\t# retrieve bottom expansion\r\n\t\t\tif (a - w) in z:\r\n\t\t\t\tc = z[a - w]\r\n\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tc,g = Li._expand(b,a - w,g)\r\n\t\t\t\tc = Li(c,c=False)\r\n\t\t\t\tz[a - w] = c\r\n\t\t\t\r\n\t\t\t# multiply and add\r\n\t\t\tu = u.multiply(c)\r\n\t\t\tu = u.multiply(i)\r\n\t\t\tl = l.add(u)\r\n\t\t\r\n\t\treturn [l,d]",
"def substitute(self, substitution: Dict[Terminal, \"CFG\"]) -> \"CFG\":\n idx = 0\n new_variables_d = {}\n new_vars = set()\n for variable in self._variables:\n temp = Variable(variable.value + SUBS_SUFFIX + str(idx))\n new_variables_d[variable] = temp\n new_vars.add(temp)\n idx += 1\n productions = []\n terminals = self._terminals.copy()\n final_replacement = {}\n for ter, cfg in substitution.items():\n new_variables_d_local = {}\n for variable in cfg.variables:\n temp = Variable(variable.value + SUBS_SUFFIX + str(idx))\n new_variables_d_local[variable] = temp\n new_vars.add(temp)\n idx += 1\n # Add rules of the new cfg\n for production in cfg.productions:\n body = []\n for cfgobj in production.body:\n if cfgobj in new_variables_d_local:\n body.append(new_variables_d_local[cfgobj])\n else:\n body.append(cfgobj)\n productions.append(\n Production(new_variables_d_local[production.head],\n body))\n final_replacement[ter] = new_variables_d_local[cfg.start_symbol]\n terminals = terminals.union(cfg.terminals)\n for production in self._productions:\n body = []\n for cfgobj in production.body:\n if cfgobj in new_variables_d:\n body.append(new_variables_d[cfgobj])\n elif cfgobj in final_replacement:\n body.append(final_replacement[cfgobj])\n else:\n body.append(cfgobj)\n productions.append(Production(new_variables_d[production.head],\n body))\n return CFG(new_vars, None, new_variables_d[self._start_symbol],\n set(productions))",
"def func_subs(t, Func_expr, func, t0):\n assert(isinstance(type(Func_expr), UndefinedFunction))\n pos = Func_expr.args.index(t)\n\n def frozen(*args):\n # tuples are immutable\n L = list(args)\n L.insert(pos, t0)\n new_args = tuple(L)\n return func(*new_args)\n return frozen",
"def match(self, pattern):\n if isinstance(pattern, Var):\n substitution = {pattern: self}\n elif isinstance(pattern, Term) and self.function == pattern.function \\\n and len(self.arguments) == len(pattern.arguments):\n terms = [Term.__match(self.arguments[idx], pattern.arguments[idx])\n for idx in range(0, len(self.arguments))]\n substitution = reduce(merge, terms)\n else:\n substitution = None\n return substitution",
"def bound_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset()\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(bound_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(bound_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(bound_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def do_variable_substitution(item):\n if isinstance(item, str):\n try:\n item = re_keyref.sub(getdata, item)\n except KeyError, err:\n print >> sys.stderr, (\n \"Use of undefined key in variable substitution: %s\"\n % err)\n elif isinstance(item, list):\n for index in range(len(item)):\n item[index] = do_variable_substitution(item[index])\n elif isinstance(item, dict):\n for key, value in item.iteritems():\n item[key] = do_variable_substitution(value)\n return item",
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def apply(self, subj, include_unmapped=False):\n def selfref(k, v): return subj.get(v, '$' + k) == '$' + k\n variables = {k: subj[v] for k, v in self.items() if not selfref(k, v)}\n if include_unmapped:\n mapped = set(self.values()) | set(variables)\n variables.update({k: subj[k] for k in subj if k not in mapped})\n return variables",
"def undetermined_coefficients(gensols: List[Symbol], func_coeffs: List[Symbol], gt: Symbol, t: Symbol = t) -> Tuple[Symbol, Procedure]:\n\n Y = Function('Y', real=True)(t)\n\n coeffs = numbered_symbols('A', cls=Dummy)\n coefflist = []\n\n trialset = _undetermined_coefficients_match(gt, t)['trialset']\n\n notneedset = set()\n\n mult = 0\n for i, sol in enumerate(gensols):\n check = sol\n if check in trialset:\n # If an element of the trial function is already part of the\n # homogeneous solution, we need to multiply by sufficient x to\n # make it linearly independent. We also don't need to bother\n # checking for the coefficients on those elements, since we\n # already know it will be 0.\n while True:\n if check*t**mult in trialset:\n mult += 1\n else:\n break\n trialset.add(check*t**mult)\n notneedset.add(check)\n\n newtrialset = trialset - notneedset\n\n # while True:\n # dependent = False\n # for trial in newtrialset:\n # if trial in gensols:\n # dependent = True\n # break\n # if not dependent:\n # break\n # newtrialset = set([t*trial for trial in trialset])\n\n # trialset = trialset.union(newtrialset)\n\n trialfunc = sympy.Number(0)\n for i in newtrialset:\n c = next(coeffs)\n coefflist.append(c)\n trialfunc += c*i\n\n derivatives = []\n\n eqs = 0\n for order, coeff in enumerate(func_coeffs[::-1]):\n deriv = simplify(trialfunc.diff(t, order))\n derivatives.append(\n Eq(Derivative(Y, t, order), deriv, evaluate=False))\n eqs += coeff * deriv\n\n coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))\n\n eqs_lhs = eqs\n\n eqs = _mexpand(simplify(eqs - gt).expand())\n\n for i in Add.make_args(eqs):\n s = separatevars(i, dict=True, symbols=[t])\n coeffsdict[s[t]] += s['coeff']\n\n coeffvals = solve(list(coeffsdict.values()), coefflist)\n\n if not coeffvals:\n print(\n \"Could not solve `%s` using the \"\n \"method of undetermined coefficients \"\n \"(unable to solve for coefficients).\" % eqs)\n\n psol = trialfunc.subs(coeffvals)\n\n procedure = Procedure()\n procedure\\\n .text('Find ').latex('Y(t)').text(' that mimics the form of ').latex('g(t)', nl=True)\\\n .eq(Eq(Y, trialfunc, evaluate=False))\\\n .text('Compute successive derivatives of ').latex('Y(t)', nl=True)\\\n .equlist(derivatives)\\\n .text('Plug the derivatives into the LHS and equate coefficients', nl=True)\\\n .equlist([Eq(eqs_lhs, gt, evaluate=False),\n Eq(simplify(eqs_lhs).expand().collect(t), gt, evaluate=False)])\\\n .equarr([Eq(a, 0, evaluate=False) for a in coeffsdict.values()])\\\n .text('Solve for the undetermined coefficients', nl=True)\\\n .equarr([Eq(k, v, evaluate=False)\n for k, v in coeffvals.items() if k != 0] if len(coeffvals) > 0 else [])\\\n .text('Substitute the coefficients to get the particular solution', nl=True)\\\n .eq(Eq(Dummy('y_p'), psol, evaluate=False))\n\n return psol, procedure",
"def variableSubstitution(d):\n variable = re.compile(r\"^(.*)\\$\\{(.*)\\}(.*)\")\n\n # translate the dictionary to lower-case keys:\n dd = {k.lower():v for k,v in d.iteritems()}\n maxIterations=4\n \n for i in range(maxIterations):\n anyChanges=False\n for k,v in dd.iteritems():\n if not isinstance(v,str):\n # Only operate on string-valued entries\n continue\n m = variable.match(v)\n if not m:\n continue\n anyChanges = True\n vout = str(v)\n while m:\n key = m.group(2).lower()\n if key not in dd.keys():\n print \"ERROR: variable substitution asks for nonexistent Attribute\", key, \"in\", v\n sys.exit(1)\n if key==k:\n print \"ERROR: self-reference to Attribute\", key, \"in\", v\n vv = dd[key]\n if not isinstance(vv,str):\n print \"ERROR: variable substitution using non-string-valued Attribute\",key\n sys.exit(1)\n vout = m.expand(r\"\\g<1>\"+vv+r\"\\g<3>\")\n m = variable.match(vout)\n dd[k] = vout\n if not anyChanges:\n break # Done\n if i==maxIterations:\n print \"ERROR: Too many iterations in variableSubstitution\"\n sys.exit(1)\n # restore case of original dictionary\n for k in d.keys():\n d[k] = dd[k.lower()]\n return",
"def unify(self,term,fact,bindings):\n\n n = len(term.split('(')[1][:-1].split(','))\n term_args = term.split('(')[1][:-1].split(',')\n fact_args = fact.split('(')[1][:-1].split(',')\n for i in range(n):\n if (not Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n if term_args[i] != fact_args[i]:\n return False\n elif (Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n bindings[term_args[i]] = fact_args[i]\n elif (not Prover.is_var(term_args[i])) and (Prover.is_var(fact_args[i])):\n bindings[fact_args[i]] = term_args[i]\n return bindings",
"def _substitute(self, mapping: VariableMapping) -> 'Substitution':\n return Substitution(\n # Create a new combined mapping. Later mappings override earlier\n # ones.\n mapping={\n **mapping,\n **{\n variable: term._substitute(mapping)\n for (variable, term) in self.mapping.items()\n }\n }\n )",
"def substitute(x, c_name):\n if params.substitution[c_name].get(x) is not None:\n return params.substitution[c_name][x]\n else:\n return x",
"def subs(self, subs):\n if subs is None:\n raise ValueError(\"Invalid value for `subs`, must not be `None`\") # noqa: E501\n\n self._subs = subs"
] | [
"0.72416764",
"0.65752196",
"0.6252526",
"0.6022401",
"0.5977601",
"0.5844865",
"0.57589626",
"0.57118535",
"0.55280894",
"0.55234385",
"0.55226636",
"0.5458737",
"0.54270804",
"0.533437",
"0.5322457",
"0.53204954",
"0.52589554",
"0.51950306",
"0.5139489",
"0.50581187",
"0.5030978",
"0.50072575",
"0.49782762",
"0.4959321",
"0.49551064",
"0.4943624",
"0.49351692",
"0.4900502",
"0.48973635",
"0.48944998"
] | 0.77619237 | 0 |
Returns True if t is Eq(x,x) for some x | def is_tautology_equality(t):
return type(t) is Eq and t.t1 == t.t2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def equals(x, y):\n return x == y",
"def exact(cls, lhs, rhs):\n return lhs == rhs",
"def __eq__(self, *args):\n return _ida_hexrays.cexpr_t___eq__(self, *args)",
"def eq(self, y):\n return 1 - self.ne(y)",
"def __eq__(self, other: t.Any) -> bool:\n return self._op_bool('__eq__', other)",
"def __eq__(self, *args):\n return _ida_frame.stkpnt_t___eq__(self, *args)",
"def equals_exact(self, other, tolerance): # -> bool:\n ...",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self,*args):\r\n pass",
"def __eq__(self, x):\n return _elas.SwigPyIterator___eq__(self, x)",
"def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq",
"def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq"
] | [
"0.6527325",
"0.6335552",
"0.62206423",
"0.61665654",
"0.6158353",
"0.6072844",
"0.606981",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6037759",
"0.6004478",
"0.5982946",
"0.5982946"
] | 0.69490355 | 0 |
Returns True if t is syntactically equal to u modulo alpha conversion | def equal_mod_alpha(t,u):
def rec(t,u,m1,m2,n):
if type(t) is Var and type(u) is Var:
return m1.get(t,t) == m2.get(u,u)
if type(t) in (ForAll, Exists, Lambda, NamedBinder) and type(t) is type(u):
if len(t.variables) == len(u.variables):
for v1,v2 in zip(t.variables,u.variables):
m1.push(v1,n)
m2.push(v2,n)
n += 1
res = rec(t.body,u.body,m1,m2,n)
for v1,v2 in zip(t.variables,u.variables):
m1.pop()
m2.pop()
return res
if type(t) is Apply and type(u) is Apply and t.func == u.func and len(t.terms) == len(u.terms):
return all(rec(v,w,m1,m2,n) for v,w in zip(t.terms,u.terms))
if type(t) is Const and type(u) is Const:
return t == u
if type(t) in (Apply, Eq, Ite, Not, And, Or, Implies, Iff) and type(u) is type(t):
return len(t) == len(u) and all(rec(v,w,m1,m2,n) for v,w in zip(tuple(t),tuple(u)))
return False
return rec(t,u,pushable_map(),pushable_map(),0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def u_exact(t):\n return a * t + b",
"def isalpha(self) -> bool:\n pass",
"def oracle(ct: int) -> bool:\n return rsa.dec(ct) & 1 == 0",
"def isAcute(trpl):\n vd = vectorFormat(trpl)\n if angle_between(*vd) < np.pi/2:\n return True\n else:\n return False",
"def check_palindrome():",
"def valida(rut):\n rfiltro = filtra(rut)\n rutx = str(rfiltro[0:len(rfiltro) - 1])\n digito = str(rfiltro[-1])\n multiplo = 2\n total = 0\n\n for reverso in reversed(rutx):\n total += int(reverso) * multiplo\n if multiplo == 7:\n multiplo = 2\n else:\n multiplo += 1\n modulus = total % 11\n verificador = 11 - modulus\n if verificador == 10:\n div = \"k\"\n elif verificador == 11:\n div = \"0\"\n else:\n if verificador < 10:\n div = verificador\n\n if str(div) == str(digito):\n return True\n else:\n return False",
"def isalpha(self):\n return isalpha(self)",
"def isIsomorphic(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n alphabet = {}\n used = {}\n for i in range(len(s)):\n char = alphabet.get(s[i])\n if char and char != t[i]:\n return False\n if not char and t[i] in used:\n return False\n alphabet[s[i]] = t[i]\n used[t[i]] = True\n return True",
"def alpha(self) -> bool:\n return \"a\" in self.modifier if self.modifier else False",
"def is_special(s):\n for part in xrange(1, 3**len(s)):\n p = part\n sa = 0\n ca = 0\n sb = 0\n cb = 0\n for i, x in enumerate(s):\n if p%3 == 1:\n sa += x\n ca += 1\n elif p%3 == 2:\n sb += x\n cb += 1\n p = p//3\n if ca == 0 or cb == 0:\n continue\n if sa == sb:\n return False\n if ca > cb and sa <= sb:\n return False\n if cb > ca and sb <= sa:\n return False\n return True",
"def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')",
"def istele(number):\n if number[:3] == '140':\n return True\n return False",
"def esCUITValida(cuit):\n # Convertimos el valor a una cadena\n cuit = str(cuit)\n # Aca removemos guiones, espacios y puntos para poder trabajar\n cuit = cuit.replace(\"-\", \"\") # Borramos los guiones\n cuit = cuit.replace(\" \", \"\") # Borramos los espacios\n cuit = cuit.replace(\".\", \"\") # Borramos los puntos\n # Si no tiene 11 caracteres lo descartamos\n if len(cuit) != 11:\n return False, cuit\n # Solo resta analizar si todos los caracteres son numeros\n if not cuit.isdigit():\n return False, cuit\n # Despues de estas validaciones podemos afirmar\n # que contamos con 11 numeros\n # Aca comienza la magia\n base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]\n aux = 0\n for i in range(10):\n aux += int(cuit[i]) * base[i]\n aux = 11 - (aux % 11)\n if aux == 11:\n aux = 0\n elif aux == 10:\n aux = 9\n if int(cuit[10]) == aux:\n return True, cuit\n else:\n return False, cuit",
"def isAlpha(string):\n return (True)",
"def has_u(term):\n return term.amp.has(u)",
"def test_valid_alpha(alpha: Any) -> None:\n check_alpha(alpha=alpha)",
"def is_tr(self, y, t):\n return t != 0 and y != 0",
"def ctci_is_unique(test_str):\n\n checker = 0\n for char in test_str:\n if checker & (1 << ord(char)):\n return False\n checker |= (1 << ord(char))\n return True",
"def is_tn(self, y, t):\n return t != 0 and y == 0",
"def comp_alpha(self):\n pass",
"def check_alpha(a):\n\n a = check_1d(a, \"alpha\")\n if any(map(lambda d: d <= 0, a)):\n raise Exception('Alpha cannot be 0 or negative')\n\n return a",
"def is_unique3(a_string):\n\n if len(a_string) is 0:\n print \"String is empty.\"\n return False\n\n charset = [False] * 256\n\n for char in a_string:\n print char\n if charset[ord(char)]:\n return False\n charset[ord(char)] = True\n return True",
"def is_t2t(self):\n g = self.get_gene().get_seq()\n if 'c' != g[1]:\n return False\n if not len(g) >= 8:\n return False\n for x in range(2, 4):\n if g[x] is not 'c':\n return False\n for x in range(1, len(g)-3):\n dec = 'd' if x % 4 == 0 else 'c'\n if g[x+3] is not dec:\n return False\n return True",
"def isalpha(a):\n return _vec_string(a, bool_, 'isalpha')",
"def siruta_is_valid(self, siruta):\n if type(siruta) != int:\n siruta = int(siruta)\n if siruta >= 10**6:\n return False\n weights = [1, 2, 3, 5, 7]\n checksum = 0\n checkdigit = siruta % 10\n index = 0\n while (index < 5):\n siruta = int(siruta / 10)\n left = (siruta % 10) * weights[index]\n checksum += sum(map(int, str(left))) # sum of digits of left\n index += 1\n checksum %= 10\n checksum = 11 - checksum\n checksum %= 10\n return checksum == checkdigit",
"def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1",
"def hasAlpha(self) :\n return self.m_hasAlpha",
"def isAnagram(self, s, t):\n \n s_count = {}\n t_count = {}\n for char in s:\n s_count[char] = s_count.get(char, 0) +1\n \n for char in t:\n t_count[char] = t_count.get(char, 0) +1\n \n return t_count == s_count",
"def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False",
"def question1(s, t):\n if s is None or t is None:\n return \"Error: Please enter valid strings\"\n len_s = len(s)\n len_t = len(t)\n if len_s == 0 or len_s < len_t:\n return \"Error: Length of s cannot be smaller than length of t\"\n s = s.lower()\n t = t.lower()\n if len_t == 0:\n return True\n t = sorted(t)\n for i in range(len_s - len_t + 1):\n sub_s = s[i:i + len_t]\n if sorted(sub_s) == t:\n return True\n return False"
] | [
"0.58734155",
"0.5778579",
"0.5749819",
"0.5632407",
"0.55852145",
"0.55829185",
"0.5546565",
"0.5520525",
"0.55134135",
"0.5501811",
"0.5460695",
"0.5449854",
"0.54210347",
"0.5412214",
"0.538456",
"0.5383626",
"0.53737104",
"0.5345897",
"0.5311306",
"0.5297979",
"0.5283892",
"0.5245812",
"0.5243051",
"0.52406865",
"0.52362174",
"0.5230435",
"0.522333",
"0.5218643",
"0.520703",
"0.5194474"
] | 0.67610735 | 0 |
Return the body part of HTML files. | def get_body(html_file_content):
return findall("<body>(.*?)</body>", html_file_content, DOTALL)[0].decode("utf-8") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_body(html_page):\n soup = BeautifulSoup(open(html_page), 'html.parser')\n body = soup.find('body')\n return body",
"def get_body_content(self):\n\n try:\n html_tree = parse_html_string(self.content)\n except:\n return ''\n\n html_root = html_tree.getroottree()\n\n if len(html_root.find('body')) != 0:\n body = html_tree.find('body')\n\n tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8', xml_declaration=False)\n\n # this is so stupid\n if tree_str.startswith(six.b('<body>')):\n n = tree_str.rindex(six.b('</body>'))\n\n return tree_str[7:n]\n\n return tree_str\n\n return ''",
"def html_body(self) -> str:\n html_templates = utils.get_file_contents(os.path.join(\n feconf.INTERACTIONS_DIR, self.id, '%s.html' % self.id))\n return html_templates",
"def html_body(self):\n return self._html_body",
"def html_body(self):\n return self.getattr('html_body')",
"def open_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"<body>\\n\n \"\"\"\n return self.html_doc",
"def get_body_text(self):\n if self.body_type != 'HTML':\n return self.body\n\n try:\n soup = bs(self.body, 'html.parser')\n except RuntimeError:\n return self.body\n else:\n return soup.body.text",
"def getContents(self):\n normal_body_regex = re.compile(r'[ \\n\\r\\t]+')\n return normal_body_regex.sub(' ', self.contents)",
"def get_body_soup(self):\n if self.body_type != 'HTML':\n return None\n else:\n return bs(self.body, 'html.parser')",
"def body(self):\r\n self._body.seek(0)\r\n return self._body",
"def setMITPageBody(self, context, fdata):\n fns = context.source.listFiles()\n for fn in fns:\n import os\n mimetype = mimetypes.guess_type(fn)\n textDoc = ''\n if mimetype:\n if mimetype[0]:\n textDoc = mimetype[0].split('/')[0]\n\n if fn[-1] != os.sep and textDoc == 'text':\n data = context.source.readFile(fn)\n from BeautifulSoup import BeautifulSoup\n soup = BeautifulSoup(data)\n \n ftext = ''\n if soup.findAll('div',attrs={'class':'maincontent'}):\n bc = soup.findAll('div',attrs={'class':'bread-crumb'})\n if bc:\n titleTag = bc[0].nextSibling.nextSibling\n bc[0].extract()\n if titleTag.name == 'h1':\n titleTag.extract()\n ftext = str(soup.findAll('div',attrs={'class':'maincontent'})[0])\n \n if not ftext:\n tbls = soup('table')\n for tbl in tbls:\n if tbl.has_key('summary'):\n summary = tbl['summary']\n if summary.find('Main Content Header') > 0:\n ftext = str(tbl)\n\n if ftext:\n fdata[fn] = ftext",
"def body(self):\n return self.browser.element(self.BODY_LOC)",
"def close_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</body>\\n\n \"\"\"\n return self.html_doc",
"def body(self) -> str:\n return pulumi.get(self, \"body\")",
"def body(self):\n return self.getattr('body')",
"def template_body(self) -> str:\n return pulumi.get(self, \"template_body\")",
"def build_body(self) -> str:\n # Always include default.js\n files = [os.path.join(self.directory, \"default.js\")]\n\n # Find increasingly less specific files based on the request path.\n paths = self.path.replace(\"/\", \"\").split(\".\")\n while paths:\n files.append(os.path.join(self.directory, \".\".join(paths)))\n paths = paths[1:]\n\n # Combine the files found, if they exist.\n body = \"// dotjs is working! //\\n\"\n for filename in files:\n if os.path.exists(filename):\n with open(filename) as fp:\n body += fp.read() + \"\\n\"\n\n return body",
"def body(self):\n\n return self._body",
"def get_html_part(parts):\n for part in parts:\n if part[\"mimeType\"] == \"text/html\":\n return part[\"body\"][\"data\"]\n return \"\"",
"def message_body_html(self):\n ...",
"def body(self) -> \"str\":\n return self._attrs.get(\"body\")",
"def body(self) -> \"str\":\n return self._attrs.get(\"body\")",
"def get_html_content():\n \n request = urllib2.Request(RBI_URL, headers=HEADERS)\n page = urllib2.urlopen(request)\n html_content = page.read()\n return html_content",
"def body(self) -> str:\n return self.data['body']",
"def _single_body(part):\n content_type = part.get_content_type()\n try:\n body = part.get_payload(decode=True)\n except Exception:\n return ''\n\n if content_type == 'text/html':\n return BeautifulSoup(body, 'html.parser').text\n elif content_type == 'text/plain':\n return body\n return ''",
"def get_html_parts(self):\n script_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'data')\n with open(os.path.join(script_path, 'head.html'), 'r') as hfile:\n self.header = hfile.read()\n with open(os.path.join(script_path, 'template.html'), 'r') as hfile:\n self.template = hfile.read()\n with open(os.path.join(script_path, 'footer.html'), 'r') as hfile:\n self.footer = hfile.read()\n self.module_icon = os.path.join(script_path, 'icon.png')\n return True",
"def get_main_page(self):\n if OPENFDA_BASIC:\n with open(\"openfda_basic.html\") as html_file:\n html = html_file.read()\n else:\n with open(\"openfda.html\") as html_file:\n html = html_file.read()\n\n return html",
"def get_article_body(url):\n G.go(url)\n text = G.doc.select('////div[@class=\"StandardArticleBody_body\"]').text()\n return text",
"def get_HTML(file):\r\n\r\n f = open(file, 'r')\r\n lines = f.readlines()\r\n f.close()\r\n return \"\".join(lines)",
"def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )"
] | [
"0.77461666",
"0.7460988",
"0.7410468",
"0.7252582",
"0.72338134",
"0.69558644",
"0.6863169",
"0.66610724",
"0.66539806",
"0.665275",
"0.6494882",
"0.64808214",
"0.6437959",
"0.6399701",
"0.63638884",
"0.63097113",
"0.6272104",
"0.6268735",
"0.62536013",
"0.62374175",
"0.6231799",
"0.6231799",
"0.6185719",
"0.6173802",
"0.6124519",
"0.6117942",
"0.6103839",
"0.6102988",
"0.61005086",
"0.6081967"
] | 0.7931815 | 0 |
Correct the diagram links in the main content body. | def correct_img_links(body_main_content, schema_name, list_name_image):
for name_image in list_name_image:
body_main_content = body_main_content.replace(
"src=\"" + name_image + "\"",
"src=\"{% static \"schema_viewer/oxygen/" + schema_name + "/" + name_image + "\" %}\""
)
return body_main_content | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fix_links():\n pass",
"def append_links(self, lines, lang):\n lines.append(\"verbatim \")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php UISP\" )\n lines.append(\"verbatim \")\n return lines",
"def adjust_anchors(self):\n pass",
"def fix_morphs():\n morph_links = load_morph_links()\n create_morphs_node(morph_links)\n create_custom_template(morph_links)\n clean_morphs()",
"def fixRelations (self):\n\t\tnodes = self.getFieldElements (\"relation\")\n\t\tif not nodes: return\n\t\t\n\t\tprint \"\\n%s\" % self.getId()\n\t\tfor r in nodes:\n\t\t\tvalue = XmlUtils.getText(r)\n\t\t\tif not value: return\n\t\t\tXmlUtils.setText (r,\"\")\n\t\t\tif value.startswith (\"http://\"):\n\t\t\t\tr.setAttribute (\"type\", \"Has part\")\n\t\t\t\tr.setAttribute (\"url\", value)\n\t\t\telse:\n\t\t\t\tr.setAttribute (\"type\", \"Is related\")\n\t\t\t\tr.setAttribute (\"title\", value)\n\t\t\tprint r.toxml()\n\t\tif 0:\n\t\t\tself.write()\n\t\t\tprint \"wrote record\"",
"def _fix_links(self, text, page_names):\n for n in page_names:\n text = text.replace(f\"]({n})\", f\"]({n}.html)\")\n text = text.replace(f\"]({n}.md)\", f\"]({n}.html)\")\n return text",
"def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())",
"def __drawNodes(self, levelDictionary, linkNodeDict, topLeft):\r\n setSmooth = self.__optionsDatabase.get('Spline optimization') \r\n setCurvature = self.__optionsDatabase.get('Arrow curvature') \r\n minOffsetY = self.__optionsDatabase.get('yOffset') \r\n minOffsetX = self.__optionsDatabase.get('xOffset') \r\n giveExtraSpaceForLinks = self.__optionsDatabase.get('addEdgeObjHeight') \r\n\r\n # Caclulate x, y offsets\r\n offsetX = 0\r\n levelInt2offsetY = dict()\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt]\r\n levelInt2offsetY[levelInt] = 0\r\n \r\n # Calculate maximum node size on a per level basis (X is for all levels)\r\n # Then add minimum seperation distance between nodes\r\n for node in currentLevel:\r\n # getSize returns node width, and height of the node & child link icon\r\n x, y = node.getSize(giveExtraSpaceForLinks)\r\n offsetX = max(offsetX, x)\r\n levelInt2offsetY[levelInt] = max(levelInt2offsetY[levelInt], y) \r\n \r\n \r\n maxOffsetX = offsetX + minOffsetX\r\n halfOffsetX = offsetX / 2\r\n \r\n # Send nodes to their final destination, assign final pos to dummy edges\r\n x, y = topLeft\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt] \r\n longEdgeOffset = [halfOffsetX, levelInt2offsetY[levelInt] / 3]\r\n \r\n # Move each node in the level (Dummy edges save the pos but don't move)\r\n for node in currentLevel:\r\n node.moveTo(x + node.getGridPosition() * maxOffsetX, y, longEdgeOffset)\r\n \r\n # Increment y for the next iteration\r\n y += levelInt2offsetY[levelInt] + minOffsetY\r\n \r\n # Self-looping edges (Must move these manually into position)\r\n for selfLoopedEdge in NodeWrapper.SelfLoopList: \r\n x, y = selfLoopedEdge.getEdgePosition()\r\n obj = selfLoopedEdge.getASGNode().graphObject_\r\n obj.moveTo(x, y)\r\n\r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Re-wire the links to take into account the new node positions\r\n selectedLinks = []\r\n for obj in linkNodeDict.values():\r\n selectedLinks.append(obj)\r\n optimizeLinks(self.cb, setSmooth, setCurvature, \r\n selectedLinks=selectedLinks)\r\n \r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Route multi-layer edges\r\n self.__edgeRouter()",
"def links_to_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'a')",
"def useDefaultView(self):\n\n\t\torigin_tracker = viztracker.KeyboardMouse6DOF()\n\t\torigin_link = viz.link(origin_tracker, cave_origin)\n\t\t#origin_link.setMask(viz.LINK_POS)",
"def _link_elements(self):\n raise NotImplementedError(\"Please implement this method\")",
"def link_dihedra(self, verbose: bool = ...) -> None:\n ...",
"def fix_default_content(portal):\n logger = logging.getLogger(PROJECTNAME)\n content_ids = ['front-page', 'events', ]\n portal_ids = portal.objectIds()\n for cId in content_ids:\n if cId in portal_ids:\n portal.manage_delObjects([cId])\n logger.info('Deleted object with id %s' % cId)\n if 'news' in portal_ids:\n news = portal['news']\n news.setTitle(u'Notícias')\n news.setDescription(u'Notícias do Plone Symposium')\n news.reindexObject()\n if 'Members' in portal_ids:\n # Hide user's tab\n members = portal['Members']\n members.setTitle(u'Participantes')\n members.setExcludeFromNav(True)\n members.reindexObject()\n\n logger.info('Cleaned up portal contents')",
"def generate(self, diagram):",
"def correct_links(html_file, schema_name):\n return html_file.replace(schema_name.replace(\".\", \"_\") + \"_xsd.html#\", \"#\").replace(\"target=\\\"mainFrame\\\"\", \"\")",
"def loadLinks(self):\n\t\tmainPageFile = open(self.mainPageFilePath, 'r')\n\t\ttry:\n\t\t\tlines = mainPageFile.readlines()\n\t\t\tif len(lines) > 0:\n\t\t\t\tself.mainPage = lines[0]\n\t\t\telse:\n\t\t\t\tprint(\"No link in : \", self.mainPageFilePath)\n\t\tfinally:\n\t\t mainPageFile.close()\n\n\t\tlinkFile = open(self.linkFilePath, 'r')\n\t\ttry:\n\t\t\t# add all links to a list\n\t\t\tfor line in linkFile.readlines():\n\t\t\t\tif \"http\" in line:\n\t\t\t\t\tsubPageName = line.split(\".de\")[1].replace(\"/\", \"\")\n\t\t\t\t\tself.subpages.append( Subpage(line.strip(' \\t\\n\\r'),subPageName,self.mainPage) )\n\t\tfinally:\n\t\t linkFile.close()",
"def extendMarkdown(self, md, md_globals):\r\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def __update_diagrams(self):\n for diagram in self.__list:\n diagram.resize(280, 350)\n self.__group_layout.addWidget(diagram, 10, Qt.AlignTop)\n if self.__diagram_count == 1:\n self.add_stretch()\n self.__button_layout.insertWidget(0, self.__start_button)",
"def format(self, content, link):\n error = None\n # Phase 1 - setting correct xpath -> what to include as text from html\n tree = html.fromstring(content)\n # TODO: They change this often!!!\n texts = tree.xpath('//section[starts-with(@class,\"article-body\")]/div[starts-with(@id, \"node-\") and not(descendant::figcaption)]//text()')\n if not texts:\n texts = tree.xpath('//section[contains(@class,\"article-body\")]/div[starts-with(@id, \"node-\") and not(descendant::figcaption)]//text()')\n if not texts:\n texts = tree.xpath('//div[@class=\"classic-body\"]/p//text()')\n # There are also articles that show 404\n # This should be error message in Coindesk.article() as 404 articles\n # does not have dates\n if not texts:\n try:\n error = tree.xpath('//section[@class=\"error-module\"]')[0]\n except:\n error = None\n # There are article that have no text\n if not texts:\n try:\n empty_article = tree.xpath('//section[contains(@class,\"article-body\")]')[0]\n except IndexError:\n empty_article = None\n # If article has no text, does not show 404 error it is time to update xpath\n # This need to be tested on more articles !!!\n if not texts and empty_article is None and error is None:\n raise ContentError(f\"ERROR: Update xpath for text {link} !!!\")\n elif not texts and empty_article is None and error is not None:\n return \"404\"\n else:\n return 'No text available'\n para = []\n j = []\n # Phase 2 - connecting appropriate items in paras list\n text_list = [re.sub(r'[\\xa0]', '', i) for i in texts]\n for i in text_list:\n if i.endswith('.') or i.endswith('.\"') or i.endswith('. ') or i.endswith('.”') or i.endswith('.’”'):\n if j:\n j.append(i)\n para.append(j)\n j = []\n else:\n para.append(i)\n else:\n try:\n next_space = text_list[text_list.index(i) + 1].startswith(' ')\n next_capital = text_list[text_list.index(i) + 1][0].isupper()\n except IndexError:\n next_space = False\n next_capital = False\n if next_space and next_capital:\n para.append(i)\n else:\n j.append(i)\n # Phase 3 - final formatting corrections from list of paras to fluent text\n paras = [' '.join(i) if isinstance(i, list) else i for i in para]\n paras = [re.sub(r' +', ' ', i).strip() for i in paras]\n paras = [str(i) + '\\n' for i in paras]\n return ' '.join(paras)",
"def _do_layout(self):\n return",
"def get_diagram(self):\n self_nodes=self.nodes.all()\n self_arrows=self.arrows.all()\n \n \n if len(self_nodes)==0:\n return False\n \n nodes = [n.get_icon_obj() for n in self_nodes]\n node_liens = [n.liens.all() for n in self_nodes]\n \n pairs = []\n for n,n_liens in zip(nodes,node_liens):\n if len(n_liens)==0:\n liens = Lien.objects.filter(cause__id=n.target_id).all()\n liens = [l.consequence.id for l in liens]\n temp = [(n,target) for target in nodes if target.target_id in liens]\n pairs.extend(temp)\n else:\n ids=set([(i.cause.id,i.consequence.id) for i in n_liens])\n for n in nodes:\n pairs.extend([(n,i) for i in nodes if i is not n and \n (n.target_id,i.target_id) in ids])\n ids = set([(i.cause.id,i.consequence.id) for i in self_arrows])\n pairs = [p for p in pairs if (p[0].target_id,p[1].target_id) not in ids]\n \n lines=[]\n arrows=[]\n for obj in self_arrows:\n \n n0=[i for i in nodes if i.node_id==obj.cause.id]\n n1=[i for i in nodes if i.node_id==obj.consequence.id]\n if len(n0)!=1 or len(n1)!=1:\n continue\n n0=n0[0]\n n1=n1[0]\n \n pt=[(obj.X0, obj.Y0), (obj.X1,obj.Y1)]\n pt=[np.array(i) for i in pt if None not in i]\n if len(pt)==0:\n pairs.append((n0,n1))\n continue\n pairs = [p for p in pairs if (p[0].node_id,p[1].node_id)!=(n0.node_id,n1.node_id)]\n vect = pt[0]-np.array(n0.pos)\n first_pt = np.array(n0.pos)+vect*n0.size/np.sqrt(sum(vect*vect))\n vect = np.array(n1.pos) - pt[-1]\n last_pt = np.array(n1.pos)-vect*n1.size/np.sqrt(sum(vect*vect))\n pt=[first_pt,*pt,last_pt]\n \n lines.extend([((*i,*j),n0.color) for i,j in zip(pt[:-1],pt[1:])])\n arrows.append(((*pt[-2],*pt[-1]),n0.color))\n \n \n margin=10\n line_width=2\n \n diagram=DiagramObj(self.id,nodes,pairs,margin,\n self.width,self.height,line_width)\n diagram.add_arrows(arrows,lines)\n print(diagram.lines)\n return diagram",
"def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]) -> None:\n # XXX: modifies tree inline\n # Logic modeled from themes/basic/genindex.html\n for _key, columns in tree:\n for _entryname, (links, subitems, _key) in columns:\n for (i, (ismain, link)) in enumerate(links):\n m = self.refuri_re.match(link)\n if m:\n links[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))\n for _subentryname, subentrylinks in subitems:\n for (i, (ismain, link)) in enumerate(subentrylinks):\n m = self.refuri_re.match(link)\n if m:\n subentrylinks[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))",
"def on_page_markdown(self, markdown, **kwargs):\n for autolink in self.config[\"autolinks\"]:\n markdown = replace_autolink_references(markdown, autolink[\"reference_prefix\"], autolink[\"target_url\"])\n\n return markdown",
"def get_cause_diagram(self, width=500, height = 500):\n size_center= 7\n default_size = 3\n distance = 15\n line_width = 2\n margin = 5\n side_angle = 0.7\n \n causes = self.causes.all()\n lien_causes = [lien.cause for lien in causes]\n \n if len(causes)>0:\n # computing position and size of the graphic elements\n sizes= [lien.relative_share for lien in causes]\n sizes= [np.sqrt(i) if i is not None else default_size for i in sizes]\n sizes= [min(i,distance-size_center-2) for i in sizes]\n \n titles=[c.title for c in lien_causes]\n pics=[c.icon_picture for c in lien_causes]\n colors=[\"grey\" if c.secteur is None else \n c.secteur.color for c in lien_causes]\n text = [lien.cause_description for lien in causes]\n ids = [c.id for c in lien_causes]\n \n \n angle=[0]\n angle.extend([i+j for i,j in zip(sizes[:-1],sizes[1:])])\n angle=np.cumsum(angle)*(2*np.pi/(2*sum(sizes)))\n angle += np.pi - angle[-1]/2\n angle *= side_angle/2 \n angle += (1-side_angle)/2*np.pi + np.pi/2\n pos = [(distance*np.cos(i),distance*np.sin(i)) for i in angle]\n \n # storing as object list \n icons=[IconObj(*i) for i in \n zip(titles,text,pics,colors,pos,sizes,ids)]\n else:\n icons=[]\n \n try:\n color_center=self.secteur.color \n except AttributeError:\n color_center=\"grey\"\n center = IconObj(self.title,self.short_description,\n self.icon_picture,color_center,\n (0,0),size_center,self.id)\n \n\n con = self.consequences.all()\n \n if len(con)>0:\n # computing position and size of the graphic elements\n titles2 =[lien.consequence.title for lien in con]\n text2 = [lien.consequence_description for lien in con]\n pics2 =[lien.consequence.icon_picture for lien in con]\n colors2 =[\"grey\" if lien.consequence.secteur is None else \n lien.consequence.secteur.color for lien in con]\n ids2 = [lien.consequence.id for lien in con]\n\n angle2=np.linspace(0,2*np.pi,len(con),endpoint=False)\n angle2 += np.pi - angle2[-1]/2\n angle2 *= side_angle/2 \n angle2 += (1-side_angle)/2*np.pi - 1/2*np.pi\n pos2 = [(distance*np.cos(i),distance*np.sin(i)) for i in angle2]\n sizes2= [default_size]*len(con)\n \n # storing as object list \n icons2=[IconObj(*i) for i in \n zip(titles2,text2,pics2,colors2,pos2,sizes2,ids2)]\n else:\n icons2=[]\n \n pairs=[(i,center) for i in icons]\n pairs.extend([(center,i) for i in icons2])\n \n icons.extend(icons2)\n icons.append(center)\n \n if len(icons)==0:\n return False\n\n diag=DiagramObj(\"causes\",icons,pairs,margin,width,height,line_width,\n text_size_factor=2)\n self.cause_diagram = diag\n return True",
"def connect_links(base_url, extensions, wikidir, body):\n if base_url.endswith(\"/\"):\n base_url = base_url[:-1]\n\n i = 0\n body2 = []\n\n for match in WIKILINK.finditer(body):\n body2.append(body[i:match.span(0)[0]])\n \n text = match.group(1)\n\n if \"|\" in text:\n topic, desc = text.split(\"|\")\n topic = topic.strip()\n else:\n topic, desc = (text, text)\n\n fn = os.path.join(wikidir, topic)\n\n ext = tools.what_ext(extensions, fn)\n if not ext:\n body2.append(match.group(0))\n i = match.span(0)[1]\n continue\n\n body2.append(\"<a href=\\\"%s/%s/%s\\\">%s</a>\" % \\\n (base_url, TRIGGER, topic, desc))\n i = match.span(0)[1]\n\n body2.append(body[i:])\n return \"\".join(body2)",
"def test_documentation_path_links(self):\r\n main_page = DogMainPage(self.driver)\r\n dog_page = main_page.navigate_documentation()\r\n # Switch to 'List all breeds' tab\r\n all_breeds_page = dog_page.switch_tab(dog_page.ALL_BREEDS)\r\n all_breeds_expected = all_breeds_page.get_expected_header()\r\n all_breeds_header = all_breeds_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(all_breeds_expected, all_breeds_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (all_breeds_expected, all_breeds_header)))\r\n # Switch to 'Random image' tab\r\n random_page = dog_page.switch_tab(dog_page.RANDOM)\r\n random_expected_header = random_page.get_expected_header()\r\n random_header = random_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(random_expected_header, random_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (random_expected_header, random_header)))\r\n # Switch to 'By breed' tab\r\n breed_page = dog_page.switch_tab(dog_page.BREED)\r\n breed_expected_header = breed_page.get_expected_header()\r\n breed_header = breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(breed_expected_header, breed_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (breed_expected_header, breed_header)))\r\n # Switch to 'By sub-breed' tab\r\n sub_breed_page = dog_page.switch_tab(dog_page.SUB_BREED)\r\n sub_expected_header = sub_breed_page.get_expected_header()\r\n sub_header = sub_breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(sub_expected_header, sub_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (sub_expected_header, sub_header)))",
"def update_vizualization_layout(self, new):\n self.stages[\"Connectome\"].define_inspect_outputs()\n self.stages[\"Connectome\"].config.subject = self.subject",
"def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 
'ExpectedLinkograph': singleSubLinko4_4},\n\n ]",
"def init_linkage():\n for case in AutoCase.objects.all():\n case.autolink()\n case.save()",
"def fix_image_links_in_static_portlet(portal):\n\n def get_image_uid(image):\n \"\"\"Return image UID.\"\"\"\n folder = portal['imagens']\n if image in folder:\n return folder[image].UID()\n\n manager = getUtility(IPortletManager, name='plone.rightcolumn', context=portal)\n mapping = getMultiAdapter((portal, manager), IPortletAssignmentMapping)\n\n assert 'midias-sociais' in mapping\n portlet = mapping['midias-sociais']\n images = [\n 'ico-facebook.png', 'ico-twitter.png', 'ico-linkedin.png',\n 'ico-youtube.png', 'ico-flickr.png'\n ]\n for i in images:\n uid = 'resolveuid/' + get_image_uid(i)\n portlet.text = portlet.text.replace(i, uid)\n logger.debug(u'Links substituidos no portlet de midias sociais')\n\n assert 'banners' in mapping\n portlet = mapping['banners']\n image = 'acesso-a-informacao.png'\n uid = 'resolveuid/' + get_image_uid(image) + '/image_mini'\n portlet.text = portlet.text.replace(image, uid)\n logger.debug(u'Link substituido no portlet de acesso a informacao')"
] | [
"0.63308895",
"0.5643925",
"0.55043316",
"0.5476968",
"0.5352855",
"0.5346029",
"0.53028953",
"0.5257914",
"0.5252659",
"0.52249026",
"0.52183926",
"0.519567",
"0.5179021",
"0.5144104",
"0.51285285",
"0.5118393",
"0.50887305",
"0.50714946",
"0.5056446",
"0.50404006",
"0.5028262",
"0.50186247",
"0.50105524",
"0.49854892",
"0.4963512",
"0.49613893",
"0.49552694",
"0.49351516",
"0.4916096",
"0.48959747"
] | 0.5835807 | 1 |
Create the final HTML oxygen files, with the common header, a specific left menu and the main body. | def create_html_file(body_left_menu, body_main_content):
# Get the header file and read its contents
path_header = path.join(
SITE_ROOT,
'schema_viewer',
'templates',
'schema_viewer',
'oxygen',
'header_oxygen_template.html'
)
file_header = open(path_header, 'r')
header = file_header.read()
file_header.close()
# Create the final file
final_file = \
header \
+ "\n{% block oxygen_menu %}\n" \
+ body_left_menu \
+ "{% endblock %}\n{% block oxygen_content %}" \
+ body_main_content \
+ "{% endblock %}"
return final_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def common_header_part1(outfile: TextIO, title: str, indexpath: str = \"\") -> None:\n outfile.write(\"<!DOCTYPE HTML>\\n\")\n outfile.write(\"<html lang=\\\"en\\\">\\n\")\n outfile.write(\" <head>\\n\")\n outfile.write(\" <!-- Google tag (gtag.js) -->\\n\")\n outfile.write(\" <script async src=\\\"https://www.googletagmanager.com/gtag/js?id=G-94FNMMTWTQ\\\"></script>\\n\")\n outfile.write(\" <script>\\n\")\n outfile.write(\" window.dataLayer = window.dataLayer || [];\\n\")\n outfile.write(\" function gtag(){dataLayer.push(arguments);}\\n\")\n outfile.write(\" gtag('js', new Date());\\n\")\n outfile.write(\" gtag('config', 'G-94FNMMTWTQ');\\n\")\n outfile.write(\" </script>\\n\")\n outfile.write(\" <meta charset=\\\"utf-8\\\" />\\n\")\n outfile.write(\" <meta name=\\\"viewport\\\" content=\\\"width=device-width, initial-scale=1.0\\\" />\\n\")\n outfile.write(\" <title>\" + remove_html(title) + \"</title>\\n\")\n outfile.write(\" <meta name=\\\"description\\\" content=\\\"Fiddler Crabs\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"128x128\\\" href=\\\"\" + indexpath +\n \"favicon128.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"96x96\\\" href=\\\"\" + indexpath +\n \"favicon96.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"72x72\\\" href=\\\"\" + indexpath +\n \"favicon72.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"48x48\\\" href=\\\"\" + indexpath +\n \"favicon48.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"32x32\\\" href=\\\"\" + indexpath +\n \"favicon32.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"24x24\\\" href=\\\"\" + indexpath +\n \"favicon24.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"icon\\\" sizes=\\\"16x16\\\" href=\\\"\" + indexpath +\n \"favicon16.png\\\" type=\\\"image/png\\\" />\\n\")\n outfile.write(\" <link rel=\\\"apple-touch-icon-precomposed\\\" href=\\\"\" + indexpath +\n \"apple-touch-icon-precomposed.png\\\">\\n\")\n outfile.write(\" <link rel=\\\"apple-touch-icon-precomposed\\\" sizes=\\\"72x72\\\" \"\n \"href=\\\"\" + indexpath + \"apple-touch-icon-72x72-precomposed.png\\\">\\n\")\n outfile.write(\" <link rel=\\\"apple-touch-icon-precomposed\\\" sizes=\\\"114x114\\\" \"\n \"href=\\\"\" + indexpath + \"apple-touch-icon-114x114-precomposed.png\\\">\\n\")\n outfile.write(\" <link rel=\\\"apple-touch-icon-precomposed\\\" sizes=\\\"144x144\\\" \"\n \"href=\\\"\" + indexpath + \"apple-touch-icon-144x144-precomposed.png\\\">\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"\" + indexpath + \"uca_style.css\\\" />\\n\")\n outfile.write(\" <script defer src=\\\"\" + indexpath + \"js/solid.min.js\\\"></script>\\n\")\n outfile.write(\" <script defer src=\\\"\" + indexpath + \"js/regular.min.js\\\"></script>\\n\")\n outfile.write(\" <script defer src=\\\"\" + indexpath + \"js/brands.min.js\\\"></script>\\n\")\n outfile.write(\" <script defer src=\\\"\" + indexpath + \"js/fontawesome.min.js\\\"></script>\\n\")\n outfile.write(\" <link rel=\\\"stylesheet\\\" href=\\\"\" + indexpath +\n \"images/flag-icon-css/css/flag-icons.min.css\\\" />\\n\")\n outfile.write(\" <link rel=\\\"author\\\" href=\\\"\" + init_data().site_author_email + \"\\\" />\\n\")",
"def common_header_part2(outfile: TextIO, indexpath: str = \"\", include_map: bool = False) -> None:\n outfile.write(\" </head>\\n\")\n outfile.write(\"\\n\")\n if include_map:\n outfile.write(\" <body onload=\\\"initialize()\\\">\\n\")\n else:\n outfile.write(\" <body>\\n\")\n outfile.write(\" <div id=\\\"skip-links\\\" role=\\\"complementary\\\" aria-label=\\\"Skip links menu\\\">\")\n outfile.write(\"<a href=\\\"#Main\\\" tabindex=\\\"1\\\">Skip to content</a></div>\\n\")\n outfile.write(\" <div id=\\\"home\\\">\\n\")\n outfile.write(\" <a href=\\\"\" + indexpath + \"index.html\\\" class=\\\"home-title\\\">Fiddler Crabs</a>\\n\")\n outfile.write(\" <a href=\\\"\" + indexpath +\n \"index.html\\\" class=\\\"home-link\\\">\" + fetch_fa_glyph(\"home\") + \"Home</a>\\n\")\n # outfile.write(\" <a href=\\\"\" + indexpath +\n # \"blog\\\" class=\\\"home-link\\\">\" + fetch_fa_glyph(\"blog\") + \"Blog</a>\\n\")\n outfile.write(\" </div>\\n\")",
"def write_template_body1(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'a')\n template_file.write('<body>\\n') \n template_file.write('<div id=\"pageTitle\">\\n')\n template_file.write('<?php echo $stat_title; ?>\\n') \n template_file.write('</div>\\n')\n template_file.write('<div class=\"page-menu\"><div class=\"table\">\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Basin:</span>\\n')\n template_file.write(\n ' <select id=\"maptype\" '\n +'onchange=\"changeMaptype(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Name:</span>\\n')\n template_file.write(\n ' <select id=\"domain\" '\n +'onchange=\"changeDomain(this.value);\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(\n ' <span class=\"bold\">Forecast Lead:</span>\\n'\n )\n template_file.write(\n ' <select id=\"variable\" '\n +'onchange=\"changeVariable(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div></div>\\n')\n template_file.write('\\n')\n template_file.write('<!-- Middle menu -->\\n')\n template_file.write('<div class=\"page-middle\" id=\"page-middle\">\\n')\n template_file.write(\n 'Left/Right arrow keys = Change forecast lead | Up/Down arrow keys '\n +'= Change Storm\\n'\n )\n template_file.write(\n '<br>For information on tropical cyclone verification, '\n +'<button class=\"infobutton\" id=\"myBtn\">click here</button>\\n'\n )\n template_file.write('<div id=\"myModal\" class=\"modal\">\\n')\n template_file.write(' <div class=\"modal-content\">\\n')\n template_file.write(' <span class=\"close\">×</span>\\n')\n template_file.write(' Tropical Cyclone Verification Information\\n')\n template_file.write(\n ' <embed width=100% height=100% src=\"../main.php\">\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div>\\n')\n template_file.write('<!-- /Middle menu -->\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write(\n '<div id=\"loading\"><img style=\"width:100%\" '\n +'src=\"../../images/loading.png\"></div>\\n'\n )\n template_file.write('\\n')\n template_file.write('<!-- Image -->\\n')\n template_file.write('<div id=\"page-map\">\\n')\n template_file.write(' <image name=\"map\" style=\"width:100%\">\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write('<script type=\"text/javascript\">\\n')\n template_file.write('// Get the modal\\n')\n template_file.write('var modal = document.getElementById(\"myModal\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the button that opens the modal\\n')\n template_file.write('var btn = document.getElementById(\"myBtn\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the <span> element that closes the modal\\n')\n template_file.write(\n 'var span = document.getElementsByClassName(\"close\")[0];\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks the button, open the modal\\n'\n )\n template_file.write('btn.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"block\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks on <span> (x), close the modal\\n'\n )\n 
template_file.write('span.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks anywhere outside of the modal, close it\\n'\n )\n template_file.write('window.onclick = function(event) {\\n')\n template_file.write(' if (event.target == modal) {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//User-defined variables\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('//Global variables\\n')\n template_file.write(\n 'var minFrame = 0; //Minimum frame for every variable\\n'\n )\n template_file.write(\n 'var maxFrame = 26; //Maximum frame for every variable\\n'\n )\n template_file.write(\n 'var incrementFrame = 1; //Increment for every frame\\n'\n )\n template_file.write('\\n')\n template_file.write('var startFrame = 0; //Starting frame\\n')\n template_file.write('\\n')\n template_file.write('var cycle = 2018100600\\n')\n template_file.write('\\n')\n template_file.write('/*\\n')\n template_file.write(\n 'When constructing the URL below, DDD = domain, VVV = variable, '\n +'LLL = level, SSS = season, Y = frame number.\\n'\n )\n template_file.write(\n 'For X and Y, labeling one X or Y represents an integer '\n +'(e.g. 0, 10, 20). Multiple of these represent a string\\n'\n )\n template_file.write(\n 'format (e.g. XX = 00, 06, 12 --- XXX = 000, 006, 012).\\n'\n )\n template_file.write('*/\\n')\n template_file.write(\n 'var url = \"<?php echo $'+template_type+'_url; ?>\";\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Add variables & domains\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('var variables = [];\\n')\n template_file.write('var domains = [];\\n')\n template_file.write('var levels = [];\\n')\n template_file.write('var seasons = [];\\n')\n template_file.write('var maptypes = [];\\n')\n template_file.write('var validtimes = [];\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.close()",
"def write_template_header(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'w')\n template_file.write(\n '<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" '\n +'\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n'\n )\n template_file.write(\n '<html xmlns=\"http://www.w3.org/1999/xhtml\" '\n +'xml:lang=\"en\" lang=\"en\">\\n'\n )\n template_file.write('\\n')\n template_file.write('<head>\\n')\n template_file.write(\n '<meta http-equiv=\"content-type\" content=\"text/html; '\n +'charset=utf-8\" />\\n'\n )\n template_file.write('<title>Home</title>\\n')\n template_file.write(\n '<link href=\"../../main.css\" rel=\"stylesheet\" type=\"text/css\" '\n +'media=\"all\" />\\n'\n )\n template_file.write(\n '<link href=\"../../fonts.css\" rel=\"stylesheet\" type=\"text/css\" '\n +'media=\"all\" />\\n'\n )\n template_file.write(\n '<script src=\"https://d3js.org/d3.v4.min.js\"></script>\\n'\n )\n template_file.write(\n '<script src=\"../jquery-3.1.1.min.js\"></script>\\n'\n )\n template_file.write(\n '<script type=\"text/javascript\" '\n +'src=\"../functions_metplus.js\"></script>\\n'\n )\n template_file.write(\n '<meta name=\"viewport\" content=\"width=device-width, '\n +'initial-scale=1.0\">\\n'\n )\n template_file.write('</head>\\n')\n template_file.write('\\n')\n template_file.write('<?php\\n')\n template_file.write(\n '$randomtoken = base64_encode( openssl_random_pseudo_bytes(32));\\n'\n )\n template_file.write(\n \"$_SESSION['csrfToken']=$randomtoken;\\n\"\n )\n template_file.write('?>\\n')\n template_file.write('\\n')\n template_file.write(\n '<?php include \"'+template_type+'_globalvars.php\"; ?>\\n'\n )\n template_file.write('\\n')\n template_file.close()",
"def makeHTML(header, body, footer):\n f = open(\"crimenews.html\", \"w\")\n f.write(header+body+footer)",
"def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)",
"def header(style=u'default'):\n return (docType() + \n u'<html xmlns=\"http://www.w3.org/1999/xhtml\">\\n'\n u'<head>\\n'\n u'<style type=\"text/css\">\\n'\n u' @import url(/css/exe.css);\\n'\n u' @import url(/style/base.css);\\n'\n u' @import url(/style/%s/content.css);</style>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/common.js\">'\n u'</script>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/libot_drag.js\">'\n u'</script>\\n'\n u'<title>%s</title>\\n'\n u'<meta http-equiv=\"content-type\" '\n u' content=\"text/html; charset=UTF-8\"></meta>\\n'\n u'</head>\\n'\n % (style, _('eXe : elearning XHTML editor')))",
"def generateHtml(self, tokens, html, css):\n\n\t\tf = open(html, \"w\")\n\t\tf.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<title>Document</title>\n</head>\n\t\t\"\"\")\n\t\tif os.path.exists(\"css/default.css\"):\n\t\t\tstyle = open(\"css/default.css\", \"r\").read()\n\t\telse:\n\t\t\tstyle = open(f\"{css}css/default.css\", \"r\").read()\n\t\tf.write(f\"<style>\\n{style}\\n</style>\\n\")\n\t\tf.write(\"<body>\")\n\t\tf.write('<div class=\"markdown-body\">')\n\t\tfor t in tokens:\n\t\t\tf.write(t.html)\n\t\tf.write(\"</div>\")\n\t\tf.write(\"</body>\")\n\t\tf.write(\"</html>\")\n\t\tf.close()",
"def save(self):\n f=open(\"{}/{}.html\".format(self.path,self.name),\"w\")\n f.write(\"<html>\\n <head>\\n\")\n for c in self.css:\n f.write(\" <link rel=\\\"Stylesheet\\\" href=\\\"{}\\\" />\\n\".format(c))\n f.write(\" </head>\\n</body>\\n\")\n for line in self.template.split(\"\\n\"):\n f.write(\" {}\\n\".format(line))\n f.write(\" </body>\\n</html>\")\n f.close()",
"def common_html_header(outfile: TextIO, title: str, indexpath: str = \"\") -> None:\n common_header_part1(outfile, title, indexpath=indexpath)\n common_header_part2(outfile, indexpath=indexpath)",
"def generate_webpages(self):\n if self.add_to_existing:\n self.add_existing_data()\n self.make_home_pages()\n self.make_1d_histogram_pages()\n self.make_corner_pages()\n self.make_config_pages()\n if self.make_comparison:\n self.make_comparison_pages()\n if self.make_interactive:\n self.make_interactive_pages()\n if self.publication:\n self.make_publication_pages()\n if self.gwdata is not None:\n self.make_detector_pages()\n self.make_error_page()\n self.make_version_page()\n if self.notes is not None:\n self.make_notes_page()\n self.make_downloads_page()\n self.generate_specific_javascript()",
"def makeHTMLIndexFile(self):\n part1 = \"\"\"<html>\n <body>\n <title>Index</title>\n <div id=\"pg_body\">\n <div id=\"testSuitesTitle\">TestSuites</div>\n <div id=\"resultsTitle\">Results</div>\n <div id=\"testSuites\">\n \"\"\"\n part2 = self.makeLinks()\n part3 = \"\"\"</div>\n <div id=\"results\">\n <iframe id=\"loadHTMLResults\" name=\"loadHTMLResults\" frameborder=\"0\" src=\"statistics.html\" style=\"height:100%;width:100%;\"></iframe>\n </div>\n <div id=\"footer\">Test Engineer(s) :\"\"\"+testEngineers+\"\"\"</div>\n </div>\n </body>\n </html>\n\t\t<style>\n\t\tbody{\n margin:0;\n }\n\t\t#pg_body{\n\t\twidth=100%;\n\t\ttext-align:center;\n\t\t}\n\t\t#testSuitesTitle{\n\t\twidth:25%;\n\t\tfloat:left;\n\t\tbackground-color:#6495ED;\n\t\tfont-weight:bold;\n\t\tcolor:white;\n\t\t}\n\t\t#resultsTitle{\n\t\twidth:75%;\n\t\tfloat:right;\n\t\tbackground-color:#6495ED;\n\t\tfont-weight:bold;\n\t\tcolor:white;\n\t\t}\n\t\t#testSuites{\n\t\twidth:25%;\n\t\tfloat:left;\n\t\tbackground-color:lightgrey;\n\t\tfont-weight:bold;\n\t\ttext-align:left;\n\t\theight:94%;\n\t\toverflow:scroll;\n\t\t}\n\t\t#results{\n\t\twidth:75%;\n\t\tbackground-color:white;\n\t\tfloat:right;\n\t\ttext-align:left;\n\t\theight:94%;\n\t\toverflow:scroll;\n\t\t}\n\t\t#footer{\n\t\twidth:100%;\n\t\ttext-align:left;\n\t\tcolor:lightgrey;\n\t\tbackground-color:#6495ED;\n\t\t}\n\t\t</style>\n \"\"\"\n \n page = (part1+part2+part3)\n f = open(self.dir+'/index.html','w')\n f.write(page)\n f.close",
"def generate():\n local('cd doc && make clean && make html')",
"def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))",
"def initialPage():\n\treturn header() + footer()",
"def template(self):\n output=file(self.src, 'w').write\n output(\"\"\"%s\n<html>\n<head>\n<title>CHANGE ME</title>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=iso-8859-15\">\n<link rel=\"STYLESHEET\" href=\"%s\" type=\"text/css\">\n</head>\n<body>\n\n<!--it-->\n <p>\n Pagina non disponibile in questa lingua!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Indietro\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/it-->\n\n<!--en-->\n <p>\n Page not available in this language!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Back\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/en-->\n\n</body>\n</html>\n\"\"\" % (self.doctype, self.css))",
"def write_introduction(outfile: TextIO, do_print: bool, species: list, higher_taxa: list) -> None:\n if do_print:\n start_page_division(outfile, \"base_page\")\n outfile.write(\" <header id=\\\"introduction\\\">\\n\")\n outfile.write(\" <h1 class=\\\"bookmark1\\\">Introduction</h1>\\n\")\n outfile.write(\" </header>\\n\")\n else:\n common_html_header(outfile, \"Fiddler Crabs\")\n outfile.write(\" <p>\\n\")\n scnt = 0\n for s in species:\n if s.status != \"fossil\":\n scnt += 1\n outfile.write(\" Fiddler crabs are small, semi-terrestrial crabs are characterized by extreme cheliped \"\n \"asymmetry in males. They are most closely related to the <em class=\\\"species\\\">Ocypode</em> \"\n \"(ghost crabs). <a href=\\\"\" + rel_link_prefix(do_print) + init_data().species_url +\n \"\\\">There are currently {} recognized extant species</a>.\\n\".format(scnt))\n outfile.write(\" </p>\\n\")\n if do_print:\n media_path = MEDIA_PATH\n else:\n media_path = \"\"\n outfile.write(\" <div class=\\\"indeximages\\\">\\n\")\n outfile.write(\" <img class=\\\"thumbnail\\\" src=\\\"\" + media_path +\n \"photos/U_mjoebergi04tn.jpg\\\" alt=\\\"Austruca mjoebergi photo\\\" />\\n\")\n outfile.write(\" <img class=\\\"thumbnail\\\" src=\\\"\" + media_path +\n \"photos/U_minax07tn.jpg\\\" alt=\\\"Minuca minax photo\\\" />\\n\")\n outfile.write(\" <img class=\\\"thumbnail\\\" src=\\\"\" + media_path +\n \"photos/U_crassipes19tn.jpg\\\" alt=\\\"Paraleptuca crassipes photo\\\" />\\n\")\n outfile.write(\" </div>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <h2 class=\\\"nobookmark\\\">Classification</h2>\\n\")\n outfile.write(\" <table>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Kingdom</td><td>Animalia</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Phylum</td><td>Arthropoda</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Class</td><td>Crustacea</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Sub-class</td><td>Malocostraca</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Order</td><td>Decapoda</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Infraorder</td><td>Brachyura</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Superfamily</td><td>Ocypodoidea</td></tr>\\n\")\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Family</td><td>Ocypodidae</td></tr>\\n\")\n # outfile.write(\" <tr><td class=\\\"classcol1\\\">Subfamily</td><td>Ocypodinae</td>\\n\")\n # outfile.write(\" <tr><td class=\\\"classcol1\\\">Genus</td><td><em class=\\\"species\\\">Uca</em></td>\\n\")\n genera = []\n for t in higher_taxa:\n if t.taxon_rank == \"genus\":\n genera.append(t.name)\n genera.sort()\n outfile.write(\" <tr><td class=\\\"classcol1\\\">Genera</td><td><em class=\\\"species\\\">\" +\n \", \".join(genera) + \"</em></td>\\n\")\n\n outfile.write(\" </table>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" The common English name “Fiddler Crab” comes from the feeding of the \"\n \"males, where the movement of the small claw from the ground to its mouth \"\n \"resembles the motion of a someone moving a bow across a fiddle (the large claw).\\n\")\n outfile.write(\" </p>\\n\")\n if do_print:\n end_page_division(outfile)\n else:\n outfile.write(\" <h2>Information</h2>\\n\")\n outfile.write(\" <ul class=\\\"fa-ul\\\">\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list systematics\") + \"<a href=\\\"\" + init_data().syst_url +\n \"\\\">Systematics</a></li>\\n\")\n outfile.write(\" <li>\" + 
fetch_fa_glyph(\"list phylogeny\") + \"<a href=\\\"\" + init_data().tree_url +\n \"\\\">Phylogeny</a></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list species\") + \"<a href=\\\"\" + init_data().species_url +\n \"\\\">Species</a>\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"names\\\">Name Index</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list common\") + \"<a href=\\\"\" + init_data().common_url +\n \"\\\">Common Names</a></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list ranges\") + \"<a href=\\\"\" + init_data().map_url +\n \"\\\">Geographic Ranges</a>\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"locations\\\">Location Index</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list lifecycle\") + \"<a href=\\\"\" + init_data().lifecycle_url +\n \"\\\">Life Cycle</a>\\n\")\n outfile.write(\" <ul>\\n\")\n # outfile.write(\" <li>\" + fetch_fa_glyph(\"list unusual dev\") + \"<a href=\\\"\" +\n # init_data().unsuual_dev_url + \"\\\">Unusual Development</a></li>\\n\")\n outfile.write(\" <li><a href=\\\"\" + init_data().unsuual_dev_url + \"\\\">Unusual Development</a></li>\\n\")\n outfile.write(\" </ul></li>\\n\")\n\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list morphology\") + \"<a href=\\\"\" + init_data().morph_url +\n \"\\\">Morphology</a>\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"\" + init_data().handedness_url + \"\\\">Male Handedness</a></li>\\n\")\n outfile.write(\" </ul></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list references\") + \"<a href=\\\"\" + init_data().ref_url +\n \"\\\">Comprehensive Reference List</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" <h2>Multimedia</h2>\\n\")\n outfile.write(\" <ul class=\\\"fa-ul\\\">\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list photo\") + \"<a href=\\\"\" + init_data().photo_url +\n \"\\\">Photos</a></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list video\") + \"<a href=\\\"\" + init_data().video_url +\n \"\\\">Videos</a></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list art\") + \"Art\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"\" + init_data().art_sci_url + \"\\\">Scientific Art</a></li>\\n\")\n outfile.write(\" <li><a href=\\\"\" + init_data().art_stamp_url + \"\\\">Postage Stamps</a></li>\\n\")\n outfile.write(\" <li><a href=\\\"\" + init_data().art_craft_url + \"\\\">Crafts</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" <h2>Miscellania</h2>\\n\")\n outfile.write(\" <ul class=\\\"fa-ul\\\">\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list site cite\") + \"<a href=\\\"\" + init_data().cite_url +\n \"\\\">Citation info for this website</a></li>\\n\")\n outfile.write(\" <li>\" + fetch_fa_glyph(\"list github\") +\n \"<a href=\\\"https://github.com/msrosenberg/fiddlercrab.info\\\">Website data on GitHub</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n common_html_footer(outfile)",
"def write_main_morphology_pages(outfile: TextIO, do_print: bool, morphology: list) -> None:\n if do_print:\n start_page_division(outfile, \"base_page\")\n media_path = MEDIA_PATH\n else:\n common_html_header(outfile, \"Fiddler Crab Morphology\")\n media_path = \"\"\n outfile.write(\" <header id=\\\"\" + init_data().morph_url + \"\\\">\\n\")\n outfile.write(\" <h1 class=\\\"bookmark1\\\">Morphology</h1>\\n\")\n if not do_print:\n outfile.write(\" <nav>\\n\")\n outfile.write(\" <ul>\\n\")\n if do_print:\n index_page = \"#morphology_index.html\"\n else:\n index_page = \"morphology/index.html\"\n outfile.write(\" <li><a href=\\\"\" + index_page + \"\\\">\" + fetch_fa_glyph(\"index\") + \"Index</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </nav>\\n\")\n outfile.write(\" </header>\\n\")\n outfile.write(\"\\n\")\n outfile.write(\" <div class=\\\"morphdesc\\\">\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" Fiddler crabs are decapod “true crabs” with much of the standard morphology \"\n \"found within this group. The following sections briefly describe major morphological features \"\n \"as well as characteristics that are often used to distinguish among species.\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" The morphology is organized hierarchically by major body component with further details \"\n \"within each section.\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <h2 class=\\\"nobookmark\\\">More Detail</h2>\\n\")\n outfile.write(\" <ul>\\n\")\n for m in morphology:\n if m.parent == \".\":\n outfile.write(\" <li><a href=\\\"\" + rel_link_prefix(do_print, \"morphology/\") +\n morphology_link(m.parent, m.character) + \".html\\\">\" + m.character + \"</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </div>\\n\")\n outfile.write(\" <figure class=\\\"morphimg\\\">\\n\")\n outfile.write(\" <img src=\\\"\" + media_path + \"morphology/dorsal_view.png\\\" \"\n \"alt=\\\"dorsal view of crab image\\\" title=\\\"dorsal view of crab\\\" />\\n\")\n outfile.write(\" <figcaption>Figure modified from Crane (1975).</figcaption>\\n\")\n outfile.write(\" </figure>\\n\")\n outfile.write(\" <figure class=\\\"morphimg\\\">\\n\")\n outfile.write(\" <img src=\\\"\" + media_path + \"morphology/ventral_view.png\\\" \"\n \"alt=\\\"ventral view of crab image\\\" title=\\\"ventral view of crab\\\" />\\n\")\n outfile.write(\" <figcaption>Figure modified from Crane (1975).</figcaption>\\n\")\n outfile.write(\" </figure>\\n\")\n outfile.write(\" <figure class=\\\"morphimg\\\">\\n\")\n outfile.write(\" <img src=\\\"\" + media_path + \"morphology/anterior_view.png\\\" \"\n \"alt=\\\"anterior view of crab image\\\" title=\\\"anterior view of crab\\\" />\\n\")\n outfile.write(\" <figcaption>Figure modified from Crane (1975).</figcaption>\\n\")\n outfile.write(\" </figure>\\n\")\n if do_print:\n end_page_division(outfile)\n write_morphology_index(outfile, do_print, morphology)\n for m in morphology:\n write_morphology_page(outfile, do_print, m, morphology)\n else:\n common_html_footer(outfile)\n for m in morphology:\n with open(WEBOUT_PATH + \"morphology/\" + morphology_link(m.parent, m.character) + \".html\", \"w\",\n encoding=\"utf-8\") as suboutfile:\n write_morphology_page(suboutfile, do_print, m, morphology)\n with open(WEBOUT_PATH + \"morphology/index.html\", \"w\", encoding=\"utf-8\") as suboutfile:\n write_morphology_index(suboutfile, do_print, morphology)",
"def create_home():\n meta_desc = (\n 'Expected values and probability per lap of step-up'\n ' banners in Final Fantasy Brave Exvius (FFBE)')\n template_vars = {\n 'title' : sitesettings.SITE_NAME,\n 'siteurl' : sitesettings.SITE_URL,\n 'sitename' : sitesettings.SITE_NAME,\n 'meta_desc' : meta_desc,\n 'last_four_banners' : nav.get_last_four_banners('all'),\n 'last_four_single' : nav.get_last_four_banners('single'),\n 'last_four_multi' : nav.get_last_four_banners('multi'),\n 'all_banner_info' : get_all_banner_info(),\n }\n\n home_path = os.path.join(sitesettings.LOCAL_FILE_PATH)\n\n if not os.path.exists(home_path):\n os.makedirs(home_path)\n\n template_file = 'home.html'\n html_file_loc = os.path.join(home_path, 'index.html')\n generatehtml.generate_html(\n html_file_loc, template_file, template_vars, os.path.join(os.getcwd(), 'templates'))",
"def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file",
"def produce_header_footer(self):\n header = pylatex.PageStyle(\"header\", header_thickness=0.1)\n\n image_filename = self.get_image()\n with header.create(pylatex.Head(\"L\")) as logo:\n logo.append(pylatex.StandAloneGraphic(image_options=\"width=110px\", filename=image_filename))\n\n # Date\n with header.create(pylatex.Head(\"R\")):\n header.append(\"Date Report Issued: \" + datetime.today().strftime('%Y-%m-%d'))\n\n # Footer\n with header.create(pylatex.Foot(\"C\")):\n with header.create(pylatex.Tabular('lcr')) as table:\n table.add_row('', bold('Data interpretation guidelines can be found in RDIMS document ID: 10401305'),\n '')\n table.add_row('', bold('This report was generated with OLC AutoROGA v0.0.1'), '')\n return header",
"def print_header(self, params=None):\n self.hhcfile.write(\"\"\"\n<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n<HTML>\n<HEAD>\n<meta name=\"GENERATOR\" content=\"Microsoft® HTML Help Workshop 4.1\">\n<!-- Sitemap 1.0 -->\n</HEAD><BODY>\n<OBJECT type=\"text/site properties\">\n <param name=\"ImageType\" value=\"Folder\">\"\"\")\n if params!=None:\n for name, value in params:\n self.hhcfile.write(\"\"\" <param name=\"%s\" value=\"%s\">\\n\"\"\" % (name, value))\n self.hhcfile.write(\"\"\"\n</OBJECT>\n<UL>\n\"\"\")",
"def outputHtmlFileHeader(pageTitle):\n outputHtml(\n \"\"\"\n <!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n \"http://www.w3.org/TR/html4/loose.dtd\">\n <html lang=\"en\">\n <head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n <title>\"\"\"\n + pageTitle\n + \"\"\"</title>\n </head>\n <body>\n \"\"\"\n )",
"def rollup_header_footer(self, context):\n header = tags.html_string_to_element_tree(\n '<link href=\"%s/markdown.css\" rel=\"stylesheet\" '\n 'type=\"text/css\">' % RESOURCE_FOLDER)\n footer = tags.html_string_to_element_tree('')\n return (header, footer)",
"def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()",
"def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))",
"def main():\n\n return render_template(\"index.html\", title=\"Home\", heading=\"Dublin Bus\")",
"def get_html_parts(self):\n script_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'data')\n with open(os.path.join(script_path, 'head.html'), 'r') as hfile:\n self.header = hfile.read()\n with open(os.path.join(script_path, 'template.html'), 'r') as hfile:\n self.template = hfile.read()\n with open(os.path.join(script_path, 'footer.html'), 'r') as hfile:\n self.footer = hfile.read()\n self.module_icon = os.path.join(script_path, 'icon.png')\n return True",
"def export(self, package):\n self.style = package.style\n self.copyFiles(package)\n self.html = self.renderHeader(package.name)\n self.html += u\"<body>\\n\"\n self.html += u\"<div id=\\\"content\\\">\\n\"\n self.html += u\"<div id=\\\"header\\\">\\n\"\n self.html += escape(package.title)\n self.html += u\"</div>\\n\"\n self.html += u\"<div id=\\\"main\\\">\\n\"\n self.renderNode(package.root)\n self.html += u\"</div>\\n\"\n self.html += u\"</div>\\n\"\n self.html += u\"</body></html>\\n\"\n self.save(self.outputDir/\"index.html\")",
"def make_html(depends=(files['image.gif'],),\n targets=(files['index.html'],)):\n\n index_html = open(files['index.html'].rel, 'w')\n index_html.write(pyyaks.context.render(html_template))\n index_html.close()"
] | [
"0.61840135",
"0.615706",
"0.6106681",
"0.59005207",
"0.58948696",
"0.5833731",
"0.57632947",
"0.57230896",
"0.57106525",
"0.57040775",
"0.56784046",
"0.5634475",
"0.56243837",
"0.5544781",
"0.5540921",
"0.5534541",
"0.55281717",
"0.55029094",
"0.5502361",
"0.5495124",
"0.54924494",
"0.5491808",
"0.5469504",
"0.54516214",
"0.54504186",
"0.5436966",
"0.5424728",
"0.5408814",
"0.53881615",
"0.53865236"
] | 0.7858279 | 0 |
Delete the floating global control on the main content. | def del_global_control(body_main_content):
return sub("<div id=\"global_controls\" (.*?) </div>", "", body_main_content, flags=DOTALL) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DelDiv(self):\n if self.created:\n self.CloseImage()\n command = \"\"\"$('#{}').remove();\"\"\".format(self.wid)\n get_ipython().run_cell_magic('javascript', '', command)\n self.created = False\n self.wid = uuid.uuid4().hex",
"def OnCloseFloatingPage(self, event):\r\n\r\n root_manager = framemanager.GetManager(self)\r\n if root_manager and root_manager != self._mgr:\r\n pane = event.pane\r\n if pane.name.startswith(\"__floating__\"):\r\n self.ReDockPage(pane)\r\n return\r\n \r\n event.Skip()\r\n else:\r\n event.Skip()\r\n frame = event.GetEventObject() \r\n page_title = frame.GetTitle() \r\n page_contents = frame.GetChildren()[-1] \r\n page_contents.Reparent(self)\r\n self.InsertPage(frame.page_index, page_contents, page_title, select=True, bitmap=frame.bitmap, control=frame.control)\r\n\r\n if frame.control:\r\n src_tabs, idx = self.FindTab(page_contents)\r\n frame.control.Reparent(src_tabs)\r\n frame.control.Hide()\r\n frame.control = None\r\n\r\n self.SetPageTextColour(frame.page_index, frame.text_colour)",
"def __editDelete(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").clear()\n else:\n self.activeWindow().clear()",
"def remove_canvas(self,):\r\n # reset plot view beofre change\r\n self.canvas.toolbar.home()\r\n # remove widgets from canvas_vlayout\r\n self.canvas_vlayout.removeWidget(self.toolbar)\r\n self.toolbar.close()\r\n self.canvas_vlayout.removeWidget(self.canvas)\r\n self.canvas.close()",
"def clear_press(self):\n\n for win in self.window.additional_windows:\n win.del_win()\n\n pos = self.window.physics_canvas.physics_objects\n self.window.physics_canvas.physics_objects = []\n\n for obj in pos:\n self.window.physics_canvas.canvas.delete(obj.canvas_id)\n\n for force in self.window.physics_canvas.interacting_forces:\n force.remove()\n\n for particle in self.window.physics_canvas.particles:\n self.window.physics_canvas.canvas.delete(particle.canvas_id)",
"def cleanWorkspace(self):\n self.window.labelMessage.setText(\"\")\n\n if self.inspectinoAnalyzer:\n del self.analyzerWidget\n self.inspectinoAnalyzer = False\n\n for index in reversed(range(self.window.layoutDepthermInpesction.count())):\n layoutItem = self.window.layoutDepthermInpesction.itemAt(index)\n widgetToRemove = layoutItem.widget()\n print(\"found widget: \" + str(widgetToRemove))\n widgetToRemove.setParent(None)\n self.window.layoutDepthermInpesction.removeWidget(widgetToRemove)",
"def __destroy_ui(self):\n # Remove the viewable area from Gedit's side panel\n self.__side_panel.remove_item(self.__view_port)\n\n # Empty class's properties\n self.__tree_view = None\n self.__side_panel = None\n\n self.__view_port.destroy()\n self.__view_port = None",
"def destroy(self):\r\n self.visible = False",
"def onClearButton(self):\n markupsNode = slicer.util.getNode( \"MarkupsFiducial\" ) \n markupsNode.RemoveAllMarkups()",
"def clear(self):\r\n self.delete(0, tkinter.END)",
"def clearwin(event=None):\r\n # for child in mframe.winfo_children():\r\n # child.destroy()\r\n global mframe\r\n mframe.destroy()\r\n mframe = tkinter.Frame(main, width=800, height=600, background='pink')\r\n mframe.pack(fill=\"both\", expand=True, padx=20, pady=20)",
"def remove_button(self):\n self.scene.remove_child(self.toggle_button_el)",
"def clearScreen(self):\n self.removeFrame(self.frame1)\n self.removeFrame(self.frame2)\n self.separator.destroy()\n #Here, the app will lose the row and column configuration and does not\n #apply new configuration. Don't know why?. So that, I destroy the\n #parent (in this case, a frame), create a new frame and set it again.\n self.parent.destroy()\n mainFrame = tk.Frame(self.store[\"root\"], bg=\"#FFF\")\n self.parent = mainFrame\n self.parent.grid(column=0, row=0, sticky=\"nsew\")",
"def clear_main(self):\n\n if self.terminate:\n return\n\n self.windows['MAIN'].erase()\n self.windows['MAIN'].border(' ', ' ',\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE)",
"def hlpframeclear(self):\r\n \r\n self.menubar.entryconfig(\"File\", state = 'normal')\r\n self.menubar.entryconfig(\"Help\", state = 'normal')\r\n self.hlpframe.place_forget()",
"def clear_screen(self):\r\n lst_grid = self.root.grid_slaves()\r\n for widget in lst_grid:\r\n widget.destroy()\r\n lst_pack = self.root.pack_slaves()\r\n for widget in lst_pack:\r\n widget.destroy()",
"def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")",
"def clear_canvas():\n self.parent_class.canvas.delete(\"all\")",
"def remove(self):\n self.hide()\n self.deleteLater()",
"def _clear_control(self):\r\n if self.__control:\r\n _hiew.ControlClear(self.__control)\r\n self.__control = None",
"def clearButtons(self):\n for ch in self.cboxes:\n ch.hide()\n for tbx in self.tboxes:\n tbx.hide()\n for btnum in reversed(range(self.flowLayout.layout.count())):\n item = self.flowLayout.layout.itemAt(btnum)\n if item is not None:\n self.flowLayout.layout.removeItem(item)\n r, c = self.flowLayout.items[item.widget()]\n del self.flowLayout.items[item.widget()]\n del self.flowLayout.rows[r][c]\n item.widget().hide()\n self.flowLayout.update()",
"def delete_page(self, wid, child):\n\n page_num = self.notebook.page_num(child)\n self.notebook.remove_page(page_num)\n if not self.notebook.get_n_pages():\n self.output_window.remove(self.notebook)\n placeholder = self.get_placeholder_image()\n self.output_window.add(placeholder)\n placeholder.show_all()",
"def destroy (self,event=None):\n \n # This is enough to disable fillbox.\n self.top.withdraw()",
"def annihilate(self):\n self.master.destroy()",
"def on_delete():\r\n del win.box[-1] # delete last line\r\n #del win.box[0:-1] # delete all lines \r",
"def delete_grid(self):\n\n\t\tself.a_grid = None\t\t# Deletes the object from memory",
"def removeFrame(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n frame.pack_forget()",
"def delete_and_pop_handlers():\n self.zoom_box.delete()\n self.zoom_frame.delete()\n self.zoom_box = None\n # Popping handlers.\n # XXX: Are these always the right handlers??\n self.win.window.pop_handlers()\n # Return to the default cursor.\n self.window.set_mouse_cursor(self.default_cursor)",
"def __del__(self):\n #print 'del in'\n if hasattr(self,'root'):\n #print 'del root'\n if self.root:\n #print 'del circ'\n self.root.delete_circulars()\n del self.root",
"def clear_display(self, func):\n func.configure(state='normal')\n func.delete(0,END)"
] | [
"0.6569426",
"0.6322288",
"0.6220098",
"0.61642945",
"0.6048335",
"0.6038787",
"0.599455",
"0.598732",
"0.5944671",
"0.5929435",
"0.5899441",
"0.5881212",
"0.5822167",
"0.5811097",
"0.5805517",
"0.5760576",
"0.5734409",
"0.57206446",
"0.5712142",
"0.5706464",
"0.5681028",
"0.56764",
"0.56750643",
"0.56274694",
"0.5623064",
"0.56228644",
"0.56211716",
"0.5610796",
"0.56069934",
"0.56058353"
] | 0.6487117 | 1 |
filters data that have at least $at_least $x unique values per $per | def filter_x_per_y(df, at_least, x, per):
return df.groupby(per, as_index=False, sort=False).filter(
lambda g: g[x].nunique() >= at_least
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_toofew_toolong(df, min_each_group, max_length):\n df = df[~(df.question.apply(lambda x : len(x)) > max_length)]\n\n counts = df[\"index\"].value_counts()\n idxs = np.array(counts.index)\n \n # index numbers of groups with count >= mineachgroup\n list_idx = [i for i, c in zip(idxs, counts) if c >= min_each_group]\n\n # filter out data with \"index\" in list_idx \n df = df[df[\"index\"].isin(list_idx)]\n return df",
"def test_by_person_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, p='p1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, p='xxxxxx')\n assert not res",
"def post_processing(f,param_dict):\r\n must_have = param_dict['must_have']\r\n cannot_be_together = param_dict['cannot_be_together']\r\n\r\n # must have \r\n tmp = list()\r\n for itemset in f:\r\n if set(itemset).intersection(set(must_have)): \r\n tmp.append(itemset)\r\n\r\n f = tmp[:]\r\n\r\n # cannot be together\r\n for itemset in f:\r\n for cbt in cannot_be_together:\r\n if set(cbt) <= set(itemset):\r\n tmp.remove(itemset)\r\n \r\n return tmp",
"def unique(combo, out):\n # This lets us find only minimally covering payments (you should never add cards to a payment that already\n # satisfies the charge)\n for el in out:\n if set(el).issubset(combo):\n return False\n return True",
"def filter_unneeded_rows(data: pd.DataFrame) -> FilteredData:\n allowed_values = [\"BUY\", \"SELL\"]\n correct_rows = data[\"Action\"].isin(allowed_values)\n num_of_ok_rows = correct_rows.sum()\n if num_of_ok_rows == len(data):\n return FilteredData(data, {})\n legal_rows = data.loc[correct_rows, :]\n illegal_rows = data.loc[~correct_rows, :]\n unique_illegal = illegal_rows[\"Action\"].unique()\n uniques = {}\n for unique in unique_illegal:\n values = data.loc[data[\"Action\"] == unique, :]\n uniques[unique] = (len(values), values.index.to_numpy() + 1)\n return FilteredData(legal_rows, uniques)",
"def filtration(self):\n filtration = 0\n for key in self.keys():\n binary_complexities = []\n for i, j in combinations(range(1, max(key) + 1), 2):\n r = tuple(k for k in key if k == i or k == j)\n cpxty = len([p for p, q in pairwise(r) if p != q])\n binary_complexities.append(cpxty)\n filtration = max(filtration, sum(binary_complexities))\n return filtration",
"def filter_user_interactions(df, min_value):\n # Number of users before filtering\n num_users_before = df['personId'].nunique()\n\n # Users with enough interactions\n filter_users = df['personId'].value_counts() > min_value\n filter_users = filter_users[filter_users].index.tolist()\n\n # New dataframe with only selected users\n df = df[(df['personId'].isin(filter_users))]\n df = df[['personId', 'contentId', 'virality']]\n\n # Number of users after filtering\n num_users_after = df['personId'].nunique()\n\n print('Number of users discarded: ', num_users_before - num_users_after)\n return df",
"def nonunique_gens(df,\n key_cols=['plant_id_eia', 'generator_id', 'report_date']):\n unique_gens = df.drop_duplicates(subset=key_cols)\n dupes = df[~df.isin(unique_gens)].dropna()\n dupes = dupes.sort_values(by=key_cols)\n return dupes",
"def check_uniqueness_in_group(tX_grouped, unwanted_value):\n masks_check = []\n counts_check = []\n for i in range(len(tX_grouped)):\n unwanted_value_check = 1 * (tX_grouped[i] == unwanted_value)\n masks_and_counts = np.unique(unwanted_value_check, return_counts=True, axis=0)\n masks_check.append(masks_and_counts[0])\n counts_check.append(masks_and_counts[1])\n print(masks_check)\n print(counts_check)\n return None",
"def filter_dups(self):\n def dups_filter():\n dups = set()\n for g1, g2, w in self.gen:\n if (min(g1, g2), max(g1, g2)) in dups:\n continue\n dups.add((min(g1, g2), max(g1, g2)))\n yield g1, g2, w\n return self.filter(dups_filter())",
"def uniform_but_one_dataset_no_weight(n, p):\n elements = []\n for i in range(n):\n elements.append(i)\n for i in range(int(n**(1.0 / p)) - 1):\n elements.append(1)\n return elements",
"def drop_vague_elements(df: pd.DataFrame, min_ratings: int) -> pd.DataFrame:\n initial = df\n df = df.copy()\n iteration = 0\n while True:\n print(f\"iteration {iteration}\")\n iteration += 1\n ratings_per_user = df.groupby('reviewerID').size()\n vague_users = ratings_per_user[ratings_per_user < min_ratings].index.values\n print(f'# of vague users: {len(vague_users)}')\n\n df = df[~df.reviewerID.isin(vague_users)]\n\n ratings_per_item = df.groupby('asin').size()\n vague_items = ratings_per_item[ratings_per_item < min_ratings].index.values\n print(f'# of vague items: {len(vague_items)}')\n\n df = df[~df.asin.isin(vague_items)]\n\n if len(vague_users) == 0 and len(vague_items) == 0:\n print(\"what's left:\")\n print(f\"- {len(df) / len(initial):.1%} of ratings\")\n print(f\"- {df.asin.nunique() / initial.asin.nunique():.1%} of unique items\")\n print(f\"- {df.reviewerID.nunique() / initial.reviewerID.nunique():.1%} of unique users\")\n return df",
"def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))",
"def defaulter(arr):\n return list(set(map(lambda application: application['customer_id'], filter(lambda application: application['repaid_amount'] < (application['principal'] + application['fee']), arr))))",
"def _removeInsufficientTransformer(self, working_stats, params):\n\n for choice, subsets in working_stats.items():\n sufficient_values = [value for value in subsets if value > 0]\n if not sufficient_values:\n del working_stats[choice]\n\n return working_stats",
"def filter_counts(list_of_elements, minimum):\n counts = Counter(list_of_elements)\n lookup = {}\n for k, v in counts.items():\n if v >= minimum:\n lookup[k] = v\n return lookup",
"def filter_by_query(x, _query=None):\n if _query:\n scores = [fuzz.partial_ratio(_query, \"{0} {1}\".format(x[\"requester\"][\"first_name\"], x[\"requester\"][\"last_name\"])),\n fuzz.partial_ratio(_query, x[\"requester\"][\"email\"]),\n fuzz.partial_ratio(_query, x[\"requester\"][\"net_id\"]),\n fuzz.partial_ratio(_query, x[\"pay_to\"][\"name\"]),\n fuzz.partial_ratio(_query, x[\"pay_to\"][\"email\"]),\n fuzz.partial_ratio(_query, x[\"pay_to\"][\"id\"] or \"\"),\n fuzz.partial_ratio(_query, x[\"short_description\"]),\n fuzz.partial_ratio(_query, x[\"notes\"])]\n\n if max(scores) > 60:\n return True\n return False\n return True",
"def zero_one_card(df):\n unique_values = defaultdict()\n for col in df.columns:\n if df[col].nunique() < 2:\n unique_values[col] = df[col].nunique()\n if len(unique_values) > 0:\n printmd(str(\"* Columns: *\"+', '.join(list(unique_values.keys()))+\"* have less than two different values\"))\n for col in unique_values.keys():\n printmd(str('* *' + col + \"* has \" + str(df[col].nunique()) + ' differents values :' + str(df[col].unique())))\n else:\n printmd(\"* No columns have less than 2 different values\")",
"def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok",
"def gen_all_holds(hand):\n ans_set = set([()])\n\n for dummy_idx in range(len(hand)):\n temp_set = set([()])\n for seq in ans_set:\n for item in hand:\n new_seq = list(seq)\n if hand.count(item) > new_seq.count(item):\n new_seq.append(item)\n new_seq = sorted(new_seq)\n temp_set.add(tuple(new_seq))\n ans_set = temp_set\n return ans_set",
"def obs_with_data(x):\n num_toks = np.sum(x,axis=1)\n has_data = num_toks > 0\n return has_data",
"def permutationFilter(perm):\n \n # An example of how to use this\n #if perm['__consumption_encoder']['maxval'] > 300:\n # return False;\n # \n return True",
"def filter_packs(self, packs):\n return [\n pack for pack in packs\n if len(pack) < self.model.pack_size_threshold\n ]",
"def test_by_statement_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, st='st1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, st='xxx2')\n assert not res == 0",
"def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans",
"def execQ7():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n filtered_frame = frame.sort_values(by='Price', ascending=True).drop_duplicates(subset='Product').head(10)\n return filtered_frame",
"def filter_out_rare_points(points, threshold_pct=0.5):\n \n c = Counter(points)\n total = sum(c.values())\n l = []\n for p in points:\n v = c[p]\n if v/total * 100 <= threshold_pct:\n l.append(np.nan)\n else:\n l.append(p)\n \n return l",
"def test_best_hits_unique(self):\n records = [h for _, h in self.result.best_hits_by_query(n=5)][0]\n self.assertEqual(len(records), 3)\n values = {tuple(h.values()) for h in records}\n self.assertEqual(len(values), 3)",
"def all_unique_validator(column, ignore_missing_vals=False):\n column = pd.Series(column)\n duplicated = column.duplicated()\n if ignore_missing_vals:\n duplicated = apply_ignore_missing_data_to_mask(duplicated, column)\n return not duplicated.any(), {\"actual\": column[duplicated]}",
"def get_conditions(info):\n conditions = info.columns\n # This condition's unique value should be less than 5\n new_conditions = list()\n for c in conditions:\n try:\n n_cond = len(pd.unique(info[c]))\n if 1 < n_cond < 5:\n new_conditions.append(c)\n except TypeError:\n pass\n \n return new_conditions"
] | [
"0.55409527",
"0.55308944",
"0.55091226",
"0.5428687",
"0.5395138",
"0.53877074",
"0.5354342",
"0.5341396",
"0.52779645",
"0.5227017",
"0.51802427",
"0.51764864",
"0.51556313",
"0.5132215",
"0.51163405",
"0.51158553",
"0.5110208",
"0.510321",
"0.50508755",
"0.5044529",
"0.5035802",
"0.5034186",
"0.50261056",
"0.50097495",
"0.5007785",
"0.5007214",
"0.500276",
"0.50015515",
"0.49894765",
"0.4966798"
] | 0.798085 | 0 |
Receives a DBSReader object and finds out whether it's pointing to Global DBS (no matter whether it's production or the preproduction instance). | def isGlobalDBS(dbs):
try:
url = urlparse(dbs.dbsURL)
if url.hostname.startswith('cmsweb'):
if url.path.startswith('/dbs/prod/global') or url.path.startswith('/dbs/int/global'):
return True
except Exception as ex:
logging.error("Failed to find out whether DBS is Global or not. Error: %s", str(ex))
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_current_gisdbase():\n global current_gisdbase\n return current_gisdbase",
"async def casino_is_global(self):\n return await self.db.Settings.Global()",
"def is_on_dbsnp(row):\n is_on_dbsnp = 1\n\n if row[\"dbsnp\"] == \"-\":\n is_on_dbsnp = 0\n\n return is_on_dbsnp",
"def getDBSApi():\n if 'testbed' in dbs3_url:\n dbs3_url_reader = dbs3_url + '/dbs/int/global/DBSReader'\n else:\n dbs3_url_reader = dbs3_url + '/dbs/prod/global/DBSReader'\n\n from dbs.apis.dbsClient import DbsApi\n\n\n #this needs to come after /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh is sourced \n dbsApi = DbsApi(url = dbs3_url_reader)\n return dbsApi",
"def needs_commons_db(self):\n return False",
"def uses_database(self, dbname):\n used = False\n if any([dbname.upper() in y for y in [x.upper() for x in self._dbnames]]):\n used = True\n return used",
"def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db",
"def get_site_env(self):\n return self.config['SITE_ENVIRONMENT'] == 'DEV'",
"def is_sql2008(self, make_connection=True):\r\n return self.__get_dbms_version(make_connection).startswith(six.text_type(VERSION_SQL2008))",
"def db_exists():\n global DB_TYPE\n global SQLITE_FILE\n\n if DB_TYPE == \"sqlite\":\n return Path(SQLITE_FILE).exists()",
"def db_for_read(self, model, **hints):\n if model._meta.app_label == 'delivery':\n return 'db1'\n return None",
"def pg_relpersistence_exists(self):\n return exclusions.closed()",
"def db_for_read(self, model, **hints):\n if model._meta.app_label == 'researcherquery':\n return 'safedb'\n return None",
"def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version",
"def in_global_code(self):\n return self.sscope is None and self.lscope is None",
"def has_scn_con2ard(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one()\n ses.close()\n logger.debug(\"Closed the database session.\")\n return (query_result.ARDProduct == True) and (query_result.Invalid == False)",
"def _get_db_access(self, cfg) -> SqDB:\n if not self.outputs:\n return None\n # Remove gather from the outputs, since with it we only write in files\n candidate_out = [o for o in self.outputs if o != 'gather']\n if candidate_out:\n dbs = SqDB.get_plugins()\n # Get only the first out as source\n outdb = candidate_out[0]\n if outdb not in dbs:\n raise SqPollerConfError(f'{outdb} database not found')\n # Init the SqDB object\n return dbs[outdb](cfg, logger)\n return None",
"def get_tgis_database():\n global tgis_database\n return tgis_database",
"def _resolve_should_track_driver_status(self):\n return ('spark://' in self._connection['master'] and\n self._connection['deploy_mode'] == 'cluster')",
"def rdb_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"rdb_enabled\")",
"def is_development():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')",
"def get_db_client(db_type):\n\tfor client_cls in DatabaseClient.__subclasses__():\n\t\ttry:\n\t\t\tif client_cls.meets_condition(db_type):\n\t\t\t\treturn client_cls()\n\t\texcept KeyError:\n\t\t\tcontinue\n\n\traise UnknownDatabaseType(db_type)",
"def exists (self, db):\n return hasattr(self, db) and isinstance(getattr(self, db), Database)",
"def database_exists(self, db_name):\n conn = self.__get_open_connection(self.sys_conn_hash)\n conn.autocommit(True)\n sql = \"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '{0}'\".format(db_name)\n # print sql\n r = self.exec_sql_get_records(conn, sql)\n return (len(r) == 1)",
"def get_tgis_database_string():\n global tgis_database_string\n return tgis_database_string",
"def isDPMSite(pfn, sitemover):\n # pfn is the filename of the first file in the file list (enough to test with)\n\n status = False\n # first get the DQ2 site name, then ask for its setype\n try:\n _DQ2SiteName = sitemover.getDQ2SiteName(surl=pfn)\n except:\n # Note: do not print the exception since it sometimes can not be converted to a string (as seen at Taiwan)\n tolog(\"WARNING: Failed to get the DQ2 site name (assuming no DPM site)\")\n else:\n setype = sitemover.getDQ2SEType(_DQ2SiteName)\n if setype == \"dpm\":\n status = True\n return status",
"def is_sql2000(self, make_connection=True):\r\n return self.__get_dbms_version(make_connection).startswith(six.text_type(VERSION_SQL2000))",
"async def _check_db_exists(self) -> bool:\n q = \"\"\"SELECT SCHEMA_NAME\n FROM INFORMATION_SCHEMA.SCHEMATA\n WHERE SCHEMA_NAME = %s\"\"\"\n res = await self.fetch_single(q, (self.dbname,))\n if not res:\n return False\n return True",
"def get_datasource_of():\n global datasource_of\n\n if not datasource_of:\n datasource_of = stixhelpers.datasource_of()\n \n return datasource_of",
"def db_session_is_localized(self):\n return self.__dict__.has_key('db_session')"
] | [
"0.52581006",
"0.5185863",
"0.51744187",
"0.51528794",
"0.5043897",
"0.50279176",
"0.5004774",
"0.500209",
"0.49769497",
"0.49480417",
"0.49011204",
"0.48366022",
"0.48357213",
"0.48242038",
"0.4815816",
"0.48043567",
"0.47987908",
"0.47840345",
"0.47834823",
"0.47745487",
"0.4730393",
"0.47239643",
"0.47162744",
"0.47162324",
"0.471419",
"0.469529",
"0.46932927",
"0.4678727",
"0.46751115",
"0.46722826"
] | 0.7254706 | 0 |
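Note: the isGlobalDBS document above depends only on the parsed URL, so the same check can be exercised without a DBSReader instance. A minimal sketch, assuming the usual cmsweb-style DBS URLs (the function name and example URLs below are illustrative, not part of the dataset):

from urllib.parse import urlparse

def looks_like_global_dbs(dbs_url):
    # True when the host is a cmsweb frontend and the path is the
    # prod or int Global DBS instance, mirroring the check above.
    try:
        url = urlparse(dbs_url)
        host_ok = (url.hostname or "").startswith("cmsweb")
        path_ok = url.path.startswith(("/dbs/prod/global", "/dbs/int/global"))
        return host_ok and path_ok
    except Exception:
        return False

print(looks_like_global_dbs("https://cmsweb.cern.ch/dbs/prod/global/DBSReader"))   # True
print(looks_like_global_dbs("https://cmsweb.cern.ch/dbs/prod/phys03/DBSReader"))   # False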
Get data location from dbs | def locationsFromDBS(self, dbs, dataItems):
result = defaultdict(set)
for dataItem in dataItems:
try:
if isDataset(dataItem):
phedexNodeNames = dbs.listDatasetLocation(dataItem)
else:
phedexNodeNames = dbs.listFileBlockLocation(dataItem)
result[dataItem].update(phedexNodeNames)
except Exception as ex:
self.logger.error('Error getting block location from dbs for %s: %s', dataItem, str(ex))
# convert the sets to lists
for name, nodes in viewitems(result):
psns = set()
psns.update(self.cric.PNNstoPSNs(nodes))
result[name] = list(psns)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_loc(self):\n\t\treturn self.__dbfile",
"def __get_location(self) -> str:\n\t\treturn os.getenv('SQLITE_DRIVER_LOCATION', 'db.sqlite')",
"def get_datasource_of():\n global datasource_of\n\n if not datasource_of:\n datasource_of = stixhelpers.datasource_of()\n \n return datasource_of",
"def get_path_db():\n\taiqc_config = get_config()\n\tif aiqc_config is None:\n\t\t# get_config() will print a null condition.\n\t\tpass\n\telse:\n\t\tdb_path = aiqc_config['db_path']\n\t\treturn db_path",
"def get_db_path():\n \n return(db_run.db_abs_path)",
"def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]",
"def default_global_location(database):\n\n for dataset in get_many(database, *[equals(\"location\", None)]):\n dataset[\"location\"] = \"GLO\"\n return database",
"def _get_db(self):\n gt_db = ...\n return gt_db",
"def StoreLocation(self) -> str:",
"def get_database_url(self):\n return self.config['dbase_path']",
"def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")",
"def return_location(doctype, filters_sql):\n\tif filters_sql:\n\t\ttry:\n\t\t\tcoords = frappe.db.sql(\n\t\t\t\tf\"\"\"SELECT name, location FROM `tab{doctype}` WHERE {filters_sql}\"\"\", as_dict=True\n\t\t\t)\n\t\texcept frappe.db.InternalError:\n\t\t\tfrappe.msgprint(frappe._(\"This Doctype does not contain location fields\"), raise_exception=True)\n\t\t\treturn\n\telse:\n\t\tcoords = frappe.get_all(doctype, fields=[\"name\", \"location\"])\n\treturn coords",
"def get_location_stats(db_path: str) -> int:\n\n return get_db_count(db_path, 'company_data.db', 'locations')",
"def db_path(self, host: str) -> str:\n app_path = os.path.abspath(os.getcwd())\n folder = 'data'\n path = os.path.join(app_path, folder)\n return os.path.normpath(os.path.join(path, host))",
"def read_locations(db, openfile):\n pass",
"def get_url(self):\n return self.db_url",
"def db(self) -> str:\n return self._db",
"def get_db_path_from_config():\n if not globalConf.sections():\n raise OSError(\"Cannot read config file.\")\n databases = parse_databases()\n db_path = None\n for db in databases:\n if db[0] == LocalDB:\n if db_path is None:\n db_path = db[1]\n else:\n raise ValueError(\"Multiple local database files are listed \"\n \"in the config file.\")\n if db_path is None:\n raise ValueError(\"No local database file is listed in the config file.\")\n return db_path",
"def getDataAtLocation(loc: ghidra.program.util.ProgramLocation) -> ghidra.program.model.listing.Data:\n ...",
"def location(self):\r\n try:\r\n return self.data['location']\r\n except KeyError:\r\n return self.data['station_name']",
"def getDBPath():\n return os.path.join(CONFIG_DIR, CONFIG_DICT['common']['local_db'])",
"def database():\n return conf().database",
"def get_data(db_dir, command, args = None):\n with lite.connect((db_dir)) as conn:\n try:\n cursor = conn.cursor()\n if args:\n cursor.execute(command,args)\n else:\n cursor.execute(command)\n data = cursor.fetchall()\n #print '[sql management] got all of the data requested according to:\\n--- %s ---\\n the data: %s'%(command, data)\n return data\n except:\n return None",
"def locate_data():\n # Locate by using the environment variable\n if \"TESSDATA_PREFIX\" in os.environ:\n data_prefix = os.environ[\"TESSDATA_PREFIX\"]\n\n if os.path.isdir(data_prefix):\n return data_prefix\n\n # Locate by using the command directory\n cmd_path = os.path.dirname(_config.command)\n\n if cmd_path:\n cmd_data_path = os.path.join(cmd_path, \"tessdata\")\n\n if os.path.isdir(cmd_data_path):\n return cmd_data_path\n\n return None",
"def get_locations(db_path: str) -> List[Location]:\n locations: List[Location] = []\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n cur: Cursor = conn.cursor()\n for row in cur.execute('SELECT name, area, climate FROM locations'):\n locations.append(Location(row[0], row[1], Climate(row[2])))\n\n cur.close()\n conn.close()\n return locations",
"def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]",
"def dbRead(dbPoint):\n raise NotImplementedError('dbRead in simu mode')",
"def get_location(self):\r\n return None",
"def get_location(self):\n\t\treturn self.location",
"def mapdata():\n return getmapdata(db, MyTable)"
] | [
"0.6616532",
"0.6296066",
"0.6252961",
"0.62318724",
"0.6225313",
"0.61788684",
"0.61649096",
"0.61381984",
"0.60394216",
"0.60271424",
"0.59884375",
"0.5912525",
"0.5897254",
"0.5876116",
"0.5872178",
"0.58330953",
"0.582461",
"0.58245146",
"0.5800512",
"0.57918423",
"0.5775335",
"0.5773081",
"0.577031",
"0.57525533",
"0.57260305",
"0.572526",
"0.5711658",
"0.5706186",
"0.5700241",
"0.56821316"
] | 0.6312537 | 1 |
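Note: the locationsFromDBS document above follows an accumulate-then-convert pattern (sets per item while collecting, plain lists on return). A minimal self-contained sketch of that pattern, with dict lookups standing in for the DBS and CRIC calls (all dataset, block, and site names below are illustrative):

from collections import defaultdict

def is_dataset(name):
    # Block names carry a "#<uid>" suffix; dataset names do not.
    return "#" not in name

def locations_from_lookup(data_items, dataset_sites, block_sites):
    # Accumulate site names per item into sets, then return sorted lists.
    result = defaultdict(set)
    for item in data_items:
        lookup = dataset_sites if is_dataset(item) else block_sites
        result[item].update(lookup.get(item, []))
    return {name: sorted(nodes) for name, nodes in result.items()}

dataset_sites = {"/Primary/Processed/TIER": ["T1_US_FNAL_Disk"]}
block_sites = {"/Primary/Processed/TIER#abc": ["T2_CH_CERN", "T1_US_FNAL_Disk"]}
print(locations_from_lookup(["/Primary/Processed/TIER", "/Primary/Processed/TIER#abc"],
                            dataset_sites, block_sites))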
Sort items by dbs instances; return dict with DBSReader as key & data items as values | def organiseByDbs(self, dataItems):
itemsByDbs = defaultdict(list)
for item in dataItems:
if ACDCBlock.checkBlockName(item['name']):
# if it is acdc block don't update location. location should be
# inserted when block is queued and not supposed to change
continue
if item['dbs_url'] not in self.dbses:
self.dbses[item['dbs_url']] = DBSReader(item['dbs_url'])
itemsByDbs[self.dbses[item['dbs_url']]].append(item['name'])
return itemsByDbs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sortdb():\n return sorted(donor_db.items(), key=sumdbkey, reverse=True)",
"def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result",
"def get_instances_sorted(ctx, filters, limit, marker, columns_to_join,\n sort_keys, sort_dirs):\n\n if not sort_keys:\n # This is the default from the process_sort_params() method in\n # the DB API. It doesn't really matter, as this only comes into\n # play if the user didn't ask for a specific ordering, but we\n # use the same scheme for consistency.\n sort_keys = ['created_at', 'id']\n sort_dirs = ['asc', 'asc']\n\n sort_ctx = InstanceSortContext(sort_keys, sort_dirs)\n\n if marker:\n # A marker UUID was provided from the API. Call this the 'global'\n # marker as it determines where we start the process across\n # all cells. Look up the instance in whatever cell it is in and\n # record the values for the sort keys so we can find the marker\n # instance in each cell (called the 'local' marker).\n global_marker_instance = _get_marker_instance(ctx, marker)\n global_marker_values = [global_marker_instance[key]\n for key in sort_keys]\n\n def do_query(ctx):\n \"\"\"Generate InstanceWrapper(Instance) objects from a cell.\n\n We do this inside the thread (created by\n scatter_gather_all_cells()) so that we return wrappers and\n avoid having to iterate the combined result list in the caller\n again. This is run against each cell by the scatter_gather\n routine.\n \"\"\"\n\n # The local marker is a uuid of an instance in a cell that is found\n # by the special method instance_get_by_sort_filters(). It should\n # be the next instance in order according to the sort provided,\n # but after the marker instance which may have been in another cell.\n local_marker = None\n\n # Since the regular DB query routines take a marker and assume that\n # the marked instance was the last entry of the previous page, we\n # may need to prefix it to our result query if we're not the cell\n # that had the actual marker instance.\n local_marker_prefix = []\n\n if marker:\n # FIXME(danms): If we knew which cell we were in here, we could\n # avoid looking up the marker again. But, we don't currently.\n\n local_marker = db.instance_get_by_sort_filters(\n ctx, sort_keys, sort_dirs, global_marker_values)\n if local_marker:\n if local_marker != marker:\n # We did find a marker in our cell, but it wasn't\n # the global marker. Thus, we will use it as our\n # marker in the main query below, but we also need\n # to prefix that result with this marker instance\n # since the result below will not return it and it\n # has not been returned to the user yet. Note that\n # we do _not_ prefix the marker instance if our\n # marker was the global one since that has already\n # been sent to the user.\n local_marker_filters = copy.copy(filters)\n if 'uuid' not in local_marker_filters:\n # If a uuid filter was provided, it will\n # have included our marker already if this instance\n # is desired in the output set. If it wasn't, we\n # specifically query for it. If the other filters would\n # have excluded it, then we'll get an empty set here\n # and not include it in the output as expected.\n local_marker_filters['uuid'] = [local_marker]\n local_marker_prefix = db.instance_get_all_by_filters_sort(\n ctx, local_marker_filters, limit=1, marker=None,\n columns_to_join=columns_to_join,\n sort_keys=sort_keys,\n sort_dirs=sort_dirs)\n else:\n # There was a global marker but everything in our cell is\n # _before_ that marker, so we return nothing. 
If we didn't\n # have this clause, we'd pass marker=None to the query below\n # and return a full unpaginated set for our cell.\n return []\n\n main_query_result = db.instance_get_all_by_filters_sort(\n ctx, filters,\n limit=limit, marker=local_marker,\n columns_to_join=columns_to_join,\n sort_keys=sort_keys,\n sort_dirs=sort_dirs)\n\n return (InstanceWrapper(sort_ctx, inst) for inst in\n itertools.chain(local_marker_prefix, main_query_result))\n\n # FIXME(danms): If we raise or timeout on a cell we need to handle\n # that here gracefully. The below routine will provide sentinels\n # to indicate that, which will crash the merge below, but we don't\n # handle this anywhere yet anyway.\n results = context.scatter_gather_all_cells(ctx, do_query)\n\n # If a limit was provided, and passed to the per-cell query routines.\n # That means we have NUM_CELLS * limit items across results. So, we\n # need to consume from that limit below and stop returning results.\n limit = limit or 0\n\n # Generate results from heapq so we can return the inner\n # instance instead of the wrapper. This is basically free\n # as it works as our caller iterates the results.\n for i in heapq.merge(*results.values()):\n yield i._db_instance\n limit -= 1\n if limit == 0:\n # We'll only hit this if limit was nonzero and we just generated\n # our last one\n return",
"def _get_db_data(self) -> None:\n if self._db_data:\n return\n with db(cursor=True) as cur:\n cur.execute('SELECT count, gender, age_start FROM age_groups WHERE district = ?', (self.district,))\n self._db_data = cur.fetchall()\n self._db_data = sorted([row for row in self._db_data if row['gender'] == self.gender],\n key=lambda x: (x['age_start'] is None, x['age_start']))",
"def dbdescs(data, dbname):\n # pylint: disable=bad-continuation\n return {\n 'admin': onedesc(data, dbname, 'admin', 'rw'),\n 'user': onedesc(data, dbname, 'user', 'rw'),\n 'viewer': onedesc(data, dbname, 'viewer', 'ro')\n }",
"def orderList(dataSource,**kwargs):\n\treturn sorted(dataSource)",
"def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }",
"def populate_db(dbase):\n # In this order: Iron, Blood, Shadow, Fel, Storm\n wowhead_ids = []\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-8))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-9))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-10))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-11))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-17))\n item_ids = set(wowhead_ids)\n print(item_ids)\n\n pos = 0\n for item_id in item_ids:\n if pos % 10 == 0:\n print(\"Relic %d of %d\" % (pos, len(item_ids)))\n pos += 1\n import_relic(dbase, item_id)",
"def all_measurements_lookup(client):\n dbs_dict = db_lookup(client)\n m_list_dict = []\n for db in dbs_dict:\n m_list_dict.append({db['name']: measurements_lookup(client, db['name'])})\n # print(\"def all_measurements_lookup 1: \", m_list_dict[:10])\n return m_list_dict",
"def get_sorted(self, collection, xmlFormat):\n\t\treturn {\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified',\n\t\t\t\"ky\": collection\n\t\t\t}",
"def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))",
"def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict",
"def merge_cached_results(*results):\r\n if len(results) == 1:\r\n return list(results[0])\r\n\r\n #make sure the sorts match\r\n sort = results[0].query._sort\r\n assert(all(r.query._sort == sort for r in results[1:]))\r\n\r\n def thing_cmp(t1, t2):\r\n for i, s in enumerate(sort):\r\n #t1 and t2 are tuples of (fullname, *sort_cols), so we can\r\n #get the value to compare right out of the tuple\r\n v1, v2 = t1[i + 1], t2[i + 1]\r\n if v1 != v2:\r\n return cmp(v1, v2) if isinstance(s, asc) else cmp(v2, v1)\r\n #they're equal\r\n return 0\r\n\r\n all_items = []\r\n for r in results:\r\n r.fetch()\r\n all_items.extend(r.data)\r\n\r\n #all_items = Thing._by_fullname(all_items, return_dict = False)\r\n return [i[0] for i in sorted(all_items, cmp = thing_cmp)]",
"def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs,\n sort_keys, sort_dirs):\n columns_to_join = instance_obj._expected_cols(expected_attrs)\n instance_generator = get_instances_sorted(ctx, filters, limit, marker,\n columns_to_join, sort_keys,\n sort_dirs)\n if 'fault' in expected_attrs:\n # We join fault above, so we need to make sure we don't ask\n # make_instance_list to do it again for us\n expected_attrs = copy.copy(expected_attrs)\n expected_attrs.remove('fault')\n return instance_obj._make_instance_list(ctx, objects.InstanceList(),\n instance_generator,\n expected_attrs)",
"def SQLFactory(cls, keys={}, sort=[], multi=False, dbh=None, dbh_key=\"default\"):\n rv = None\n if multi:\n rv = []\n release = False\n if dbh is None:\n release = True\n dbh = dbstuff.getRO(dbh_key)\n try:\n whereclause = []\n wherevalues = []\n for k,v in keys.items():\n if k[0]=='<':\n whereclause.append('%s<%%s' % k[1:])\n elif k[0]=='>':\n whereclause.append('%s>%%s' % k[1:])\n else:\n whereclause.append('%s=%%s' % k)\n wherevalues.append(v)\n query = \"SELECT \" + cls.SQLId + \" FROM \" + cls.SQLTable \n if whereclause:\n query += \" WHERE \" + ' AND '.join(whereclause)\n if sort:\n if type(sort)!=type([]):\n sort = [sort]\n query += \" ORDER BY \" + ','.join([' `%s` %s' % (k,d) for k,d in sort])\n c = dbh.cursor()\n \n if (DEBUG):\n print query\n \n c.execute( query, tuple(wherevalues) )\n if c.rowcount>0:\n if multi:\n for id, in c:\n rv.append(cls(id,dbh))\n else:\n #print \"ROW COUNT\"\n #print c.rowcount\n (id,) = c.fetchone()\n rv = cls(id, dbh)\n c.close()\n finally:\n if release:\n dbstuff.release(dbh,dbh_key)\n return rv",
"def index_records(vr):\n return collections.OrderedDict((record2key(rec), clean_sample_index(rec))\n for rec in vr)",
"def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]",
"def get_sorted_results_by_dict(self):\n results = self.get_sorted_results()\n return [dict(r) for r in results]",
"def sortby(self):\n ...",
"def items(self):\n ix_obj = list(self.d_buffer.keys())\n ix_obj.sort()\n l_obj = [self.d_buffer[ix] for ix in ix_obj]\n\n return ix_obj, l_obj",
"def calc_similars(cmdb, db):\n res = {}\n for c1 in cmdb:\n l = {}\n for c2 in db:\n if similarity.calculate_similarity(c1, c2) != 0:\n l[db.get(c2)] = similarity.calculate_similarity(c1, c2)\n l_sort = {k: v for k, v in sorted(\n l.items(), key=lambda item: item[1], reverse=True)}\n res[cmdb.get(c1)] = l_sort\n return res",
"def get_queryset(self):\n return ObjectDB.objects.filter(roster__roster__name=self.roster_name).order_by(\n \"db_key\"\n )",
"def db_data4test():\n administrators = {\n 'field': ['name', 'password'],\n 'data': [\n ('admin', '123'),\n ]\n }\n\n countries = {\n 'field': 'name',\n 'data': [\n 'China',\n 'India'\n ]\n }\n\n positions = {\n 'field': 'name',\n 'data': [\n 'Software EngineerSystem Analyst',\n 'Business Analyst',\n 'Technical support',\n 'Network Engineer',\n 'Technical Consultant',\n 'Web Developer',\n 'Software Test'\n ]\n }\n\n users = {\n 'field': ['name', 'password'],\n 'data': [\n ('test', '123456'),\n ('test2', '123456'),\n ('test3', '123456')\n ]\n }\n\n user_infos = {\n 'field': [\n 'name', 'first_name', 'last_name', 'position', 'company',\n 'nationality', 'tobe_contacted', 'skills_have', 'skills_learned'\n ],\n 'data': [\n (\n 'test', 'Huang', 'Xiao', 'Business Analyst',\n 'Global Consulting Services', 'China', 1,\n '3months Python Subject',\n 'Advanced Python through on-job training'\n ),\n (\n 'test2', 'Yong', 'Wu', 'Business Analyst',\n 'REA', 'China', 0,\n '3 months Datawarehousing',\n 'Project management skill'\n ),\n ]\n }\n\n return {\n 'administrator': administrators,\n 'country': countries,\n 'position': positions,\n 'user': users,\n 'user_info': user_infos\n }",
"def __init__(self,dbname='',**kwds):\n self._skip = kwds.get('skip',[])\n self._limit= kwds.get('limit',[])\n self._keys= kwds.get('keys',[])\n self._db = getDBConnection()[dbname]\n self._collection_name=kwds.get('collection','all')\n self._collection = []\n self._skip_rec=0\n self._props = {}\n if self._limit and self._skip:\n self._nrows = self._limit[0]\n if len(self._limit)>1:\n self._ncols = self._limit[1]\n else:\n self._ncols = 1\n if len(self._skip)==2:\n self._skip_rows=self._skip[0]\n self._skip_cols=self._skip[1]\n else:\n self._skip_rec = self._skip[0]\n self._table=dict()\n self._is_set=False\n self._set_collection()\n self._row_heads=[]\n self._col_heads=[]",
"def _get_dapall_from_db(self):\n\n dapall_data = {}\n\n daptype = self.bintype.name + '-' + self.template.name\n\n mdb = marvin.marvindb\n\n if not mdb.isdbconnected:\n raise MarvinError('No DB connected')\n\n datadb = mdb.datadb\n dapdb = mdb.dapdb\n\n dapall_row = mdb.session.query(dapdb.DapAll).join(\n dapdb.File, datadb.PipelineInfo, datadb.PipelineVersion).filter(\n mdb.datadb.PipelineVersion.version == self._dapver,\n dapdb.DapAll.plateifu == self.plateifu,\n dapdb.DapAll.daptype == daptype).use_cache().first()\n\n if dapall_row is None:\n raise MarvinError('cannot find a DAPall match for this target in the DB.')\n\n for col in dapall_row.__table__.columns.keys():\n if col != 'pk' and '_pk' not in col:\n dapall_data[col] = getattr(dapall_row, col)\n\n return dapall_data",
"def get_db_info(self):\n total = 0\n info = {\n 'count': {},\n 'types': {}\n }\n for name in self._object_types:\n id, attrs, idx = self._object_types[name]\n info['types'][name] = {\n 'attrs': attrs,\n 'idx': idx\n }\n row = self._db_query_row('SELECT COUNT(*) FROM objects_%s' % name)\n info['count'][name] = row[0]\n total += row[0]\n\n info['total'] = total\n\n info['termcounts'] = {}\n for ivtidx in self._inverted_indexes:\n row = self._db_query_row('SELECT COUNT(*) FROM ivtidx_%s_terms' % ivtidx)\n info['termcounts'][ivtidx] = int(row[0])\n\n info['file'] = self._dbfile\n return info",
"def locationsFromDBS(self, dbs, dataItems):\n result = defaultdict(set)\n for dataItem in dataItems:\n try:\n if isDataset(dataItem):\n phedexNodeNames = dbs.listDatasetLocation(dataItem)\n else:\n phedexNodeNames = dbs.listFileBlockLocation(dataItem)\n result[dataItem].update(phedexNodeNames)\n except Exception as ex:\n self.logger.error('Error getting block location from dbs for %s: %s', dataItem, str(ex))\n\n # convert the sets to lists\n for name, nodes in viewitems(result):\n psns = set()\n psns.update(self.cric.PNNstoPSNs(nodes))\n result[name] = list(psns)\n\n return result",
"def fetch_objects_to_compare(self, sid, did):\n res = dict()\n\n last_system_oid = 0\n if self.manager.db_info is not None and did in self.manager.db_info:\n last_system_oid = (self.manager.db_info[did])['datlastsysoid']\n\n sql = render_template(\n \"/\".join([self.template_path, 'nodes.sql']),\n datlastsysoid=last_system_oid,\n showsysobj=self.blueprint.show_system_objects\n )\n status, rset = self.conn.execute_2darray(sql)\n if not status:\n return internal_server_error(errormsg=rset)\n\n for row in rset['rows']:\n status, data = self._fetch_properties(did, row['oid'])\n if status:\n res[row['name']] = data\n\n return res",
"def serializeItemsData(items, highlight=False):\n from debra.models import ProductModelShelfMap\n #items = items.filter(added_datetime__gte=datetime.date.today()-datetime.timedelta(days=30))\n # unordered_pair = list(items.values_list('added_datetime', 'id'))\n unordered_pair = []\n\n for item in items:\n unordered_pair.append((item.added_datetime, item.id))\n\n unordered_pair.sort()\n unordered_pair.reverse()\n ids = [x[1] for x in unordered_pair[:60]]\n items = ProductModelShelfMap.objects.select_related(\n 'product_model__brand').filter(id__in=ids)\n items_data = []\n prod_model_existing = set()\n for item in items:\n if item.product_model.name in prod_model_existing:\n continue\n prod_model_existing.add(item.product_model.name)\n item_data = {\n \"name\": item.product_model.name,\n \"img_url_feed_view\": item.product_model.img_url,\n \"img_url_panel_view\": item.img_url_panel_view,\n }\n if highlight:\n item_data[\"highlight\"] = True\n if item.product_model.brand:\n item_data[\"brand\"] = item.product_model.brand.name\n items_data.append(item_data)\n return items_data",
"def _topological_sort_metadata(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError"
] | [
"0.6100124",
"0.5767866",
"0.5482177",
"0.5375432",
"0.52297914",
"0.517466",
"0.51589227",
"0.51308376",
"0.5080761",
"0.50783294",
"0.5067629",
"0.5035221",
"0.50315094",
"0.5030121",
"0.50266075",
"0.5023428",
"0.5004768",
"0.50018305",
"0.4949546",
"0.49456415",
"0.4940035",
"0.4934822",
"0.4927733",
"0.49170372",
"0.48909774",
"0.48902762",
"0.48841354",
"0.488279",
"0.4878946",
"0.48757827"
] | 0.62622225 | 0 |
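Note: the organiseByDbs document above groups item names under one reader object per DBS URL, creating each reader only once. A minimal sketch of that grouping, with a plain callable standing in for the DBSReader constructor and without the ACDC-block skip (all names and URLs below are illustrative):

from collections import defaultdict

def organise_by_url(items, reader_cache, make_reader):
    # One reader per distinct dbs_url; item names grouped under that reader.
    by_reader = defaultdict(list)
    for item in items:
        url = item["dbs_url"]
        if url not in reader_cache:
            reader_cache[url] = make_reader(url)
        by_reader[reader_cache[url]].append(item["name"])
    return by_reader

cache = {}
items = [
    {"name": "/A/B/C#1", "dbs_url": "https://example.invalid/dbs/prod/global/DBSReader"},
    {"name": "/A/B/C#2", "dbs_url": "https://example.invalid/dbs/prod/global/DBSReader"},
]
grouped = organise_by_url(items, cache, make_reader=lambda url: "reader-for-" + url)
print(dict(grouped))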
Hjorth's Complexity and Parameters Hjorth Parameters are indicators of statistical properties initially introduced by Hjorth (1970) to describe the general characteristics of an EEG trace in a few quantitative terms, but which can be applied to any time series. The parameters are activity, mobility, and complexity. NeuroKit returns complexity directly in the output tuple, but the other parameters can be found in the dictionary. The activity parameter is simply the variance of the signal, which corresponds to the mean power of a signal (if its mean is 0). | def complexity_hjorth(signal):
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Calculate derivatives
dx = np.diff(signal)
ddx = np.diff(dx)
# Calculate variance and its derivatives
x_var = np.var(signal) # = activity
dx_var = np.var(dx)
ddx_var = np.var(ddx)
# Mobility and complexity
mobility = np.sqrt(dx_var / x_var)
complexity = np.sqrt(ddx_var / dx_var) / mobility
return complexity, {"Mobility": mobility, "Activity": x_var} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def get_resulting_hypo_params(self, injkey):\n h0_params = self.fid_values[injkey][\n 'h0_fit_to_%s'%(self.labels.dict['data'])]['params']\n h1_params = self.fid_values[injkey][\n 'h1_fit_to_%s'%(self.labels.dict['data'])]['params']\n return h0_params, h1_params",
"def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)",
"def Tanh(z):\n return 1.7159 * np.tanh(2 / 3.0 * z)",
"def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt",
"def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()",
"def H(self, z):\n prefactor = 15./np.pi**4.*self.Omega_gamma*(1.+z)**4.\n # Dark energy contribution\n Xde = self.X_DE(z)\n # Neutrino contribution\n yn = np.outer(self.M_nu/(const.kB*self.T_nu), 1./(1.+z))\n Fn = self.FermiDirac_integral(np.array(yn))\n nu_contribution = prefactor*self.Gamma_nu**4.*Fn\n # UR contribution\n Fu = self.FermiDirac_integral(0.)\n ur_contribution = prefactor*self.Gamma_nu_inst**4.*Fu*self.massless_nu\n # WDM contribution\n yw = np.outer(self.M_wdm/(const.kB*self.T_wdm), 1./(1.+z))\n Fw = self.FermiDirac_integral(np.array(yw))\n wdm_contribution = prefactor*np.expand_dims(self.Gamma_wdm**4.,1)*Fw\n # H(z)\n return self.H0*(self.Omega_cdm *(1+z)**3 +\n self.Omega_b *(1+z)**3 +\n self.Omega_gamma *(1+z)**4 + \n self.Omega_K *(1+z)**2 +\n self.Omega_lambda*Xde +\n ur_contribution +\n np.sum(wdm_contribution,axis=0) + \n np.sum(nu_contribution ,axis=0))**0.5",
"def training_data_rho_output(P=\"{P:d}\", T=\"{T:.2f}\", J=\"{J:d}\"):\n return _training_data_rho_output.format(P, T, J)",
"def H(n,x):\r\n H_values = [] #a list of sequential H values for different n's up to n=n.\r\n H_values.append(1) #appends H_0.\r\n H_values.append(2*x) #appends H_1.\r\n if n>1:\r\n for i in range(1,n):\r\n H_values.append((2*x*H_values[-1])-(2*i*H_values[-2]))\r\n return H_values[-1]\r\n elif n == 0:\r\n return H_values[0]\r\n else:\r\n return H_values[1]",
"def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost",
"def H(t, args):\n\n f0 = args['f0']\n n = args['n']\n omega = args['omega']\n omegaDt = args['omegaDt']\n omegaArgs = args['omegaArgs']\n\n ad = create(n)\n a = destroy(n)\n # H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation\n ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))\n # additional term because of w(t) not constant\n ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)\n # Force term (9**10^-9 = x0, extent of ground state wave function), see Wittmann diss\n # with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)\n ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)\n # ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)\n return(ham)",
"def test_wet_psychrometric_rh_kwargs():\n p = 1013.25 * units.mbar\n dry_bulb_temperature = 20. * units.degC\n wet_bulb_temperature = 18. * units.degC\n coeff = 6.1e-4 / units.kelvin\n psychrometric_rh = relative_humidity_wet_psychrometric(p, dry_bulb_temperature,\n wet_bulb_temperature,\n psychrometer_coefficient=coeff)\n assert_almost_equal(psychrometric_rh, 82.9701 * units.percent, 3)",
"def calc_Hcp_ij(self):\n\t\n\thp0_delayed = self.hp_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thp0 = self.hp_wavelet.get_Psi(self.xi[0])\n\thc0_delayed = self.hc_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thc0 = self.hc_wavelet.get_Psi(self.xi[0])\n\t\n\thp1_delayed = self.hp_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thp1 = self.hp_wavelet.get_Psi(self.xi[1])\n\thc1_delayed = self.hc_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thc1 = self.hc_wavelet.get_Psi(self.xi[1])\n\t\n\thp2_delayed = self.hp_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thp2 = self.hp_wavelet.get_Psi(self.xi[2])\n\thc2_delayed = self.hc_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thc2 = self.hc_wavelet.get_Psi(self.xi[2])\n\t\n\tself.Hpij[0,1] = hp1_delayed - hp0\n\tself.Hpij[1,0] = hp0_delayed - hp1\n\n\tself.Hpij[0,2] = hp2_delayed - hp0\n\tself.Hpij[2,0] = hp0_delayed - hp2\n\n\tself.Hpij[1,2] = hp2_delayed - hp1\n\tself.Hpij[2,1] = hp1_delayed - hp2\n\t\n\t# cross-polarization\n\tself.Hcij[0,1] = hc1_delayed - hc0\n\tself.Hcij[1,0] = hc0_delayed - hc1\n\n\tself.Hcij[0,2] = hc2_delayed - hc0\n\tself.Hcij[2,0] = hc0_delayed - hc2\n\n\tself.Hcij[1,2] = hc2_delayed - hc1\n\tself.Hcij[2,1] = hc1_delayed - hc2\n\t\n\treturn",
"def RHO(p,tv): \n _rd=287.053 # Gas constant for dry air\n _tv=tv*1.\n if np.nanmax(_tv)<100: _tv +=273.15# NB: C-->K\n if np.nanmax(p)<2000: p*=100 # hPa to Pa\n rho=np.divide(p,np.multiply(_rd,_tv))\n\n return rho",
"def Keldysh_Parameter(omega,Uion,E):\n\treturn omega*np.sqrt(2.0*Uion)/E",
"def eval_hankel_function(pt, n=MAX_N):\n j_0 = 0\n for i in range(n):\n j_0 += (-1)**i * (1 / 4 * e**2)**i / factorial(i)**2\n\n g = 0.57721566490153286\n y_0 = (ln(e / 2) + g) * j_0\n h_n = 0\n for i in range(n):\n h_n += 1 / (i + 1)\n y_0 += (-1)**(i) * h_n * (e**2 / 4)**(i+1) / (factorial(i+1))**2\n y_0 *= 2 / pi\n\n imag_unit = (np.zeros(1, dtype=np.complept128) + 1j)[0]\n h_0 = j_0 + imag_unit * y_0\n return h_0",
"def multiple_premia_heston(nobs=2000):\r\n lm = 1.5768\r\n mu = .12**2\r\n eta = .5751\r\n rho = -.0\r\n sigma = .12**2\r\n\r\n price = 1\r\n strike = np.exp(np.linspace(-.1, .1, nobs))\r\n maturity = 30/365\r\n riskfree = .01 * np.ones(nobs)\r\n moneyness = np.log(strike/price) - riskfree * maturity\r\n call = np.ones_like(moneyness).astype(bool)\r\n call[moneyness < 0] = False\r\n\r\n param = HestonParam(lm=lm, mu=mu, eta=eta, rho=rho, sigma=sigma)\r\n model = Heston(param, riskfree, maturity)\r\n premium = cosmethod(model, moneyness=moneyness, call=call)\r\n plt.plot(strike, premium)\r\n plt.show()",
"def h(n, x, orthonormal=True):\n h = polynomial.polyval(x, h_coefs[n])\n return h",
"def noyes84_rossby_activity(logRpHK):\n y = 5 + logRpHK\n logRo = 0.324 - 0.400*y - 0.283 * y**2 - 1.325 * y**3\n return 10**logRo",
"def test_parameters(self):\n assert self.hll.p == 8\n assert self.hll.m == 256\n assert round(self.hll.alpha - 0.7182725932495458, 5) == 0\n assert round(self.hll.error - 0.065, 5) == 0\n assert self.hll64.treshold == 120000",
"def __init__(self, problem, v0, alpha):\n super(HHT, self).__init__(problem)\n self.solver = newton_krylov # Why not =)\n self.options[\"h\"] = 0.01\n self.f = problem.rhs\n self.v = v0\n if(alpha < -1.0/3.0 or alpha > 0):\n print alpha\n raise Exception(\"Non-valid alpha for HHT method\")\n self.alpha = alpha\n self.beta = ( (1.0 - alpha)/2.0 )**2\n self.gamma = (1.0 - 2.0 * alpha)/2.0",
"def h(X, theta, n_hidden_layers=1):\n _, a = feed_forward(X, theta, n_hidden_layers)\n L = n_hidden_layers + 1 # last layer\n\n hypothesis = a[L]\n return hypothesis",
"def user_cons_hJ(h, Jac, mbs_data, tsim):\n\n # Example: Compute the expression of h and Jac then assign the values.\n # h[1] = mbs_data.q[1]-mbs_data.q[2]*mbs_data.q[2]\n # Jac[1,1] = 1.\n # Jac[1,2] = -2*mbs_data.q[2].\n # IMPORTANT: NEVER REASSIGN h => h = np.array([0,mbs_data.q[1]-mbs_data.q[2]*mbs_data.q[2],0])\n # NEVER REASSIGN Jac => Jac = np.array([[0,0,0,0],[0,1,-2*mbs_data.q[2],0])\n # Both command will change the values of h, Jac in this function\n # but they will not be modified outside the scope of this function.\n rwt = RwtTrackGeometry(mbs_data, pointer = mbs_data.user_model['addons']['rwt'])\n rwc = RwcMain(pointer = mbs_data.user_model['addons']['rwc'])\n \n rwt.cons_hJ(mbs_data, h, Jac)\n rwc.compute_constraints(mbs_data, h, Jac)\n \n \"\"\"id1 = mbs_data.joint_id[\"R1_caisse1\"]\n id2 = mbs_data.joint_id[\"R1_caisse2\"]\n id3 = mbs_data.joint_id[\"R1_chassis1\"]\n id4 = mbs_data.joint_id[\"R1_chassis2\"]\n\n # define the value of the constraint\n h[1] = (mbs_data.q[id1] + mbs_data.q[id3]) - (mbs_data.q[id2] + mbs_data.q[id4])\n\n # define the value of the jacobian matrix\n Jac[1,id1] = 1\n Jac[1,id2] = -1\n Jac[1,id3] = 1\n Jac[1,id4] = -1\"\"\"\n \n return",
"def test_tanh_con():\n c=14\n assert {'diff':EF.tanh(c).der, 'value': EF.tanh(c).val}=={'diff':0, 'value': ((math.exp(c)-math.exp(-c))/2)/((math.exp(c)+math.exp(-c))/2)}",
"def h(self, z):\n # See definition at end of Section 1, p2 of Arnaud et al.\n return np.sqrt(self.Om*(1.+z)**3. + self.Ol)",
"def ph(self,k,z=0):\n return self.p(k*self.h,z)*self.h**3",
"def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state",
"def circuit_one_qubit_one_param_h_ry(inpt):\n qml.Hadamard(wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))",
"def hat(J):\n hat=math.sqrt(2*J+1)\n return hat",
"def h(x, theta):\n # ... dopolnite (naloga 1)\n\n power = x.dot(-theta.T)\n\n return 1 / (1 + np.exp(power))"
] | [
"0.6022703",
"0.57704234",
"0.5730367",
"0.56339955",
"0.5561419",
"0.5557344",
"0.5556481",
"0.5482355",
"0.548171",
"0.548036",
"0.54543763",
"0.5435389",
"0.5425596",
"0.5410607",
"0.5403748",
"0.5390412",
"0.537292",
"0.5371755",
"0.5351225",
"0.53323776",
"0.531349",
"0.52973264",
"0.528889",
"0.52793694",
"0.527774",
"0.5271966",
"0.5262096",
"0.52573866",
"0.52503824",
"0.5246471"
] | 0.71381897 | 0 |
Update the images that are displayed from the video stream. | def update(self):
# Update the vision frames in the system
self._system.update()
# Create blank PIL images to hold the video streams
layered = PIL.Image.new('RGBA', (400, 400))
stacked = PIL.Image.new('RGBA', (200, 800))
control = PIL.Image.new('RGBA', (600, 800))
focalpoint = self._system[self._appString["device"].get()].focalpoint()
# print(focalpoint)
# Get each vision key and vision for the selected device
visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString["device"].get()]]
# Loop through each vision in the vision list
for i, (visionKey, vision) in enumerate(visionList):
# Grab the frames from the vision when it is "curr"
frameList = [frame for frameKey, frame in vision if frameKey==self._appString["frame"].get()]
# Loop through each frame in the frame list
for frame in frameList:
# Get the properties and turn the image into RGBA
ratio, size = vision.properties()
rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
# print(rgbFrame.shape)
width, height, channels = rgbFrame.shape
# Paste the images together in layered
imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))
layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))
# layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))
# Paste the images together in stacked
imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))
stacked.paste(imgFrame, (0, 200 * i))
# Add the stacked image to the canvas
self._pilFrames["stacked"] = PIL.ImageTk.PhotoImage(image=stacked)
self._appCanvas["stacked"].create_image(100, 0, image=self._pilFrames["stacked"], anchor=tkinter.NW)
# Add the layered image to the canvas
self._pilFrames["layered"] = PIL.ImageTk.PhotoImage(image=layered)
self._appCanvas["layered"].create_image(0, 0, image=self._pilFrames["layered"], anchor=tkinter.NW)
# Add the control image to the canvas
imgFrame = cv2.cvtColor(self._system[self._appString["device"].get()][self._appString["vision"].get()][self._appString["frame"].get()], cv2.COLOR_BGR2RGBA)
control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))
self._pilFrames["control"] = PIL.ImageTk.PhotoImage(image=control)
self._appCanvas["control"].create_image(100, 90, image=self._pilFrames["control"], anchor=tkinter.NW)
# Continue to update with a delay of 15 ms
self.after(15, self.update) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n print('VIDEO: Video Stream started')\n while True:\n if self.stopped:\n return\n (self.grabbed, self.frame) = self.stream.read()",
"def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)",
"def refresh(self):\r\n # todo, use vid_info as property instead of this\r\n # reset properties and rebuild streams\r\n self.setup()",
"def update(self):\n self.frame = self.video_stream.read()",
"def update(self, frame = None):\n if type(frame) == type(None):\n frame = self.video.get_frame()\n height, width, channel = frame.shape\n bytesPerLine = 3 * width\n image = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\n self.pixmap = QtGui.QPixmap(image)\n size = self.size()\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.FastTransformation)\n self.setPixmap(scaledPix)\n\n QtCore.QCoreApplication.processEvents()",
"def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.camera_feed.setPixmap(qt_img)",
"def video_loop(self):\n if not self.isReplay:\n if self.initStream:\n print('[SB Live] Starting live video stream...')\n self.replayStream.release()\n self.vs.open(0)\n self.initStream = False\n print('[SB Live] Live video stream started')\n if self.cClear:\n self.cache.release()\n os.remove('sblive/cache/replay.mov')\n self.cache.open('sblive/cache/replay.mov', self.fourcc, 10.0, (1280, 720))\n self.cClear = False\n ok, frame = self.vs.read() # read frame from video stream\n if ok: # frame captured without any errors\n key = cv2.waitKey(1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA\n self.cache.write(frame)\n self.current_image = Image.fromarray(cv2image) # convert image for PIL\n imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter\n \n self.panel.imgtk = imgtk # anchor imgtk so it does not be deleted by garbage-collector\n self.panel.config(image=imgtk) # show the image\n else:\n if self.initStream:\n print('[SB Live] Starting replay video stream...')\n self.cache.release()\n self.vs.release()\n self.replayStream.open('sblive/cache/replay.mov')\n self.initStream = False\n print('[SB Live] Replay video stream started')\n ok, frame = self.replayStream.read()\n if ok:\n key = cv2.waitKey(1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA\n self.current_image = Image.fromarray(cv2image) # convert image for PIL\n imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter\n \n self.panel.imgtk = imgtk # anchor imgtk so it does not be deleted by garbage-collector\n self.panel.config(image=imgtk) # show the image\n else:\n self.replayStream.release()\n self.replayStream.open('sblive/cache/replay.mov')\n if not self.killThread:\n self.root.after(30, self.video_loop) # call the same function after 30 milliseconds",
"def updateImages(self, msg, arg2=None):\n\t\tself.picPaths = msg\n\t\tself.totalPictures = len(self.picPaths)\n\t\tself.loadImage(self.picPaths[0])",
"def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)",
"def update_imgs(self):\n\n for b in self.gamebuttons:\n b.update_img()\n self.start_but.update_img()",
"def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)",
"def setVidFrame(self, ori_images):\n self.cleanThread()\n if ori_images == 0:\n logging.critical(\"Video Image number 0\")\n else:\n self.buildRunDictMain(ori_images)",
"def update_image(self):\n self.image = Image.fromarray(self.img)",
"def update(self):\n while not self.stopped:\n time.sleep(0.01)\n self.grab_image()",
"def video_loop(self):\n\n _, img = self.vs.read()\n img = imutils.resize(img, width=self.width)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n self.frame.configure(image=image)\n self.frame.photo = image\n\n self.top.after(self.fps, self.video_loop)",
"def show_images(processed_video, unprocessed_video, arraylength, isFirst, levels, calculating_boarder, fps):\n processed_video = upsample_images(processed_video, unprocessed_video, arraylength, levels)\n if not isFirst:\n processed_video = processed_video[-calculating_boarder:]\n for image in processed_video:\n time.sleep(1/fps)\n cv2.imshow(\"colour changes pulse\", image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break",
"def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n if(self.iscapture):\n print(\"update\")\n direct = self.label1.text()\n if direct == \"~default\":\n direct = \"face_dataframes\"\n else:\n direct = direct + \"/face_dataframes\"\n \n if (not os.path.exists(direct)):\n os.mkdir(direct)\n cv2.imwrite(\"{1}/{2}{0}.jpeg\".format(self.count, direct,self.textbox.text()), cv_img)\n self.iscapture = False\n self.label2.setText(\"Image # 0{0} Saved\".format(self.count))\n self.pushButton0.setEnabled(False)\n self.count += 1\n \n \n if(self.count == 6):\n #print(\"greater\")\n self.pushButton.setEnabled(False)\n self.pushButton2.setDisabled(False)\n\n\n self.image_label.setPixmap(qt_img)",
"def update_image(self, cv_img):\n\t\tqt_img = self.ImageEdits(cv_img)\n\t\tself.camera.setPixmap(qt_img)",
"def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())",
"def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()",
"def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"",
"def update_image(self, cv_img):\n\t\tqt_img = self.convert_cv_qt(cv_img)\n\t\tself.label.setPixmap(qt_img)\n\t\tself.display_info()",
"def update_cap_image(self):\n\n fn = self._get_cap_filename()\n try:\n im = PIL.Image.open(fn)\n except FileNotFoundError:\n return\n\n frame = np.array(im)\n\n \"\"\"\n frame = cv2.imread(fn, cv2.IMREAD_ANYDEPTH)\n if (frame is None):\n return\n \"\"\"\n\n frame = (frame >> (16 - self.camera.pixel_bits)).astype(np.uint16)\n\n ndx = self.dpar.cur_cap\n\n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(frame, self.dpar.iwindow[ndx])\n self.live_screen.live_title = self._cap_title(ndx)\n self.live_screen.setPixmap(pix)\n else:\n pix, gray = self._get_pixmap(frame[::4,::4], self.dpar.iwindow[ndx])\n self.cap_screen.cap_title = self._cap_title(ndx)\n self.cap_screen.setPixmap(pix)\n self.cap_screen.format_for_cap() # This is because first time, format is for \"no stills\".",
"def update_image(self, surface):\n self.ui_widget.update_image(surface=surface)",
"def run(self):\n \n count = 0\n while True:\n self.__ticker.tick()\n\n self.update()\n img = self.get_new_image()\n if img == None:\n print \"not receiving images yet...\"\n else:\n if self.verbose:\n cv.ShowImage(\"SnapShotSaver\", img)\n cv.WaitKey(10)\n cv.SaveImage(\"%s/%s_%d.png\" % (self.destination, self.prefix, count), img)\n count += 1",
"def update_image(self, cv_img):\n \n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)\n #pass",
"def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)",
"def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)",
"def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)",
"def _update_frame(self):\n # check if continue\n if self._keep_updating:\n self.__frame = self._cam.get_display_frame()\n if self.__frame is not None:\n self._cvn_camera_viewfinder.create_image(0, 0, image=self.__frame, anchor=tk.NW)\n\n self._root.after(self._delay, self._update_frame)"
] | [
"0.67444074",
"0.6730207",
"0.6720518",
"0.67168564",
"0.6591318",
"0.657772",
"0.6577194",
"0.64696455",
"0.6445771",
"0.64267576",
"0.64083344",
"0.6405238",
"0.6402562",
"0.6371376",
"0.6345765",
"0.63317454",
"0.62935567",
"0.62902534",
"0.6285674",
"0.6278713",
"0.62650186",
"0.626164",
"0.6199037",
"0.6198393",
"0.6190942",
"0.61784947",
"0.6167083",
"0.61226094",
"0.61226094",
"0.61217725"
] | 0.7045872 | 0 |
Update the vision choices when a new device is selected. | def updateDevice(self, *args):
# Update the list of vision choices and the default vision choice
self._appChoice["vision"] = [choice[0] for choice in self._system[self._appString["device"].get()]]
self._appString["vision"].set(self._appChoice["vision"][0])
# Delete the old choices from the option menu
menu = self._appOption["vision"]["menu"]
menu.delete(0, "end")
# Add the new list of choices to the option menu
for string in self._appChoice["vision"]:
menu.add_command(label=string, command=lambda value=string: self._appString["vision"].set(value)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateVision(self, *args):\r\n\r\n # Update the list of frame choices and the default frame choice\r\n self._appChoice[\"frame\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()]]\r\n self._appString[\"frame\"].set(self._appChoice[\"frame\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"frame\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"frame\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"frame\"].set(value))",
"def device_selected(self, uid):\n if uid == self.cur_uid:\n print \"Already Selected\"\n return\n # This line is going to return \"DEVICE_LABEL\" so you may as well skip it\n pid_key = \"DEVICE_LABEL\"\n self.dev_label.set(\"%s (%s)\"%(self._uid_dict[uid][pid_key][\"label\"], uid))\n self.ola_thread.rdm_get(self.universe.get(), uid, 0, \"IDENTIFY_DEVICE\", \n lambda b, s, uid = uid:self._get_identify_complete(uid, b, s))\n\n if \"SUPPORTED_PARAMETERS\" not in self._uid_dict[uid]:\n self.ola_thread.rdm_get(\n self.universe.get(), uid, 0, \"SUPPORTED_PARAMETERS\",\n lambda b, l, uid = uid:self._get_pids_complete(uid, b, l))\n else:\n self._notebook.Update()\n self.cur_uid = uid",
"def update_selection(self):\n raise NotImplementedError",
"def _selected_bridge_changed(self, new):\n if self.bridges[new] is None:\n bridge_desc_file = os.path.join(BRIDGES_PATH, new + u'.json')\n bridge = self.load_bridge_info(bridge_desc_file)\n self.bridges[new] = bridge\n\n self.bridge = self.bridges[new]\n self.channels = [channel for instrument in self.bridge.children\n for sensor in instrument.children\n for channel in sensor.children]",
"def set_choices(self):\n self.check_idxs()\n np.random.seed()\n self.choices = np.random.choice(self.running_idxs, self.num_selections)\n self.running_idxs = np.delete(self.running_idxs, self.choices)\n self.choices = torch.tensor(self.choices).cuda()",
"def setupDeviceGui(self):\n\n dropDowns = list(self.drivers[driver]['uiDriver'] for driver in self.drivers)\n for dropDown in dropDowns:\n dropDown.clear()\n dropDown.setView(PyQt5.QtWidgets.QListView())\n dropDown.addItem('No device selected')\n\n # adding special items\n self.drivers['dome']['uiDriver'].addItem('INDI')\n self.drivers['imaging']['uiDriver'].addItem('INDI')\n self.drivers['sensorWeather']['uiDriver'].addItem('INDI')\n self.drivers['directWeather']['uiDriver'].addItem('Built-In')\n self.drivers['onlineWeather']['uiDriver'].addItem('Built-In')\n self.drivers['cover']['uiDriver'].addItem('INDI')\n self.drivers['skymeter']['uiDriver'].addItem('INDI')\n self.drivers['telescope']['uiDriver'].addItem('INDI')\n self.drivers['power']['uiDriver'].addItem('INDI')\n self.drivers['relay']['uiDriver'].addItem('Built-In')\n for app in self.app.astrometry.solverAvailable:\n self.drivers['astrometry']['uiDriver'].addItem(app)\n self.drivers['remote']['uiDriver'].addItem('Built-In')\n self.drivers['measure']['uiDriver'].addItem('Built-In')\n\n return True",
"def select(self):\n if not self._selected:\n \tself._selected = True\n\t\tself.log(\"device {} is now selected\".format(self._secondary_address))",
"def vistrailChanged(self):\n from vistrails.gui.vistrails_window import _app\n select_node = True\n if _app._previous_view and _app._previous_view.window() != self.window():\n select_node = False\n self.scene().setupScene(self.controller, select_node)\n if self.controller and self.controller.reset_version_view:\n self.scene().fitToAllViews()\n if self.controller:\n # self.versionProp.updateVersion(self.controller.current_version)\n self.versionProp.updateVersion(self.controller.current_version)\n self.emit(QtCore.SIGNAL(\"vistrailChanged()\"))",
"def update_dev(self, *args):\r\n try:\r\n self.localSDK.get_version()\r\n except IOError:\r\n kT.debug_log('IO Error', sys.exc_info()[2])\r\n try:\r\n self.newProj.name = self.widgetList[3].get()\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n\r\n if self.advancedDevType.get():\r\n\r\n self.widgetList[34].state([\"!disabled\"])\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Board:')\r\n\r\n try:\r\n self.widgetList[31].config(command=lambda: self.begin_advanced_gen(self.master, None))\r\n except TclError:\r\n kT.debug_log('Tcl Error', sys.exc_info()[2])\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advBrdSelect)\r\n self.widgetList[8]['values'] = self.localSDK.brdList\r\n try:\r\n self.widgetList[8].current(int(self.currBoard) - 1)\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n else:\r\n try:\r\n self.widgetList[34].state([\"disabled\"])\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Device:')\r\n\r\n self.widgetList[31].config(command=lambda: self.package_select(self.master))\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n \r\n #FIXME Radka add special method for updating path \r\n self._update_project_path()",
"def _add_device(self, uid, succeeded, data):\n # TODO: Bug: on discover the label in the label in the device option menu \n # doesn't change and if you try to select the first device it tells \n # you that it is already selected\n if succeeded:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = data\n self.device_menu[\"menu\"].add_command( label = \"%s (%s)\"%(\n self._uid_dict[uid][\"DEVICE_LABEL\"][\"label\"], uid), \n command = lambda:self.device_selected(uid))\n else:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = {\"label\":\"\"}\n self.device_menu[\"menu\"].add_command( label = \"%s\" % uid, \n command = lambda:self.device_selected(uid))\n self._uid_dict[uid][\"index\"] = self.device_menu[\"menu\"].index(tk.END)",
"def state_chosen_do(cfg, app, win, events):",
"def img_dict_updated(self, change):\n if change[\"value\"]:\n self.select_dataset(self.io_model.img_dict_default_selected_item)\n self.init_plot_status()",
"def setupDeviceGui(self):\n\n # all dropdown have disabled as capability\n dropDowns = list(self.drivers[driver]['uiDropDown'] for driver in self.drivers)\n for dropDown in dropDowns:\n dropDown.clear()\n dropDown.setView(PyQt5.QtWidgets.QListView())\n dropDown.addItem('device disabled')\n\n # adding driver items with applicable framework\n for driver in self.drivers:\n if not hasattr(self.drivers[driver]['class'], 'run'):\n continue\n for framework in self.drivers[driver]['class'].run.keys():\n self.drivers[driver]['uiDropDown'].addItem(framework)\n\n return True",
"def update_selection(self):\n\n # clear all boxes\n self.clear_boxes()\n self.draw_figure(self.s)\n\n # update temperature list\n if self.Data[self.s]['T_or_MW'] == \"T\":\n self.temperatures = np.array(self.Data[self.s]['t_Arai']) - 273.\n else:\n self.temperatures = np.array(self.Data[self.s]['t_Arai'])\n\n self.T_list = [\"%.0f\" % T for T in self.temperatures]\n self.tmin_box.SetItems(self.T_list)\n self.tmax_box.SetItems(self.T_list)\n self.tmin_box.SetValue(\"\")\n self.tmax_box.SetValue(\"\")\n self.Blab_window.SetValue(\n \"%.0f\" % (float(self.Data[self.s]['pars']['lab_dc_field']) * 1e6))\n if \"saved\" in self.Data[self.s]['pars']:\n self.pars = self.Data[self.s]['pars']\n self.update_GUI_with_new_interpretation()\n self.Add_text(self.s)\n self.write_sample_box()",
"def update_view(self, selected):\n pass",
"def update_device_list(self, device_list):\n self.device_list = device_list\n\n self.device_combo.clear()\n\n if not device_list:\n return\n\n self.device_combo.addItem(\"\")\n\n active_entry = None\n\n for dev in device_list:\n\n action_string = \"{model:<18} - {contype:<7} - {serial}\".format(model=dev.model,\n contype=dev.device_type,\n serial=dev.serial)\n if dev.serial == self.serial:\n active_entry = action_string\n self.device_combo.addItem(action_string)\n\n if active_entry is not None:\n self.device_combo.setCurrentText(active_entry)",
"def _selected_labels_changed(self, name, old, new):\n if self.value_lock.acquire(False):\n try:\n self.value = [self._options_dict[name] for name in new]\n finally:\n self.value_lock.release()",
"def graphs_change():\n d = curdoc()\n _remove_fig(d)\n _remove_selection(d)\n graph_val = d.get_model_by_name(GRAPH_SELECTION).value\n model_id, message_name, model_type = run_handlers.get_modelid_messagename_type(d)\n props = run_handlers.get_model_properties(model_id, message_name, model_type)\n\n if graph_val in [\"line\", \"scatter\", \"step\"]:\n # never want to plot this special string field\n field_options = [\"{0} : {1}\".format(k, props[k]) for k in props if not any(apv in k for apv in [ APV_MODEL ] ) ]\n xselect = Select(title=\"X Axis\", value=DEFAULT_UNSELECTED, options=field_options + [DEFAULT_UNSELECTED], name=X_AXIS_SELECTION)\n yselect = Select(title=\"Y Axis\", value=DEFAULT_UNSELECTED, options=field_options + [DEFAULT_UNSELECTED], name=Y_AXIS_SELECTION)\n xselect.on_change('value', lambda attr, old, new: make_2axis_graph())\n yselect.on_change('value', lambda attr, old, new: make_2axis_graph())\n d.add_root(column(Div(text=\"\"), row(widgetbox([xselect]), widgetbox([yselect])), name=FIELD_SELECTION))\n\n if graph_val in [\"image\"]:\n # alter the field options for known non-image fields\n field_options = [\"{0} : {1}\".format(k, props[k]) for k in props if not any(apv in k for apv in [APV_RECVD, APV_SEQNO, APV_MODEL] ) ]\n imageselect = Select(title=\"Image Field\", value=DEFAULT_UNSELECTED, options=[DEFAULT_UNSELECTED] + field_options, name=IMAGE_SELECTION)\n mimeselect = Select(title=\"MIME Type\", value=DEFAULT_UNSELECTED, options=[DEFAULT_UNSELECTED] + SUPPORTED_MIME_TYPES, name=MIME_SELECTION)\n imageselect.on_change('value', lambda attr, old, new: image_selection_change())\n mimeselect.on_change('value', lambda attr, old, new: image_selection_change())\n d.add_root(column(Div(text=\"\"), widgetbox([imageselect, mimeselect]), name=IMAGE_MIME_SELECTION))\n\n if graph_val in [\"table\"]:\n # TODO: limit selectable columns to whose of the same size (table height)\n # use just the field name; don't show properties in the multi-select box\n col_options = [k for k in props if not any (apv in k for apv in [APV_RECVD, APV_SEQNO, APV_MODEL] ) ]\n columnmultiselect = MultiSelect(title=\"Columns:\", value=[], options=col_options, name=COLUMN_MULTISELECT)\n columnmultiselect.on_change('value', lambda attr, old, new: column_selection_change())\n d.add_root(column(Div(text=\"\"), widgetbox([columnmultiselect]), name=COLUMN_SELECTION))\n\n if graph_val in [\"raw\"]:\n p = figure(plot_width=500, plot_height=500,\n background_fill_color=\"white\",\n y_range=(-40, 0), title=\"\", name=FIGURE_MODEL)\n p.xaxis.visible = False\n p.yaxis.visible = False\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n p.text(x='apv_sequence_number',\n y=0,\n text='apv_model_as_string',\n source=d.get_model_by_name(sind),\n text_font_size=\"10pt\",\n text_line_height=0.7,\n text_baseline=\"top\",\n text_align=\"left\")\n p.x_range.follow = \"end\" # don't jam all the data into the graph; \"window\" it\n p.x_range.follow_interval = 1 # don't jam all the data into the graph; \"window\" it\n p.x_range.range_padding = 0\n d.add_root(p)",
"def updateAvailablePorts(self):\n # Build a port list\n device_list_all = comports()\n self.device_choices = list()\n for device in device_list_all:\n self.device_choices.append(device[0])\n\n if len(self.device_choices) < 1:\n tkinter.messagebox.showerror('No Available Serial Ports','No serial ports are available.')",
"def update(self):\n self.platform_list.update()\n self.enemy_list.update()",
"def on_change(self, attr, old, new):\n n = self.labels.index(new) + 1 # Select 0-indexed\n self.notify(set_figures(n))",
"def apply_changes(self, updated_talk=None):\r\n self.presentationModel.select()\r\n self.select_talk(updated_talk)\r\n self.update_autocomplete_fields()",
"def modelselec_change():\n d = curdoc()\n _remove_fig(d)\n _remove_selection(d)\n d.remove_root(d.get_model_by_name(AFTER_MODEL_SELECTION))\n modelselec = d.get_model_by_name(MODEL_SELECTION)\n msv = modelselec.value\n if msv != DEFAULT_UNSELECTED:\n model_id, message_name, model_type = run_handlers.get_modelid_messagename_type(d)\n if model_type == \"protobuf\":\n message = Select(title=\"Message Selection\", value=DEFAULT_UNSELECTED, options=list(data.proto_data_structure[model_id][\"messages\"].keys()) + [DEFAULT_UNSELECTED], name=MESSAGE_SELECTION)\n graphs = Select(title=\"Graph Selection\", value=DEFAULT_UNSELECTED, options=[], name=GRAPH_SELECTION)\n message.on_change('value', lambda attr, old, new: message_change())\n graphs.on_change('value', lambda attr, old, new: graphs_change())\n selec = row(widgetbox([message]), widgetbox([graphs]), name=AFTER_MODEL_SELECTION)\n else: # there is no message selection here\n graphs = Select(title=\"Graph Selection\", value=DEFAULT_UNSELECTED, options=GRAPH_OPTIONS, name=GRAPH_SELECTION)\n graphs.on_change('value', lambda attr, old, new: graphs_change())\n selec = row(widgetbox([graphs]), name=AFTER_MODEL_SELECTION)\n d.add_root(selec)\n d.add_root(selec)",
"def choose_devices(devices, query=False):\n # Style for selection interface\n style = style_from_dict({\n Token.Separator: '#FF00AA',\n Token.QuestionMark: '#00AAFF bold',\n Token.Selected: '#00AAFF', # default\n Token.Pointer: '#00FF00 bold',\n Token.Instruction: '#FFAA00', # default\n Token.Answer: '#00AAFF bold',\n Token.Question: '#FF00AA',\n })\n\n choice_list = []\n current_type = None\n name_len = max([len(dev.f_name) for dev in devices]) + 1\n\n if query:\n print('Querying all devices for version numbers. '\n 'This may take a minute...')\n choice_list.append(Separator('Name'.center(name_len)\n + 'Firmware'.center(15)\n + 'Core'.center(7) + 'State'))\n for device in devices:\n if device.type != current_type:\n current_type = device.type\n sep_str = ' {} '.format(current_type).center(name_len + 29, '=')\n choice_list.append(Separator(sep_str))\n menu_text = device.f_name.ljust(name_len)\n if query and device.software == 'tasmota':\n device.query_tas_status()\n if 'tas_version' in device.reported:\n menu_text += device.reported['tas_version'].ljust(15)\n menu_text += device.reported['core_version'].ljust(7)\n if device.reported['power'] is not None:\n menu_text += device.reported['power']\n else:\n menu_text += 'Offline'\n choice_list.append({'name': menu_text, 'value': device.f_name})\n\n # Ask the user to choose which devices to flash\n questions = [\n {\n 'type': 'checkbox',\n 'message': 'Select Devices',\n 'name': 'device_selection',\n 'choices': choice_list,\n }\n ]\n answers = prompt(questions, style=style)\n selected_devices = [device for device in devices if device.f_name \\\n in answers['device_selection']]\n return selected_devices",
"def _selectionChangedSlot(self, _):\r\n\r\n self._updateButtonStates()",
"def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()",
"def poll_selection(self):\r\n osName = platform.system()\r\n\r\n ## Check if the user changed the KSDK_path\r\n try:\r\n checkPath = self.widgetList[1].get()\r\n if checkPath != self.localSDK.path:\r\n self.ask_set_directory(True, 1)\r\n\r\n ## Check if user updated project name\r\n checkName = self.widgetList[4].get()\r\n if checkName != self.newProj.name:\r\n if kT.check_proj_name(checkName):\r\n self.newProj.name = checkName\r\n else:\r\n self.newProj.name = None\r\n if self.prevName != checkName:\r\n tkMessageBox.showinfo(\"Invalid Project Name\",\\\r\n \"No spaces or special characters.\")\r\n self.prevName = checkName\r\n kT.debug_log(\"Invalid name\")\r\n except AttributeError:\r\n kT.debug_log(\"Basic Changed menu\", sys.exc_info()[2])\r\n #return\r\n\r\n try:\r\n now = self.widgetList[6].curselection()\r\n if now != self.curr:\r\n if len(self.widgetList[6].curselection()) > 0:\r\n try:\r\n self.displayBoard = PhotoImage(data=self.imageList[int(now[0])])\r\n except IndexError:\r\n kT.debug_log(now[0], sys.exc_info()[2])\r\n self.widgetList[8].grid_remove()\r\n self.widgetList[8] = Button(self, \\\r\n image=self.displayBoard, \\\r\n command=lambda:\\\r\n self.web_launch(self.localSDK.brdList[\\\r\n int(self.widgetList[6].curselection()[0])]))\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].grid(row=5, column=3, columnspan=3, sticky=E+W+N+S)\r\n self.widgetList[8].bind(\"<Enter>\", \\\r\n lambda h: self.update_tips('Is this your board?\\n' + \\\r\n 'If so, ' + \\\r\n 'then clicking on the board' + \\\r\n ' image will take you to the ' + \\\r\n 'board homepage on ' + \\\r\n 'freescale.com.\\n\\n'))\r\n self.widgetList[8].bind(\"<Leave>\", \\\r\n lambda h: self.update_tips(self.defaultHelp))\r\n self.curr = now\r\n try:\r\n self.currBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n except IndexError:\r\n self.displayBoard = PhotoImage(data=kImg.boardImages['kds_icon.gif'])\r\n self.widgetList[8].config(image=self.displayBoard)\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].config(command=lambda: self.web_launch(kImg.boardImages['NoPreview.gif']))\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except IndexError:\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except AttributeError:\r\n kT.debug_log(\"AttributeError\", sys.exc_info()[2])\r\n return\r\n\r\n self._retLoop = self.after(250, self.poll_selection)",
"def update_rec_configs_choice(self):\n # this will update the configuration choices in reconstruction tab\n # fill out the config_id choice bar by reading configuration files names\n self.rec_ids = []\n for file in os.listdir(os.path.join(self.main_win.experiment_dir, 'conf')):\n if file.endswith('_config_rec'):\n self.rec_ids.append(file[0:len(file) - len('_config_rec')])\n if len(self.rec_ids) > 0:\n self.rec_id.addItems(self.rec_ids)\n self.rec_id.show()",
"def update(self):\n #update checkboxes\n self.average_check_box.SetValue(self.parent.fftsink.average)\n self.use_persistence_check_box.SetValue(self.parent.fftsink.use_persistence)\n self.peak_hold_check_box.SetValue(self.parent.fftsink.peak_hold)\n #update radio buttons\n try:\n index = list(DIV_LEVELS).index(self.parent.fftsink.y_per_div)\n self.radio_buttons[index].SetValue(True)\n except: pass",
"def _selection_changed(self, i):\n\n # Check if we have disconnected\n if i < 0:\n return\n # First check if we need to stop the old block\n if self._started_previous and self._previous_config:\n logger.debug(\"Should stop config [%s], stopping!\",\n self._previous_config.name)\n self._previous_config.delete()\n\n # Remove our callback for the previous config\n if self._previous_config:\n self._previous_config.data_received_cb.remove_callback(\n self._log_data_signal_wrapper)\n self._previous_config.error_cb.remove_callback(\n self._log_error_signal_wrapper)\n\n lg = self._model.get_config(i)\n if not lg.started:\n logger.debug(\"Config [%s] not started, starting!\", lg.name)\n self._started_previous = True\n lg.start()\n else:\n self._started_previous = False\n self._plot.removeAllDatasets()\n color_selector = 0\n\n self._plot.set_title(lg.name)\n\n self.avgsumvalue = dict();\n self.avgsumnumb = 0;\n for d in lg.variables:\n self._plot.add_curve(d.name, self.colors[\n color_selector % len(self.colors)])\n color_selector += 1\n self.avgsumvalue[d.name]=0;\n lg.data_received_cb.add_callback(self._log_data_signal_wrapper)\n lg.error_cb.add_callback(self._log_error_signal_wrapper)\n\n self._previous_config = lg"
] | [
"0.68089235",
"0.59843624",
"0.5973282",
"0.5937724",
"0.5828414",
"0.58241105",
"0.5739061",
"0.56566024",
"0.56519794",
"0.5624004",
"0.5604147",
"0.5591249",
"0.55712897",
"0.5546274",
"0.5538931",
"0.54759663",
"0.54590356",
"0.54416597",
"0.54367846",
"0.54256344",
"0.54246885",
"0.5420892",
"0.5395128",
"0.53674567",
"0.5347316",
"0.5330627",
"0.5329606",
"0.53228045",
"0.53176385",
"0.5273428"
] | 0.82246095 | 0 |
Update the frame choices when a new vision is selected. | def updateVision(self, *args):
# Update the list of frame choices and the default frame choice
self._appChoice["frame"] = [choice[0] for choice in self._system[self._appString["device"].get()][self._appString["vision"].get()]]
self._appString["frame"].set(self._appChoice["frame"][0])
# Delete the old choices from the option menu
menu = self._appOption["frame"]["menu"]
menu.delete(0, "end")
# Add the new list of choices to the option menu
for string in self._appChoice["frame"]:
menu.add_command(label=string, command=lambda value=string: self._appString["frame"].set(value)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slider_frames_changed(self):\n\n # Again, please note the difference between indexing and GUI displays.\n index = self.slider_frames.value() - 1\n\n # Differentiate between frame ordering (by quality or chronologically).\n if self.frame_ordering == \"quality\":\n self.frame_index = self.quality_sorted_indices[index]\n self.quality_index = index\n\n else:\n self.frame_index = index\n self.quality_index = self.rank_indices[self.frame_index]\n\n # Adjust the frame list and select the current frame.\n\n self.listWidget.setCurrentRow(index, QtCore.QItemSelectionModel.SelectCurrent)\n\n # Update the image in the viewer.\n self.frame_selector.setPhoto(self.frame_index)\n self.listWidget.setFocus()",
"def hook_frame_selected(self):",
"def update_selection(self):\n raise NotImplementedError",
"def update_selection(self):\n\n # clear all boxes\n self.clear_boxes()\n self.draw_figure(self.s)\n\n # update temperature list\n if self.Data[self.s]['T_or_MW'] == \"T\":\n self.temperatures = np.array(self.Data[self.s]['t_Arai']) - 273.\n else:\n self.temperatures = np.array(self.Data[self.s]['t_Arai'])\n\n self.T_list = [\"%.0f\" % T for T in self.temperatures]\n self.tmin_box.SetItems(self.T_list)\n self.tmax_box.SetItems(self.T_list)\n self.tmin_box.SetValue(\"\")\n self.tmax_box.SetValue(\"\")\n self.Blab_window.SetValue(\n \"%.0f\" % (float(self.Data[self.s]['pars']['lab_dc_field']) * 1e6))\n if \"saved\" in self.Data[self.s]['pars']:\n self.pars = self.Data[self.s]['pars']\n self.update_GUI_with_new_interpretation()\n self.Add_text(self.s)\n self.write_sample_box()",
"def updateDevice(self, *args):\r\n\r\n # Update the list of vision choices and the default vision choice\r\n self._appChoice[\"vision\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()]]\r\n self._appString[\"vision\"].set(self._appChoice[\"vision\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"vision\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"vision\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"vision\"].set(value))",
"def _selectionChangedSlot(self, _):\r\n\r\n self._updateButtonStates()",
"def change_frame(self, frame):\r\n pass",
"def use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i included\" % frame_selected)\n item.setBackground(self.background_included)\n item.setForeground(QtGui.QColor(0, 0, 0))\n self.index_included[index_selected] = True\n self.frame_selector.setPhoto(self.frame_index)",
"def _update_gui(self):\r\n \r\n # Update the RF button.\r\n rf_on = self.api.get_output()\r\n if rf_on == None: rf_on = True\r\n self.button_rf.set_checked(rf_on, block_events=True).enable()\r\n \r\n # Update the combo; we block first just in case the value doesn't \"change\"\r\n if self.api == None: self.label_instrument_name.set_text('Simulation')\r\n else:\r\n if self.api.get_mode() == 'Fixed': self.combo_mode.set_value(0, block_events=True).enable()\r\n else: self.combo_mode.set_value(1, block_events=True).enable()\r\n self._combo_mode_changed()\r\n \r\n # Update the list plot\r\n self.query_list()",
"def accept(self):\n self.selInsts = [x.get() for x in self.selInstsVar]\n self.selParams = [x.get() for x in self.selParamsVar]\n self.updateTitle()\n self.window.grab_release()\n self.window.destroy()\n self.instBoxes = []\n self.paramBoxes = []\n self.subRows = []\n self.selInstsVar = []\n self.selParamsVar = []\n self.addRow = []\n self.instTraces = []",
"def ChannelSelect(self):\n self.active_mode = 'default'\n self.reset_buttons()\n # Dialog to choose channels from specific brain regions\n w = SelectChannelsDialog(\n stringlist=self.model.all_regions,\n checked=self.model.regions_mask\n )\n all_locs = self.model.electrodes_table['location'][self.model.electrical_series_channel_ids]\n self.model.channels_mask = np.zeros(len(all_locs))\n for loc in w.choices:\n self.model.channels_mask += all_locs == np.array(loc)\n # Indices of channels from chosen regions\n self.model.channels_mask_ind = np.where(self.model.channels_mask)[0]\n self.model.n_channels_total = len(self.model.channels_mask_ind)\n # Reset channels span control\n self.model.lastCh = np.minimum(16, self.model.n_channels_total)\n self.model.firstCh = 1\n self.model.nChToShow = self.model.lastCh - self.model.firstCh + 1\n self.qline0.setText(str(self.model.lastCh))\n self.qline1.setText(str(self.model.firstCh))\n # Update signals plot\n self.model.selectedChannels = self.model.channels_mask_ind[self.model.firstCh - 1:self.model.lastCh]\n self.model.refreshScreen()",
"def hook_frame_unselected(self):",
"def update_view(self, selected):\n pass",
"def __init__(self, parent_gui, configuration, frames, rank_frames, stacked_image_log_file,\n signal_finished):\n\n super(FrameSelectorWidget, self).__init__(parent_gui)\n self.setupUi(self)\n\n # Keep references to upper level objects.\n self.parent_gui = parent_gui\n self.configuration = configuration\n self.stacked_image_log_file = stacked_image_log_file\n self.signal_finished = signal_finished\n self.frames = frames\n self.index_included = frames.index_included.copy()\n self.quality_sorted_indices = rank_frames.quality_sorted_indices\n self.rank_indices = rank_frames.rank_indices\n\n # Start with ordering frames by quality. This can be changed by the user using a radio\n # button.\n self.frame_ordering = \"quality\"\n\n # Initialize the frame list selection.\n self.items_selected = None\n self.indices_selected = None\n\n # Set colors for the frame list.\n self.background_included = QtGui.QColor(130, 255, 130)\n self.foreground_included = QtGui.QColor(0, 0, 0)\n self.background_excluded = QtGui.QColor(120, 120, 120)\n self.foreground_excluded = QtGui.QColor(255, 255, 255)\n\n self.addButton.clicked.connect(self.use_triggered)\n self.removeButton.clicked.connect(self.not_use_triggered)\n\n # Be careful: Indices are counted from 0, while widget contents are counted from 1 (to make\n # it easier for the user.\n self.quality_index = 0\n self.frame_index = self.quality_sorted_indices[self.quality_index]\n\n # Set up the frame selector and put it in the upper left corner.\n self.frame_selector = VideoFrameSelector(self.frames, self.index_included, self.frame_index)\n self.frame_selector.setObjectName(\"frame_selector\")\n self.gridLayout.addWidget(self.frame_selector, 0, 0, 2, 3)\n\n # Initialize the list widget.\n self.fill_list_widget()\n self.listWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.listWidget.installEventFilter(self)\n self.listWidget.itemClicked.connect(self.select_items)\n self.listWidget.currentRowChanged.connect(self.synchronize_slider)\n\n # Group widget elements which are to be blocked during player execution in a list.\n self.widget_elements = [self.listWidget,\n self.slider_frames,\n self.addButton,\n self.removeButton,\n self.pushButton_play,\n self.GroupBox_frame_sorting]\n\n # Initialize a variable for communication with the frame_player object later.\n self.run_player = False\n\n # Create the frame player thread and start it. 
The player displays frames in succession.\n # It is pushed on a different thread because otherwise the user could not stop it before it\n # finishes.\n self.player_thread = QtCore.QThread()\n self.frame_player = FramePlayer(self)\n self.frame_player.moveToThread(self.player_thread)\n self.frame_player.block_widgets_signal.connect(self.block_widgets)\n self.frame_player.unblock_widgets_signal.connect(self.unblock_widgets)\n self.frame_player.set_photo_signal.connect(self.frame_selector.setPhoto)\n self.frame_player.set_slider_value.connect(self.slider_frames.setValue)\n self.frame_player_start_signal.connect(self.frame_player.play)\n self.player_thread.start()\n\n # Initialization of GUI elements\n self.slider_frames.setMinimum(1)\n self.slider_frames.setMaximum(self.frames.number)\n self.slider_frames.setValue(self.quality_index + 1)\n self.radioButton_quality.setChecked(True)\n\n self.gridLayout.setColumnStretch(0, 7)\n self.gridLayout.setColumnStretch(1, 0)\n self.gridLayout.setColumnStretch(2, 0)\n self.gridLayout.setColumnStretch(3, 0)\n self.gridLayout.setColumnStretch(4, 1)\n self.gridLayout.setRowStretch(0, 0)\n self.gridLayout.setRowStretch(1, 0)\n\n # Connect signals with slots.\n self.buttonBox.accepted.connect(self.done)\n self.buttonBox.rejected.connect(self.reject)\n self.slider_frames.valueChanged.connect(self.slider_frames_changed)\n self.pushButton_play.clicked.connect(self.pushbutton_play_clicked)\n self.pushButton_stop.clicked.connect(self.pushbutton_stop_clicked)\n self.radioButton_quality.toggled.connect(self.radiobutton_quality_changed)\n\n if self.configuration.global_parameters_protocol_level > 0:\n Miscellaneous.protocol(\"+++ Start selecting frames +++\", self.stacked_image_log_file)",
"def radiobutton_quality_changed(self):\n\n # Block listWidget signals. Otherwise, changes to the widget triggered by changing the\n # slider would cause trouble.\n self.listWidget.blockSignals(True)\n\n if self.frame_ordering == \"quality\":\n self.frame_ordering = \"chronological\"\n self.slider_frames.setValue(self.frame_index + 1)\n else:\n self.frame_ordering = \"quality\"\n self.slider_frames.setValue(self.quality_index + 1)\n\n self.fill_list_widget()\n\n # Unblock listWidget signals again.\n self.listWidget.blockSignals(False)",
"def _selection_changed(self, i):\n\n # Check if we have disconnected\n if i < 0:\n return\n # First check if we need to stop the old block\n if self._started_previous and self._previous_config:\n logger.debug(\"Should stop config [%s], stopping!\",\n self._previous_config.name)\n self._previous_config.delete()\n\n # Remove our callback for the previous config\n if self._previous_config:\n self._previous_config.data_received_cb.remove_callback(\n self._log_data_signal_wrapper)\n self._previous_config.error_cb.remove_callback(\n self._log_error_signal_wrapper)\n\n lg = self._model.get_config(i)\n if not lg.started:\n logger.debug(\"Config [%s] not started, starting!\", lg.name)\n self._started_previous = True\n lg.start()\n else:\n self._started_previous = False\n self._plot.removeAllDatasets()\n color_selector = 0\n\n self._plot.set_title(lg.name)\n\n self.avgsumvalue = dict();\n self.avgsumnumb = 0;\n for d in lg.variables:\n self._plot.add_curve(d.name, self.colors[\n color_selector % len(self.colors)])\n color_selector += 1\n self.avgsumvalue[d.name]=0;\n lg.data_received_cb.add_callback(self._log_data_signal_wrapper)\n lg.error_cb.add_callback(self._log_error_signal_wrapper)\n\n self._previous_config = lg",
"def select_items(self):\n\n self.listWidget.currentItem().setSelected(True)\n self.items_selected = self.listWidget.selectedItems()\n\n if self.frame_ordering == \"quality\":\n self.indices_selected = [self.quality_sorted_indices[self.listWidget.row(item)] for item\n in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n else:\n self.indices_selected = [self.listWidget.row(item) for item in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n\n self.synchronize_slider()",
"def update_species_frames(self):\n pass",
"def _selected_bridge_changed(self, new):\n if self.bridges[new] is None:\n bridge_desc_file = os.path.join(BRIDGES_PATH, new + u'.json')\n bridge = self.load_bridge_info(bridge_desc_file)\n self.bridges[new] = bridge\n\n self.bridge = self.bridges[new]\n self.channels = [channel for instrument in self.bridge.children\n for sensor in instrument.children\n for channel in sensor.children]",
"def graphs_change():\n d = curdoc()\n _remove_fig(d)\n _remove_selection(d)\n graph_val = d.get_model_by_name(GRAPH_SELECTION).value\n model_id, message_name, model_type = run_handlers.get_modelid_messagename_type(d)\n props = run_handlers.get_model_properties(model_id, message_name, model_type)\n\n if graph_val in [\"line\", \"scatter\", \"step\"]:\n # never want to plot this special string field\n field_options = [\"{0} : {1}\".format(k, props[k]) for k in props if not any(apv in k for apv in [ APV_MODEL ] ) ]\n xselect = Select(title=\"X Axis\", value=DEFAULT_UNSELECTED, options=field_options + [DEFAULT_UNSELECTED], name=X_AXIS_SELECTION)\n yselect = Select(title=\"Y Axis\", value=DEFAULT_UNSELECTED, options=field_options + [DEFAULT_UNSELECTED], name=Y_AXIS_SELECTION)\n xselect.on_change('value', lambda attr, old, new: make_2axis_graph())\n yselect.on_change('value', lambda attr, old, new: make_2axis_graph())\n d.add_root(column(Div(text=\"\"), row(widgetbox([xselect]), widgetbox([yselect])), name=FIELD_SELECTION))\n\n if graph_val in [\"image\"]:\n # alter the field options for known non-image fields\n field_options = [\"{0} : {1}\".format(k, props[k]) for k in props if not any(apv in k for apv in [APV_RECVD, APV_SEQNO, APV_MODEL] ) ]\n imageselect = Select(title=\"Image Field\", value=DEFAULT_UNSELECTED, options=[DEFAULT_UNSELECTED] + field_options, name=IMAGE_SELECTION)\n mimeselect = Select(title=\"MIME Type\", value=DEFAULT_UNSELECTED, options=[DEFAULT_UNSELECTED] + SUPPORTED_MIME_TYPES, name=MIME_SELECTION)\n imageselect.on_change('value', lambda attr, old, new: image_selection_change())\n mimeselect.on_change('value', lambda attr, old, new: image_selection_change())\n d.add_root(column(Div(text=\"\"), widgetbox([imageselect, mimeselect]), name=IMAGE_MIME_SELECTION))\n\n if graph_val in [\"table\"]:\n # TODO: limit selectable columns to whose of the same size (table height)\n # use just the field name; don't show properties in the multi-select box\n col_options = [k for k in props if not any (apv in k for apv in [APV_RECVD, APV_SEQNO, APV_MODEL] ) ]\n columnmultiselect = MultiSelect(title=\"Columns:\", value=[], options=col_options, name=COLUMN_MULTISELECT)\n columnmultiselect.on_change('value', lambda attr, old, new: column_selection_change())\n d.add_root(column(Div(text=\"\"), widgetbox([columnmultiselect]), name=COLUMN_SELECTION))\n\n if graph_val in [\"raw\"]:\n p = figure(plot_width=500, plot_height=500,\n background_fill_color=\"white\",\n y_range=(-40, 0), title=\"\", name=FIGURE_MODEL)\n p.xaxis.visible = False\n p.yaxis.visible = False\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n p.text(x='apv_sequence_number',\n y=0,\n text='apv_model_as_string',\n source=d.get_model_by_name(sind),\n text_font_size=\"10pt\",\n text_line_height=0.7,\n text_baseline=\"top\",\n text_align=\"left\")\n p.x_range.follow = \"end\" # don't jam all the data into the graph; \"window\" it\n p.x_range.follow_interval = 1 # don't jam all the data into the graph; \"window\" it\n p.x_range.range_padding = 0\n d.add_root(p)",
"def on_selection(self, analysis_selection, make_new_analysis):\n landmark_indexes = self.landmark_index_from_selection(\n analysis_selection)\n if self.demo_type == DemoType.HYPERSPECTRAL_DEMO:\n # Pass area influenced to the hyperspectral viewer\n self.data_gui.set_static_mask(\n self.analysis.get_area_of_influence(landmark_indexes))\n\n if make_new_analysis:\n self.make_new_analysis(self.analysis, analysis_selection)\n else:\n if self.demo_type == DemoType.LABELLED_DEMO:\n # Pass data indexes to labelled viewer\n self.data_gui.set_image_indexes(\n self.data_index_from_selection(analysis_selection))",
"def on_change(self, attr, old, new):\n n = self.labels.index(new) + 1 # Select 0-indexed\n self.notify(set_figures(n))",
"def img_dict_updated(self, change):\n if change[\"value\"]:\n self.select_dataset(self.io_model.img_dict_default_selected_item)\n self.init_plot_status()",
"def state_chosen_do(cfg, app, win, events):",
"def SelectPresentation(self, event):\n pass",
"def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to 
update with a delay of 15\r\n self.after(15, self.update)",
"def launch_GUV_GUI(self):\n for i in self.parameters['selected_series']:\n print(f\"Analysing series {i}\")\n self.stack.bundle_axes = 'yx'\n finderparams = ParameterList(filename=self.parameters['filename'],\n channel=self.parameters['channel'],\n intensity_channel=self.parameters['intensity_channel'],\n pixel_microns=self.parameters['pixel_microns'])\n if self.has_multiple_series:\n self.stack.default_coords['v'] = i\n finderparams.series = i\n GUV_Control(self.stack, finderparams) # launch the GUI that can find GUVs and let the user remove them\n \n self.quit()",
"def refresh(self):\n\n # Set Graphics scene\n self.setScene(QtGui.QGraphicsScene())\n self._connections = set()\n self._nodes = {}\n self._selection = set()\n self._manipulation_mode = 0\n self._selection_rect = None",
"def image_selection_change():\n\n def return_image(val, model_id, message_name, field_name, mime, sind):\n \"\"\"Returns a URL resolvable by the probe\"\"\"\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url\n\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n image_field = d.get_model_by_name(IMAGE_SELECTION).value.split(\" :\")[0]\n mime = d.get_model_by_name(MIME_SELECTION).value\n\n if image_field != DEFAULT_UNSELECTED and mime != DEFAULT_UNSELECTED:\n plot = figure(plot_width=500, plot_height=500, title=\"\", x_range=Range1d(start=0, end=1), y_range=Range1d(start=0, end=1), name=FIGURE_MODEL)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name, image_field + mime)\n\n _install_callback_and_cds(sind, model_id, message_name,\n {image_field: [return_image, {\"model_id\": model_id,\n \"message_name\": message_name,\n \"field_name\": image_field,\n \"mime\": mime,\n \"sind\": sind}]},\n stream_limit=1)\n plot.image_url(url=image_field, x=0, y=1, h=1, w=1, source=d.get_model_by_name(sind))\n d.add_root(plot)",
"def callback_selectstate(self, attrname, old, new):\n self._update_chart(self.selectstate.value)"
] | [
"0.67221683",
"0.647668",
"0.6142463",
"0.60368323",
"0.6010365",
"0.59839",
"0.594206",
"0.5939723",
"0.5774318",
"0.57701164",
"0.5685508",
"0.56649303",
"0.5623016",
"0.5616354",
"0.5597576",
"0.55696803",
"0.5566506",
"0.5529806",
"0.5504718",
"0.54967684",
"0.5455349",
"0.54481906",
"0.5441665",
"0.5428047",
"0.5419617",
"0.54100496",
"0.540215",
"0.5366627",
"0.536203",
"0.5361787"
] | 0.777694 | 0 |
Creates a color palette compatible with Bokeh from a matplotlib cmap name. | def get_bokeh_palette(cmap):
from bokeh.colors import RGB
from matplotlib import cm
# Solution adapted from
# https://stackoverflow.com/questions/31883097/elegant-way-to-match-a-string-to-a-random-color-matplotlib
m_RGB = (255 * plt.get_cmap(cmap)(range(256))).astype("int")
return [RGB(*tuple(rgb)).to_hex() for rgb in m_RGB] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def palette_from_mpl_name(name):\n if name in CMAPS:\n return CMAPS[name]\n\n rgba = plt.get_cmap(name)(np.linspace(0, 1, 256))\n palette = [to_hex(color) for color in rgba]\n return palette",
"def get_palette(palette_name):\n\n if hasattr(plt.cm, palette_name):\n cmap = getattr(plt.cm, palette_name)\n elif palette_name in list(mpl_cm.cmap_d.keys()):\n cmap = mpl_cm.get_cmap(palette_name)\n else:\n print(\"Error, color option '\", palette_name, \"' not a valid option\")\n sys.exit(1)\n\n return cmap",
"def get_cmap(n, name='Paired'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name='jet'):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(n, name=\"hsv\"):\n return plt.cm.get_cmap(name, n)",
"def get_cmap(cmap=None):\n if cmap:\n if isinstance(cmap, (mpl.colors.Colormap)):\n colormap = cmap\n elif cmap in cmo.cmapnames:\n colormap = cmo.cmap_d[cmap]\n elif cmap in plt.colormaps():\n colormap = plt.get_cmap(cmap)\n else:\n raise ValueError(\n \"Get unrecognised name for the colormap `{}`. Colormaps should be from standard matplotlib set of from cmocean package.\".format(\n cmap\n )\n )\n else:\n colormap = plt.get_cmap(\"Spectral_r\")\n\n return colormap",
"def colormap(cats, mplmap='auto', categorical=None):\n # Should automatically choose the right colormaps for:\n # categorical data\n # sequential data (low, high important)\n # diverging data (low, mid, high important)\n global DEF_SEQUENTIAL\n from matplotlib import cm\n\n if hasattr(cm, 'inferno'):\n DEF_SEQUENTIAL = 'inferno'\n else:\n DEF_SEQUENTIAL = 'BrBG'\n\n # strip units\n units = None # TODO: build a color bar with units\n if hasattr(cats[0], 'magnitude'):\n arr = u.array(cats)\n units = arr.units\n cats = arr.magnitude\n is_categorical = False\n else:\n is_categorical = not isinstance(cats[0], (float, int))\n\n if categorical is not None:\n is_categorical = categorical\n\n if is_categorical:\n values = _map_categories_to_ints(cats)\n if mplmap == 'auto':\n mplmap = DEF_CATEGORICAL\n else:\n values = np.array(list(map(float, cats)))\n if mplmap == 'auto':\n mplmap = DEF_SEQUENTIAL\n\n rgb = _cmap_to_rgb(mplmap, values)\n hexcolors = [webcolors.rgb_to_hex(np.array(c)) for c in rgb]\n return hexcolors",
"def mpl_palette(name, n_colors=6):\n brewer_qual_pals = {\"Accent\": 8, \"Dark2\": 8, \"Paired\": 12,\n \"Pastel1\": 9, \"Pastel2\": 8,\n \"Set1\": 9, \"Set2\": 8, \"Set3\": 12}\n\n if name.endswith(\"_d\"):\n pal = [\"#333333\"]\n pal.extend(color_palette(name.replace(\"_d\", \"_r\"), 2))\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = getattr(mpl.cm, name)\n if name in brewer_qual_pals:\n bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, n_colors + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n\n return _ColorPalette(palette)",
"def cmap_to_pil_palette(cmap):\n # return (255.*np.array(\n # map(lambda x: cmap(x)[0:3], np.linspace(0., 1.,256)))\n # .ravel()).astype('int')\n return (255. * np.array(\n [cmap(x)[:3] for x in np.linspace(0,1,256)]).ravel().astype('int'))",
"def make_colormap(colormap = 'rainbow_r', bins = 256, add_alpha = True, invert_alpha = False, cmap_name = 'costum',\n discrete = False, return_cmap = False):\n \n if isinstance(colormap, str): # if input is string (so existent colormap)\n\n # get colormap\n cmap = cm.get_cmap(colormap)\n\n else: # is list of strings\n cvals = np.arange(len(colormap))\n norm = plt.Normalize(min(cvals),max(cvals))\n tuples = list(zip(map(norm,cvals), colormap))\n cmap = colors.LinearSegmentedColormap.from_list(\"\", tuples)\n \n if discrete == True: # if we want a discrete colormap from list\n cmap = colors.ListedColormap(colormap)\n bins = int(len(colormap))\n\n # convert into array\n cmap_array = cmap(range(bins))\n\n # reshape array for map\n new_map = []\n for i in range(cmap_array.shape[-1]):\n new_map.append(np.tile(cmap_array[...,i],(bins,1)))\n\n new_map = np.moveaxis(np.array(new_map), 0, -1)\n \n if add_alpha: \n # make alpha array\n if invert_alpha == True: # in case we want to invert alpha (y from 1 to 0 instead pf 0 to 1)\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), 1-np.linspace(0, 1, bins))\n else:\n _, alpha = np.meshgrid(np.linspace(0, 1, bins, endpoint=False), np.linspace(0, 1, bins, endpoint=False))\n\n # add alpha channel\n new_map[...,-1] = alpha\n cmap_ext = (0,1,0,1)\n else:\n new_map = new_map[:1,...].copy() \n cmap_ext = (0,100,0,1)\n \n fig = plt.figure(figsize=(1,1))\n ax = fig.add_axes([0,0,1,1])\n # plot \n plt.imshow(new_map,\n extent = cmap_ext,\n origin = 'lower')\n ax.axis('off')\n\n if add_alpha: \n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_alpha_bins_%d.png'%bins)\n else:\n rgb_fn = op.join(op.split(cortex.database.default_filestore)[\n 0], 'colormaps', cmap_name+'_bins_%d.png'%bins)\n #misc.imsave(rgb_fn, new_map)\n plt.savefig(rgb_fn, dpi = 200,transparent=True)\n\n if return_cmap:\n return cmap\n else:\n return rgb_fn",
"def cmap(num,cmap = plt.cm.gist_earth_r):\n return cmap(np.linspace(0, 1, num))",
"def generate_n_colors(n, cmap_name='tab20'):\n pt_region_colormap = plt.get_cmap(cmap_name)\n max_i = len(pt_region_colormap.colors)\n return [pt_region_colormap(i % max_i) for i in range(n)]",
"def color_mapper(cmap: str, vmin: float=0, vmax: float=1, alpha: float=1):\n\n alpha = int(255 * alpha)\n\n cmap = plt.get_cmap(cmap)\n cNorm = Normalize(vmin=vmin, vmax=vmax)\n scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)\n\n def mapper(value):\n \"\"\"\n This is the function that gets returned\n\n Parameters\n ----------\n value A number between vmin and vmax\n\n Returns\n -------\n An RGB color\n\n \"\"\"\n\n out = scalarMap.to_rgba(value)\n\n if isinstance(out, tuple):\n return tuple([255 * out[i] for i in range(3)] + [alpha])\n\n elif isinstance(out, np.ndarray):\n out[:, :-1] *= 255\n out[:, 3] = alpha\n return out\n\n return mapper",
"def get_density_cmap():\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm",
"def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)",
"def discrete_cmap(n_colors: int, base_cmap: str) -> Colormap:\r\n # https://gist.github.com/jakevdp/91077b0cae40f8f8244a\r\n base = plt.cm.get_cmap(base_cmap)\r\n color_list = base(np.linspace(0, 1, n_colors))\r\n cmap_name = base.name + str(n_colors)\r\n\r\n return base.from_list(cmap_name, color_list, n_colors)",
"def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap",
"def colormap(self):\n palette = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),\n (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232),\n (250, 170, 160), (230, 150, 140), (70, 70, 70),\n (102, 102, 156), (190, 153, 153), (180, 165, 180),\n (150, 100, 100), (150, 120, 90), (153, 153, 153),\n (153, 153, 153), (250, 170, 30), (220, 220, 0),\n (107, 142, 35), (152, 251, 152), (70, 130, 180),\n (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100),\n (0, 0, 230), (119, 11, 32), (0, 0, 142)]\n\n num_colors = self[0][1].shape[-1]\n colormap = np.zeros((num_colors, 3), dtype=int)\n for i in range(num_colors):\n colormap[i, ...] = palette[self._update_labels_dict[i]]\n return colormap",
"def create_cycler_colors(color_scheme):\n cmap = cm.get_cmap(color_scheme) # PiYG\n cycler_colors = []\n\n for i in range(cmap.N):\n rgba = cmap(i)\n # rgb2hex accepts rgb or rgba\n cycler_colors.append(matplotlib.colors.rgb2hex(rgba)) \n \n return cycler_colors",
"def set_cmap(self, cmap: str | Colormap | matplotlib.colors.Colormap):\n if self._image is None:\n raise RuntimeError(\"You add data with `set_data` before setting cmaps\")\n self._image.set_cmap(cmap)\n self._widget._mgui_set_value(self._image.make_image())",
"def cmap(self):\n return self._palette",
"def _color_palette(n_colors=4, cmap='viridis', extrema=False):\n if extrema:\n bins = np.linspace(0, 1, n_colors)\n else:\n bins = np.linspace(0, 1, n_colors * 2 - 1 + 2)[1:-1:2]\n\n cmap = plt.get_cmap(cmap)\n palette = list(map(tuple, cmap(bins)[:, :3]))\n return palette",
"def set_palette(name=\"default\", ncontours=255):\n\n if name == \"gray\" or name == \"grayscale\":\n stops = [0.00, 0.34, 0.61, 0.84, 1.00]\n red = [1.00, 0.95, 0.95, 0.65, 0.15]\n green = [1.00, 0.85, 0.7, 0.5, 0.3]\n blue = [0.95, 0.6, 0.3, 0.45, 0.65]\n # elif name == \"whatever\":\n # (define more palettes)\n elif name == \"chris\":\n stops = [ 0.00, 0.34, 0.61, 0.84, 1.00 ]\n red = [ 1.0, 0.95, 0.95, 0.65, 0.15 ]\n green = [ 1.0, 0.85, 0.7, 0.5, 0.3 ]\n blue = [ 0.95, 0.6 , 0.3, 0.45, 0.65 ]\n else:\n # default palette, looks cool\n stops = [0.00, 0.34, 0.61, 0.84, 1.00]\n red = [0.00, 0.00, 0.87, 1.00, 0.51]\n green = [0.00, 0.81, 1.00, 0.20, 0.00]\n blue = [0.51, 1.00, 0.12, 0.00, 0.00]\n\n s = array('d', stops)\n r = array('d', red)\n g = array('d', green)\n b = array('d', blue)\n\n npoints = len(s)\n rt.TColor.CreateGradientColorTable(npoints, s, r, g, b, ncontours)\n rt.gStyle.SetNumberContours(ncontours)",
"def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors",
"def getColorMap(colors):\n # Normalise RGBs\n norm_colors = []\n for color in colors:\n norm_colors.append([val / 255. for val in color])\n # create color map\n cmap = cols.ListedColormap(norm_colors)\n\n return cmap",
"def grayscale_cmap(cmap):\n # cmap = cm.get_cmap(cmap)\n map_colors = cmap(np.arange(cmap.N))\n\n # convert RGBA to perceived grayscale luminance\n # cf. http://alienryderflex.com/hsp.html\n RGB_weight = [0.299, 0.587, 0.114]\n luminance = np.sqrt(np.dot(map_colors[:, :3] ** 2, RGB_weight))\n map_colors[:, :3] = luminance[:, np.newaxis]\n\n return colors.LinearSegmentedColormap.from_list(cmap.name + \"_gray\", map_colors, cmap.N)"
] | [
"0.8101128",
"0.6942909",
"0.6776598",
"0.6741528",
"0.6741528",
"0.66666794",
"0.66659963",
"0.66659963",
"0.66659963",
"0.6503461",
"0.6470813",
"0.6413775",
"0.63696545",
"0.6352337",
"0.631037",
"0.61285675",
"0.61251086",
"0.6121512",
"0.6044165",
"0.60116994",
"0.60105366",
"0.5997186",
"0.59899193",
"0.59587944",
"0.59356815",
"0.5935643",
"0.59344226",
"0.5924718",
"0.5918142",
"0.5903378"
] | 0.6990008 | 1 |
Instead of returning a cursor object, find_one() returns one document. So when you look up a document by its _id (the _id field is always unique), use the find_one() method. | def find_one():
fmter.tpl._straightline("one document", 100)
result = users.find_one({})
print(type(result))
ppt(result)
fmter.tpl._straightline("none result", 100)
result = users.find_one({"_id": 100})
print(type(result))
ppt(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_one(self, collection, query):\n obj = getattr(self.db, collection)\n result = obj.find_one(query)\n return result",
"def first(self, **kwargs):\n return self.find(**kwargs).first()",
"def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist",
"def get_one(collection: Collection, query: Dict[str, Any]):\n data = collection.find_one(query)\n if data is None:\n raise CannotFindItemInDatabase(query, data, collection.name)\n return data",
"def find_one(self, criteria):\n return self.connection.find_one(criteria)",
"async def find_one(self, collection_name: str, filter_dict: dict, *args, catch_error: bool = True, **kwargs):\n collection = self.database.get_collection(collection_name)\n result = None\n try:\n result = await collection.find_one(filter_dict, *args, **kwargs)\n self.debug_log(collection_name, f\"Found a document {result} in '{collection_name}' \"\n f\"with options '{args, kwargs}'\")\n except BaseException as e:\n self.error_log(collection_name, f\"Could not execute 'find_one'-command in '{collection_name}' \"\n f\"with options '{args, kwargs}'\",\n err=e)\n if not catch_error:\n raise e\n return result",
"def fetchone(self) -> Any:\n return self.cursor.fetchone()",
"def get_single_data(document_id):\n data = collection.find_one({'_id': ObjectId(document_id)})\n return data",
"async def find_one(\n self, query: Union[dict, MotycQuery] = None, *,\n _id=None,\n inject_default_id: bool = None\n ) -> T:\n\n mongo_query = self.build_mongo_query(query, _id=_id)\n\n document = await self.collection.find_one(mongo_query)\n\n if document is None: raise NotFound(mongo_query)\n return self.parse_document(document, inject_default_id=inject_default_id)",
"def find_one(cls, query, select=None, as_dict=False):\n record = cls.collection.find_one(query, select)\n\n return record if as_dict else cls(record)",
"async def find_one(self, **query):\n\n return await self._expand(await self.db.get_one(**query))",
"def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()",
"def find_one(self, user_id):\n pass",
"def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))",
"def find_by_id(cls, object_id):\n try:\n return mongo_db[cls.__collection__].find_one({\"_id\": ObjectId(object_id)})\n except InvalidId:\n # TODO: Log the exception\n print('Invalid bson id: {}'.format(object_id))\n return None",
"def first(self):\n try:\n data = self.get_cursor()[0]\n return self.from_(**self.prepare_data(data))\n except IndexError:\n return None",
"def find_by_id(cls, doc_id: str):\n document = None\n if doc_id:\n try:\n document = cls.query.get(doc_id)\n except Exception as db_exception: # noqa: B902; return nicer error\n current_app.logger.error('Db2Document.find_by_id exception: ' + str(db_exception))\n raise DatabaseException(db_exception)\n if document:\n document.strip()\n return document",
"def get_one(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).first()\n self.lock.release()\n return result",
"async def get_one(self, where: t.Mapping[str, t.Any]) -> t.Optional[Model]:\n\n data = await self.collection.find_one(where)\n return self.model_class(**data) if data else None",
"def fetchone(cursor):\n\t# type: (Cursor, ) -> Any\n\n\trows = cursor.fetchall()\n\tif len(rows) == 0:\n\t\traise NoResult(\"No result found\")\n\telif len(rows) == 1:\n\t\treturn rows[0]\n\telse:\n\t\traise InconsistentState(\"More than one result found\")",
"def find_one(self, query, collection_name='talent5__staff_collection'):\r\n collection = self.get_collection(collection_name)\r\n\r\n if collection is None:\r\n return None\r\n else:\r\n return collection.find_one(query)",
"def find_one(cls, attr):\n result = cls.db().find_one(attr, True)\n\n if result is not None:\n return cls(result)\n\n return None",
"def get(self, query_data=None, id_obj=None):\n if id_obj:\n return self.collection.find_one({'_id': id_obj})\n return self.collection.find_one(query_data)",
"def fetchone(self):\n row = self.cursor.fetchone()\n\n if row is None:\n return None\n\n if self.model.single:\n return self.__instance_from_db(self.model, row)\n else:\n return tuple(self.__instance_from_db(m, row) for m in self.model.models)",
"def document(self):\n query = {\"_id\": ObjectId(self.document_id)}\n return Document(get_collection(\"documents\").find_one(query))",
"def find_one_byprimaryid(cls, primaryid, defaultval = None):\n return cls.dbm().modelclass_find_one_byprimaryid(cls, primaryid, defaultval)",
"def first_or_raise(self):\n res = super(CustomQuery, self).first()\n if not res:\n raise NotFoundException\n return res",
"def getFirstDocument(address=\"\", database=\"\", collection=\"\"):\n\n document = []\n client = connectMongo(address, database, collection)\n\n document.append(client.find_one())\n\n return document",
"def find_first(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()",
"def get(self, id, model_type=None):\n # If collection is not specified, use the collection when this client is\n if not model_type:\n collection = self._collection\n else:\n collection = self._db[model_type]\n\n print 'mongo.get(): id={}'.format(id)\n if id:\n obj = collection.find_one({'_id': ObjectId(id)})\n if not obj:\n raise DbProviderError(\"DB record for {} is not found\".format(id))\n obj['_id'] = str(obj['_id'])\n else:\n obj = {}\n return obj"
] | [
"0.7754107",
"0.7186727",
"0.7182808",
"0.715503",
"0.69664955",
"0.6931495",
"0.69100857",
"0.6904317",
"0.6898412",
"0.6841113",
"0.68210936",
"0.6808862",
"0.679702",
"0.6777635",
"0.67716956",
"0.6718852",
"0.6685826",
"0.66839063",
"0.66428053",
"0.6642651",
"0.66407835",
"0.6631507",
"0.66231894",
"0.65974617",
"0.65802956",
"0.65712774",
"0.65319496",
"0.6519915",
"0.6492244",
"0.64813393"
] | 0.7955518 | 0 |
Fills out the model by invoking C{svnlook} | def _populateModel(self):
self.repoPath = self.argv[1]
self.rev = self.argv[2]
self.model.rev = self.rev
self.model.repo = os.path.split(self.repoPath)[-1]
self.prefix = (self.addRepoPrefix() and ('/' + self.model.repo)) or ''
# First, get the user and log message
lines = self._svnlook('info')
self.model.user = lines[0][:-1]
self.model.log = ''.join(lines[3:]).strip()
# Now build an initial tree of file and tree changes
for line in self._svnlook('changed'):
action = self.actions[line[0]]
target = '/' + line[4:-1]
if target.endswith('/'):
directory = self.model.directory(self.prefix + target)
directory.action = action
else:
parts = target.split('/')
name = parts[-1]
directoryPath = '/' + '/'.join(parts[0:-1]) + '/'
file = File(name, self.model.directory(self.prefix + directoryPath), action)
# Markers to tell us when we hit a new diff
markers = ['Modified', 'Added', 'Copied', 'Deleted', 'Property changes on']
        # Reconstruct each diff by parsing through the output of svnlook line by line
diffs = []
partialDiff = None
#A marker word after a "____" line is a change in a property and shouldn't be added as a change
#in a file. InProperty keeps track of this. If it's 0 this is a normal line, any larger
#and it's a property line.
inProperty = 1
for line in self.getDiffLines():
inProperty = max(0, inProperty-1)
if line == "___________________________________________________________________\n":
inProperty = 2
# Look for Modified:, Added:, etc.
if line[0:line.find(':')] in markers and not inProperty > 0:
# Handle starting a new diff
partialDiff = [line]
diffs.append(partialDiff)
elif partialDiff:
partialDiff.append(line)
if len(diffs) == 0:
for file in self.model.files():
file.delta = '<Unavailable>'
file.diff = ''
# And finally parse through the diffs and save them into our tree of changes
for diff in diffs:
# Use [:-1] to leave of the trailing \n
start = diff[0].find(': ') + 2
stop = diff[0].find('(') - 1 # -1 ignores the space before the paren
if stop == -2: stop = len(diff[0])
filePath = '/' + diff[0][:-1][start:stop]
# This could be a file or a directory - going ahead with the .file()
# call for most directories is fine as it will just return null.
#
        # However, root / will exception out as an invalid file path so
# just special case it
if filePath == '/':
file = None
else:
file = self.model.file(self.prefix + filePath)
# Maybe its a directory
if file:
isFile = True
else:
file = self.model.directory(self.prefix + filePath + '/')
isFile = False
if not diff[0].startswith('Property changes on:'):
file.delta, file.diff = self._parse_diff(diff)
else:
if file.diff:
# Only files will already have a diff set
file.diff = file.diff + '\n\n' + ''.join(diff)
else:
# If the 'Property changes on' line is here without a
# file.diff, that file.diff will never come because it would
# have been printed before us
if isFile:
sep = '===================================================================\n\n'
file.diff = ''.join([sep] + diff)
file.delta = '+0 -0'
else:
file.diff = ''.join(diff) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_model(self):\n pass # TODO: Implement this.",
"def build_model():",
"def update_model(self):\n pass",
"def updateModel(self):\n pass",
"def loadAdjustedModel(self):\r\n # Load model in GUI\r\n addModel(self.trcFilePath.replace('.trc','.osim'))",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()",
"def revision(self):\n self.r3.reuse = self.r2\n self.r3.case = self.case\n self.r3.revise()\n self.r2.predictionGenre = self.case.playlist_genre\n self.r2.predictionSubGenre =self.case.playlist_subgenre\n print()",
"def model(self, new_model):\n if self.locations.empty:\n raise RuntimeError(\"Cannot create a model until locations exist\")\n writer = ModelWriter(self, self.dismod_file)\n new_model.write(writer)\n writer.close()",
"def __init__(self):\n this = _diff.new_svn_patch_t()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def reset(self):\n self.vna.write(edit_list(self.model))\n self.vna.write(clear_list(self.model))",
"def save(self):\n\n if not self.revertable:\n return\n\n state = {}\n for x in self.toSave:\n state[x] = deepcopy(self.toSave[x]())\n\n #made a new model, reparent it so it displays\n state[\"model\"].reparentTo(base.render)\n\n #add it to the stack\n self.stack.append(state)\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,-THING_REVERT_DISTANCE))",
"def ClearModels(self):\n self._modelFileNames = []\n self._models = []\n self.Modified(readAgain=True)",
"def test_model_found(arguments):\n ...",
"def handle(self, *args, **kwargs):\n # Part model\n try:\n print(\"Rebuilding Part objects\")\n\n from part.models import Part\n Part.objects.rebuild()\n except Exception:\n print(\"Error rebuilding Part objects\")\n\n # Part category\n try:\n print(\"Rebuilding PartCategory objects\")\n\n from part.models import PartCategory\n PartCategory.objects.rebuild()\n except Exception:\n print(\"Error rebuilding PartCategory objects\")\n\n # StockItem model\n try:\n print(\"Rebuilding StockItem objects\")\n\n from stock.models import StockItem\n StockItem.objects.rebuild()\n except Exception:\n print(\"Error rebuilding StockItem objects\")\n\n # StockLocation model\n try:\n print(\"Rebuilding StockLocation objects\")\n\n from stock.models import StockLocation\n StockLocation.objects.rebuild()\n except Exception:\n print(\"Error rebuilding StockLocation objects\")\n\n # Build model\n try:\n print(\"Rebuilding Build objects\")\n\n from build.models import Build\n Build.objects.rebuild()\n except Exception:\n print(\"Error rebuilding Build objects\")",
"def update_model(command):\n namespace = app.main(command)\n assert namespace.command == 'um' or namespace.command == \"updatemodel\"",
"def test_po_migration(self):\n PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\n for ii in range(10):\n\n po = PurchaseOrder.objects.get(reference=f\"{ii}-abcde\")\n self.assertEqual(po.extra_lines.count(), 1)\n self.assertEqual(po.lines.count(), 1)\n\n # TODO @matmair fix this test!!!\n # SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n # for ii in range(10):\n # so = SalesOrder.objects.get(reference=f\"{ii}-xyz\")\n # self.assertEqual(so.extra_lines, 1)\n # self.assertEqual(so.lines.count(), 1)",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def model(self):",
"def model(self):",
"def model(self):",
"def model(self):",
"def model(self):",
"def update(self,wsname,fname,storelist=[]):\n import os\n from ROOT import RooWorkspace, TFile\n\n # Create the ws with the model and data available\n w = RooWorkspace(wsname,'Workspace')\n wsImport = getattr(w,'import')\n # Put the models \n for rawtuple in self.__models.values():\n model = rawtuple[0]\n anythingelse = rawtuple[1:]\n wsImport(model)\n # Put whatever the user want to store\n for _item in storelist:\n wsImport(_item)\n\n # Check if the fname already exist\n file_exist = os.path.isfile(fname)\n if file_exist:\n # Create a new auxiliar file, to provisionaly\n # store the ws \n auxfname = '_auxroot.root'\n w.writeToFile(auxfname)\n # Previously delete the old Workspace, if any\n _rootfile = TFile(fname)\n _rootfile.Delete(wsname+';*')\n _rootfile.Delete(\"ProcessID*;*\")\n _rootfile.Close()\n # Open the auxiliary file\n _aux = TFile(auxfname)\n # and copy the ws to the rootfile\n w = _aux.Get(wsname)\n # and copy the ws to the rootfile\n w.writeToFile(fname,False)\n\n if file_exist:\n # And closing and removing auxfile\n _aux.Close()\n os.remove(auxfname)",
"def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()",
"def test_persistence_old_model(self):\n loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))\n self.assertEqual(loaded.kv.syn0.shape, (239, 2))\n self.assertEqual(len(loaded.kv.vocab), 239)\n self.assertEqual(loaded.size, 2)\n self.assertEqual(len(loaded.all_relations), 200)",
"def load_cobra_model(self, model):\n self.model = ModelPro(model)\n for g in self.model.genes:\n if self.genes_dir:\n g.root_dir = self.genes_dir\n g.protein.pdb_file_type = self.pdb_file_type\n self.genes = self.model.genes\n\n log.info('{}: loaded model'.format(model.id))\n log.info('{}: number of reactions'.format(len(self.model.reactions)))\n log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))\n log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,\n custom_spont_id=self.custom_spont_id)))\n log.info('{}: number of metabolites'.format(len(self.model.metabolites)))\n log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '\n 'objects, and will be for any new ones')",
"def build_model(self):\n pass",
"def build_model(self):\n pass"
] | [
"0.5480063",
"0.54420656",
"0.5437654",
"0.53411984",
"0.5257772",
"0.5196333",
"0.51816934",
"0.50881743",
"0.5072814",
"0.50495183",
"0.50386804",
"0.5009755",
"0.499261",
"0.49754384",
"0.49611595",
"0.49506703",
"0.49395007",
"0.4916408",
"0.4916408",
"0.49122566",
"0.49122566",
"0.49122566",
"0.49122566",
"0.49122566",
"0.49096972",
"0.49089012",
"0.48955873",
"0.48922318",
"0.48882458",
"0.48882458"
] | 0.69316125 | 0 |
Generates the RML code for the header | def cabecalho(dic_cabecalho,dat_ordem,imagem):
tmp=''
tmp+='\t\t\t\t<image x="4.1cm" y="26.9cm" width="74" height="60" file="' + imagem + '"/>\n'
tmp+='\t\t\t\t<lines>3.3cm 26.3cm 19.5cm 26.3cm</lines>\n'
tmp+='\t\t\t\t<setFont name="Helvetica-Bold" size="15"/>\n'
tmp+='\t\t\t\t<drawString x="6.7cm" y="28.1cm">' + dic_cabecalho['nom_casa'] + '</drawString>\n'
tmp+='\t\t\t\t<setFont name="Helvetica" size="11"/>\n'
tmp+='\t\t\t\t<drawString x="6.7cm" y="27.6cm">' + 'Estado de ' + dic_cabecalho['nom_estado'] + '</drawString>\n'
return tmp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(self):",
"def mezclar_bolsa(self):",
"def load_ccs9():\n ccs9 = pd.read_csv(pkg_resources.resource_filename(__name__,'$dxref 2015.csv'))\n ccs9 = ccs9.reset_index()\n for col in ccs9.columns:\n ccs9.loc[:,col]=ccs9[col].str.strip('\\'')\n ccs9.columns=ccs9.iloc[0,:]\n ccs9 = ccs9.iloc[1:,:]\n ccs9 = ccs9.replace(r'^\\s*$', np.nan, regex=True)\n ccs9 = ccs9.loc[ccs9['ICD-9-CM CODE'].notnull(),:]\n ccs9.loc[:,'ICD-9-CM CODE'] = ccs9['ICD-9-CM CODE'].str.replace(' ','')\n ccs9.loc[:,'CCS CATEGORY'] = ccs9['CCS CATEGORY'].str.replace(' ','')\n ccs9 = ccs9.iloc[:,0:4] \n ccs9_labels = pd.read_csv(pkg_resources.resource_filename(__name__,'dxlabel 2015.csv'))\n ccs9 = ccs9.merge(ccs9_labels,how='left',left_on='CCS CATEGORY',right_on='CCS DIAGNOSIS CATEGORIES')\n ccs9.drop('CCS CATEGORY DESCRIPTION',axis=1,inplace=True)\n ccs9.drop('CCS DIAGNOSIS CATEGORIES',axis=1,inplace=True)\n ccs9.columns = [i.replace('CCS DIAGNOSIS CATEGORIES LABELS','CCS CATEGORY DESCRIPTION') for i in ccs9.columns]\n return ccs9",
"def lcia_methods__metadata(self):\r\n with UnicodeReader(os.path.join(dirpath, \"categoryUUIDs.csv\"), \r\n encoding='latin-1', \r\n delimiter=dt) as csv_file:\r\n next(csv_file) \r\n csv_data = [{'name': (line[0], line[2], line[4]),\r\n 'description': line[7]\r\n } for line in csv_file]\r\n \r\n filename = \"LCIA_implementation_2019.xlsx\" # this was donwloaded and updated on Oct 2019 from ecoinvent website. \r\n wb = xlrd.open_workbook(os.path.join(dirpath, filename))\r\n #characterizaton factors\r\n sheet= wb.sheet_by_name(\"CFs\")\r\n cf_data = [{\r\n 'method': (sheet.cell(row, 0).value,\r\n sheet.cell(row, 1).value,\r\n sheet.cell(row, 2).value),\r\n 'name': sheet.cell(row, 3).value,\r\n 'categories': (sheet.cell(row, 4).value, sheet.cell(row, 5).value),\r\n 'amount': sheet.cell(row, 7).value\r\n }\r\n for row in range(1, sheet.nrows)\r\n if sheet.cell(row, 0).value not in \r\n {'selected LCI results, additional', 'selected LCI results'} and isinstance(sheet.cell(row, 7).value, Number)]\r\n #units\r\n sheet= wb.sheet_by_name(\"units\")\r\n units = {(sheet.cell(row, 0).value, sheet.cell(row, 1).value, \r\n sheet.cell(row, 2).value): sheet.cell(row, 4).value for row in range(1, sheet.nrows)}\r\n return csv_data, cf_data, units, filename",
"def build(c):",
"def mol_to_cbor(mol: masm.Molecule) -> str:\n serializer = masm.JsonSerialization\n cbor_format = serializer.BinaryFormat.CBOR\n serialization = serializer(mol)\n cbor_binary = serialization.to_binary(cbor_format)\n return serializer.base_64_encode(cbor_binary)",
"def get_model_and_assets():\n\n return common.read_model('cloth_corner.xml'),common.ASSETS",
"def load_data(self):",
"def presenetCar():",
"def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] \n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)",
"def processa_compra(self, compra):\n\n print(\"Boleto criado!\\n\" + compra.nota_fiscal)",
"def convertion_binaire_arbre(self):\r\n binary_code = self.root.conversion_binaire('')\r\n binary_dict = {}\r\n binary_code = binary_code.strip().split(\"\\n\")\r\n for element in binary_code:\r\n binary_dict[element.split(\":\")[0]] = element.split(\":\")[1]\r\n return binary_dict",
"def agregar_bolsa(self, letra, cantidad):",
"def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data",
"def _download_cxr_model(self):\n file_id = \"1KIsLmVv8jKTVG_LxchMZAvR7rugHy7uB\"\n download_from_google_drive(file_id=file_id, folder=\"data/\", name=\"covid_cxr.zip\")",
"def read_acbr(self):\n return self.ACBR",
"def model() -> Any:\n with open(\"airbnb_regressor.pickle\",\"rb\") as f:\n model = pickle.load(f)\n return model",
"def CL(self):",
"def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)",
"def archivoXlFormateado(archivo):\r\n return ow(archivo, formatting_info=True)",
"def crf_entities():\n if not os.path.isfile(PATH_ENTITIES['crf']['model']):\n print('downloading ENTITIES frozen CRF model')\n download_file(\n S3_PATH_ENTITIES['crf']['model'], PATH_ENTITIES['crf']['model']\n )\n with open(PATH_ENTITIES['crf']['model'], 'rb') as fopen:\n model = pickle.load(fopen)\n return CRF(model)",
"def climat_model_build(self) :\n dict_climat = dict()\n list_state = self._df.ORIGIN_STATE_ABR.unique().tolist()\n # Climat tempéré océanique : 1\n dict_climat['WA']=1\n dict_climat['OR']=1\n\n # Climat tempéré continental sec : 2\n list_climate_2 = ['ID','MT','WY','UT','CO','ND','SD','NE','KS','OK']\n for state in list_climate_2 :\n dict_climat[state]=2\n\n # Climat tempéré continental pacifique : 3\n\n # Climat sub-tropical : 4\n list_climate_4 = ['VA','NC','GA','FL','AL','TN','MS','LA','AR']\n for state in list_climate_4 :\n dict_climat[state]= 4\n\n # Climat Aride : 5\n list_climate_5 = ['AZ','TX','NM','NV']\n for state in list_climate_5 :\n dict_climat[state]= 5\n\n # Climat tempéré méditéranéen : 6\n list_climate_6 = ['CA']\n for state in list_climate_6 :\n dict_climat[state]= 6\n\n for state in list_state :\n if state not in dict_climat.keys() :\n dict_climat[state] = 3\n\n self.dict_climat = dict_climat\n self._df['ORIGIN_CLIMAT'] = self._df.ORIGIN_STATE_ABR.apply(self._state_climate_build)\n self._df['DEST_CLIMAT'] = self._df.DEST_STATE_ABR.apply(self._state_climate_build)",
"def on_load(self):\n filename = QtGui.QFileDialog.getOpenFileName(self, \"Open file\", \"\", \"*.scc\")\n print(\"Load file \", filename)\n if filename == \"\":\n return\n f = open(filename, mode=\"rb\")\n state = pickle.load(f)\n f.close() \n cmp = ComponentRect()\n cmp.set_state(state)\n self.mdl.set_component(cmp)",
"def mol_from_cbor(cbor_str: str) -> masm.Molecule:\n serializer = masm.JsonSerialization\n cbor_binary = serializer.base_64_decode(cbor_str)\n cbor_format = serializer.BinaryFormat.CBOR\n serialization = serializer(cbor_binary, cbor_format)\n return serialization.to_molecule()",
"def descompactar(caminho, nome, formato):\n\n # Abre e le o arquivo\n arquivo = Arquivo(caminho, nome, formato)\n cabecalho_compactado = arquivo.ler()\n\n #----------------------------------------\n\n # Descompactacao do cabecalho por meio do gzip\n cabecalho = g.decompress(cabecalho_compactado)\n\n #----------------------------------------\n\n # Cabecalho\n numero_magico = int.from_bytes(cabecalho[0:1], byteorder=\"big\") \n if numero_magico != 42:\n exit()\n\n largura = int.from_bytes(cabecalho[1:3], byteorder=\"big\")\n altura = int.from_bytes(cabecalho[3:5], byteorder=\"big\")\n quantidade_cores = int.from_bytes(cabecalho[5:6], byteorder=\"big\")\n lista = list(cabecalho[6:262])\n conteudo = list(cabecalho[262:len(cabecalho)])\n\n #----------------------------------------\n \n # Amplia para 8 bits\n bit = Bitwise(6)\n imagem_rotulada = bit.ampliar(conteudo, largura*altura)\n\n #----------------------------------------\n\n # Cria a tabela\n matriz = [[lista[y], lista[y+1], lista[y+2], lista[y+3]] for y in range(0, len(lista), 4)]\n tabela = Tabela(matriz)\n\n #----------------------------------------\n\n # Cria um objeto da classe Imagem\n final = Imagem(caminho, nome+\"_som\", \"TIFF\")\n # Salva a imagem\n final.salvar(largura, altura, imagem_rotulada, tabela)",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks"
] | [
"0.55839115",
"0.55125",
"0.53216106",
"0.5215636",
"0.5121315",
"0.5116958",
"0.51138484",
"0.5088174",
"0.49747437",
"0.4962377",
"0.49491808",
"0.49370742",
"0.4935785",
"0.4934977",
"0.49025398",
"0.4889983",
"0.48558292",
"0.4849866",
"0.48489085",
"0.48460174",
"0.48455277",
"0.48432562",
"0.48404643",
"0.48305163",
"0.4825298",
"0.48055714",
"0.48055714",
"0.48055714",
"0.48055714",
"0.480072"
] | 0.55939573 | 0 |
Retrieve a file from the file storage. | def storage_get_file(self, group='', key=''):
try:
obj = None
content = None
if key != '':
if self.config['type'] == 's3':
obj = self.s3.Object(bucket_name=self.bucket, key='corr-{0}s/{1}'.format(group,key))
res = obj.get()
content = res['Body'].read()
elif self.config['type'] == 'filesystem':
with open('{0}/corr-{1}s/{2}'.format(self.storage_path, group, key), "rb") as obj:
content = obj.read()
else:
content = None
except:
print(traceback.print_exc())
content = None
try:
if self.config['type'] == 's3':
file_buffer = BytesIO()
elif self.config['type'] == 'filesystem':
file_buffer = BytesIO()
file_buffer.write(content)
file_buffer.seek(0)
return file_buffer
except:
self.app.logger.error(traceback.print_exc())
print(traceback.print_exc())
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n try:\n file_obj = open(file_path, \"r\")\n except IOError:\n return\n else:\n return file_obj.read()",
"def get_file(URI):\n return file_fabric.get_class(URI).get_content(URI)",
"def file(self):\n if self.state == self.STATE_COMPLETED:\n return default_storage.open(self.path)\n return None",
"def get_file(self, path):\n file = self.get('data_request?id=file¶meters=%s' % path)\n return file",
"def get(self, filename=None):\n try:\n return self.storage.get(filename)\n except FileNotFoundError:\n abort(404, message='File %s does not exist' % filename)\n except BaseException:\n message = 'Failed to list the files of storage'\n if filename:\n message = 'Failed to get the file ' + filename\n\n abort(500, message=message)\n\n LOGGER.error('A generic exception has occurred.', exc_info=True)",
"def getFile(self, resource):\n resource = self.parseUrl(resource, 'files')\n\n res = self.getRequest(resource)\n fObj = vsdModels.File(**res)\n return fObj",
"def get(self, filepath):\n try:\n collname = '%s.files' % self.bucketname\n coll = Collection(self.db, collname)\n if coll:\n doc = coll.find_one({'filename': str(filepath)}, sort=[('uploadDate', -1)])\n if doc:\n id = doc['_id']\n gout = self.gridfs.get(ObjectId(id))\n if gout:\n content = gout.read()\n gout.close()\n return content\n except Exception, e:\n print e\n return None",
"def get_file(self, path):\n return self._files.get(self._get_rel_path(path))",
"def get_file(self, name):\n return self.files[name]",
"def get_file(service, file_id):\n return service.files().get(fileId=file_id).execute()",
"def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl",
"def get_file(self):\n return self.theFile",
"def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()",
"def get_file(self, sys_id):\n url = \"{}/file\".format(self._target(sys_id))\n r = self._client.session.get(url, stream=True)\n return r",
"def get_file(self, path):\n return self.client._perform_raw(\n \"GET\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))",
"async def get_file(self, file_id: base.String) -> types.File:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.GET_FILE, payload)\n\n return types.File(**result)",
"def GetFile(file_name):\n\n the_file = None\n\n try:\n the_file = open(file_name, 'rb')\n \n except IOError:\n the_file = None\n \n return the_file",
"def get_file(self, filename, handler=False):\n result = None\n if self.exists(filename):\n file_path = join_paths(self.path, filename)\n if handler:\n result = open(file_path, 'rb')\n else:\n result = file_path\n return result",
"def get(self, resource_id, file_id):\n d = Deposition.get(resource_id, user=current_user)\n df = d.get_file(file_id)\n if df is None:\n abort(404, message=\"File does not exist\", status=404)\n return d.type.marshal_file(df)",
"def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)",
"def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file",
"def get_file(self, key):\n result = (None, None)\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n content_type, _ = mimetypes.guess_type(path)\n with open(path, \"rb\") as file:\n result = content_type, file.read()\n return result",
"def get_file(self, file_path):\n try:\n return self._files[file_path]\n except KeyError:\n file = File()\n self._files[file_path] = file\n return file",
"def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))",
"def get_file(self, file_id):\n LOG.debug(\"Getting a file from mattermost\")\n url = '%s/api/v4/files/%s' % (self.server_url, file_id)\n LOG.debug(\"Sending: %s\", url)\n response = self._request(self._session.get, url)\n\n if response.status_code != 200:\n raise RuntimeError(\"Server unhappy. (%s)\", response)\n\n return response.content",
"def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)",
"def get_file(self, filename):\r\n\r\n return File.from_name(self, filename)",
"def _get_file(self, path: str) -> Tuple[str, bytes]:\n self._trace(\"fetching: %s\" % path)\n meta, resp = self._connection.files_download(path)\n return (meta.rev, resp.content)",
"def file(self) :\n\t\ttry :\n\t\t\treturn self._file\n\t\texcept Exception as e:\n\t\t\traise e",
"def web_get_file(self, url):\n try:\n print(url)\n response = requests.get(url, verify=False)\n file_buffer = BytesIO(response.content)\n file_buffer.seek(0)\n return file_buffer\n except:\n print(traceback.print_exc())\n return None"
] | [
"0.78756535",
"0.7461347",
"0.7436858",
"0.7395782",
"0.7368263",
"0.7213877",
"0.72103053",
"0.713844",
"0.7136062",
"0.7094323",
"0.7046528",
"0.70224756",
"0.6999044",
"0.6987379",
"0.6954168",
"0.6929677",
"0.6907303",
"0.6901869",
"0.68689317",
"0.6859872",
"0.6847875",
"0.68420213",
"0.6820236",
"0.67702436",
"0.6764525",
"0.6763935",
"0.6757334",
"0.6752592",
"0.6728733",
"0.672774"
] | 0.74737984 | 1 |
Agent function that prepares a dictionary for storage in a compressed file. | def agent_prepare(self, zf, group, object_dict):
object_buffer = StringIO()
object_buffer.write(json.dumps(object_dict, sort_keys=True, indent=4, separators=(',', ': ')))
object_buffer.seek(0)
data = zipfile.ZipInfo("{0}.json".format(group))
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx
return zf.writestr(data, object_buffer.read()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _files_from_json(self, file_json):\n self.compressed_file_json = zlib.compress(json.dumps(file_json).encode('utf-8'))\n self.compression_algorithm = 'gzip'\n self.compressed_content_hash = hashlib.sha256(self.compressed_file_json).hexdigest()",
"def file_to_dictionary():\n\n return;",
"def __init__(self):\n self.keyingMethod=fileSize\n self.keyToFile=dict()",
"def read_archive(self):\n print '------------------------------------'\n print ' Archiving the simulation results'\n print ' '\n self.archive = {}\n with open(self.archive_data, 'r') as f:\n for line in f:\n (key, val) = line.split()\n self.archive[(key)] = val",
"def dosomething(file_dict2, path, buf):\n file_dict2[path] = {\"Extension\": 1, \"Hash value\": 1}\n return file_dict2",
"def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory",
"def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)",
"def run(self):\n self.compress(\n self.__config.public_key(),\n self.__config.input_dir(),\n self.__config.output_dir(),\n self.__config.suffix()\n )",
"def zip_files(dict_files, compression=zipfile.ZIP_DEFLATED):\n in_memory = StringIO()\n\n with zipfile.ZipFile(in_memory, 'w', compression) as zf:\n for fname, fp in dict_files.iteritems():\n zf.writestr(fname, fp.read())\n\n zf.close()\n\n in_memory.seek(0)\n\n return in_memory",
"def init(sFileName, sDescription):\n \n try:\n with open(sFileName) as f:\n my_dict = json.load(f)\n \n \n except:\n \n #assume there was an error, possibly the file does not exist\n my_dict = {'descriptor':sDescription ,'measurements':[]}\n with open (sFileName, 'w') as f:\n json.dump(my_dict,f)\n \n return my_dict",
"def read_archive(self):\n print '------------------------------------'\n print ' Archiving the simulation results'\n print '----------------------------------- '\n\n if self.Simulation_area.lower() == 'barrow':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Barrow/')\n elif self.Simulation_area.lower() == 'tanana':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Tanana/')\n elif self.Simulation_area.lower() == 'yukon':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Yukon/')\n \n self.archive = {}\n with open(self.archive_data, 'r') as f:\n for line in f:\n if line.startswith('#'):\n continue\n else:\n (key, val) = line.split()\n self.archive[(key)] = val",
"def _build_dictionary(self):\n print(\"Building Dictionary...\")\n self.dictionary = Dictionary(self.load_files())",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"def compress_file(map_, name, save_path):\n size = os.path.getsize(save_path)\n temp = subprocess.run([\"gzip\", \"-k\", save_path])\n cr_size = os.path.getsize(save_path+\".gz\")\n try:\n map_[name] = cr_size / size\n except Exception as e:\n print(f\"File: {save_path}, Ori:{size}, Compr:{cr_size}\")\n print(e)\n raise ZeroDivisionError\n temp = subprocess.run([\"rm\", save_path])\n temp = subprocess.run([\"rm\", save_path+\".gz\"])",
"def reinit(self):\n self.keys = {}\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)",
"def __init__(\n self, basedir: str, primary_key: Optional[List[int]] = None,\n replace: Optional[bool] = False, serializer: Optional[ArchiveSerializer] = None,\n compression: Optional[str] = None, encoder: Optional[json.JSONEncoder] = None,\n decoder: Optional[Callable] = None\n ):\n self.basedir = util.createdir(basedir)\n self._primary_key = primary_key\n self.serializer = serializer if serializer else DefaultSerializer()\n self.compression = compression\n self.encoder = encoder if encoder is not None else DefaultEncoder\n self.decoder = decoder if decoder is not None else default_decoder\n # Initialize the file names\n self.datafile = os.path.join(self.basedir, 'rows.dat')\n self.metafile = os.path.join(self.basedir, 'metadata.dat')\n self.tmpdatafile = os.path.join(self.basedir, 'rows.tmp')\n self.tmpmetafile = os.path.join(self.basedir, 'metadata.tmp')\n if not replace and os.path.isfile(self.metafile):\n # Read schema and snapshot information from disk if the metadata\n # file exists.\n with open(self.metafile, 'r') as f:\n doc = json.load(f, object_hook=self.decoder)\n # Deserialize schema columns.\n columns = list()\n for c in doc.get(META_SCHEMA, []):\n columns.append(self.serializer.deserialize_column(c))\n self.schema = ArchiveSchema(columns=columns)\n # Deserialize snapshot descriptors\n snapshots = list()\n for s in doc.get(META_SNAPSHOTS, []):\n snapshots.append(self.serializer.deserialize_snapshot(s))\n self.snapshots = SnapshotListing(snapshots=snapshots)\n # Deserialize row counter.\n self.row_counter = doc.get(META_ROWCOUNT, 0)\n # Overwrite primary key if it is present in the metadata file.\n if self._primary_key:\n doc[META_PRIMARYKEY] = self._primary_key\n with open(self.tmpmetafile, 'w') as f:\n json.dump(doc, f, cls=self.encoder)\n else:\n self._primary_key = doc[META_PRIMARYKEY]\n else:\n # Create an empty archive.\n self.schema = ArchiveSchema()\n self.snapshots = SnapshotListing()\n self.row_counter = 0\n # Remove any previous files that may exist in the base folder.\n for f in [self.datafile, self.metafile]:\n if os.path.isfile(f):\n os.remove(f)\n # Write primary key to metadata file.\n with open(self.metafile, 'w') as f:\n json.dump({META_PRIMARYKEY: self._primary_key}, f)",
"def prepare(self):\n\t\tif len(os.listdir(self.storagedir)):\n\t\t\traise Exception(\"There are already files in\", self.storagedir + \".\", \"Delete manually!\")\n\t\t\n\t\tself.worddict = {\"__pad__\": 0, \"__oov__\": 1}\n\t\tself.classdict = {}\n\t\tself.raw_documents, self.tokenized_documents = {}, {}\n\t\tself.X, self.Y = {}, {}\n\n\t\tfor dataset in self.DATASETS_TMP:\n\t\t\tself.get_raw_data(dataset)\n\t\t\tself.delete_empty_documents(dataset)\n\t\t\tself.tokenize_documents(dataset)\n\n\t\tself.make_classdict()\n\t\tself.make_worddict()\n\t\tself.make_embeddings()\n\t\tself.reverse_dicts()\n\n\t\tfor dataset in self.DATASETS_TMP:\n\t\t\tself.make_X(dataset)\n\t\t\tself.shuffle_dataset(dataset)\n\n\t\tif not \"dev\" in self.X:\n\t\t\tself.split_dev()\t\t\n\t\tself.make_hybrid()\n\t\tself.store()",
"def store_harvest(self, file_prefix, data):\n compressed = bz2.compress(data)\n k = f\"{self.harvest_key_prefix}/{self.harvest_date}/{file_prefix}.bz2\"\n self.s3_client.put_object(\n Body=compressed,\n Bucket=self.s3_bucket,\n Key=k,\n )",
"def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item",
"def create_zip_dict() -> dict:\n with open('zip_coordinates.json', 'r') as zip_map:\n return json.loads(zip_map.read())",
"def prepare_fastq(self) -> dict:\r\n\r\n fastq = {}\r\n for organism, data in self.composition.items():\r\n file = data['file']\r\n file_path = Path(file)\r\n if not file_path.exists():\r\n raise ValueError(f'File {file_path} does not exist.')\r\n else:\r\n fastq[organism] = pyfastx.Fastq(file)\r\n\r\n self.logger.info('Prepared read files - proceeding')\r\n\r\n return fastq",
"def __init__(self, filename,chainlen,encoding = None):\n self.encoding=encoding\n dict.__init__(self)\n self.chainlen = chainlen\n self.filename = filename\n self.populate()",
"def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict",
"def pre_lookup(self, file):\n return {}",
"def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = 
generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = 
getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, 
trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs",
"def _initializeDoneDict(self):\n\t\tself.donedictfile = os.path.join(self.params['rundir'] , self.functionname+\".donedict\")\n\t\tif os.path.isfile(self.donedictfile) and self.params['continue'] == True:\n\t\t\t### unpickle previously done dictionary\n\t\t\tapDisplay.printMsg(\"Reading old done dictionary: \"+os.path.basename(self.donedictfile))\n\t\t\tf = open(self.donedictfile,'r')\n\t\t\tself.donedict = cPickle.load(f)\n\t\t\tf.close()\n\t\t\tif not 'commit' in self.donedict or self.donedict['commit'] == self.params['commit']:\n\t\t\t\t### all is well\n\t\t\t\tapDisplay.printMsg(\"Found \"+str(len(self.donedict))+\" done dictionary entries\")\n\t\t\t\treturn\n\t\t\telif self.donedict['commit'] is True and self.params['commit'] is not True:\n\t\t\t\t### die\n\t\t\t\tapDisplay.printError(\"Commit flag was enabled and is now disabled, create a new runname\")\n\t\t\telse:\n\t\t\t\t### set up fresh dictionary\n\t\t\t\tapDisplay.printWarning(\"'--commit' flag was changed, creating new done dictionary\")\n\n\t\t### set up fresh dictionary\n\t\tself.donedict = {}\n\t\tself.donedict['commit'] = self.params['commit']\n\t\tapDisplay.printMsg(\"Creating new done dictionary: \"+os.path.basename(self.donedictfile))\n\n\t\t### write donedict to file\n\t\tf = open(self.donedictfile, 'w', 0666)\n\t\tcPickle.dump(self.donedict, f)\n\t\tf.close()\n\t\treturn",
"def convert_gz_json(option, _, value, parser):\n setattr(\n parser.values, option.dest,\n json.loads(zlib.decompress(base64.b64decode(value))))",
"def savepickle(fln, dict, compress=None):\n if compress == None:\n if not os.path.exists(fln) and os.path.exists(fln + '.gz'):\n compress = True\n else:\n compress = False\n if compress:\n import gzip\n with open(fln + '.gz', 'wb') as fh:\n gzh = gzip.GzipFile(fln, 'wb', compresslevel=3, fileobj=fh)\n pickle.dump(dict, gzh, 2)\n gzh.close()\n else:\n with open(fln, 'wb') as fh:\n pickle.dump(dict, fh, 2) # 2 ... fast binary\n return",
"def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()",
"def prepare_gz(self, filename, *args, **kwargs):\n\n return '/vsigzip/' + filename, args, kwargs"
] | [
"0.6145931",
"0.6029067",
"0.5845155",
"0.5629594",
"0.56274456",
"0.5606034",
"0.55619204",
"0.55617094",
"0.55312747",
"0.5409232",
"0.5396165",
"0.5389857",
"0.5363818",
"0.5336105",
"0.5334452",
"0.5315649",
"0.53133637",
"0.53003275",
"0.52949893",
"0.527997",
"0.52777815",
"0.5277205",
"0.5265289",
"0.525663",
"0.5245047",
"0.52206063",
"0.5206608",
"0.51939416",
"0.51810604",
"0.51789516"
] | 0.62700593 | 0 |
Delete a project's files. | def delete_project_files(self, project, logStat):
from corrdb.common.models import FileModel
from corrdb.common.models import EnvironmentModel
for _file in project.resources:
file_ = FileModel.objects.with_id(_file)
if file_:
result = self.storage_delete_file(file_.group, file_.storage)
if result:
logStat(deleted=True, file_obj=file_)
file_.delete()
for record in project.records:
result = self.delete_record_files(record, logStat)
if result:
logStat(deleted=True, record=record)
record.delete()
for environment_id in project.history:
_environment = EnvironmentModel.objects.with_id(environment_id)
if _environment:
if _environment.bundle and _environment.bundle.scope == "local":
result = self.storage_delete_file('bundle', _environment.bundle.storage)
if result:
# logStat(deleted=True, bundle=_environment.bundle)
# logStat(deleted=True, environment=_environment)
_environment.bundle.delete()
# else:
# logStat(deleted=True, environment=_environment)
_environment.delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")",
"def _delete_files(p4, files, repo_name=None):\n if repo_name:\n msgstr = _(\"Deleting {num_commits} commit objects for repo '{repo_name}'.\")\n else:\n msgstr = _(\"Deleting {num_commits} commit objects for all repos.\")\n total = 0\n bite_size = 1000\n while len(files):\n to_delete = files[:bite_size]\n files = files[bite_size:]\n result = p4.run(\"delete\", to_delete)\n count = sum([int('depotFile' in row and row['action'] == 'delete') for row in result])\n total += count\n if count:\n for d in to_delete:\n if os.path.isfile(d):\n os.remove(d)\n result = p4.run(\"submit\", \"-d\", msgstr.format(num_commits=count, repo_name=repo_name))\n return total",
"def delete_files(src_files):\n for i, src_file in enumerate(src_files):\n sys.stdout.write(str(i + 1) + ': ' + src_file + '\\n')\n subprocess.call(['rm', src_file])",
"def project_clear_files(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n project.clear_project_folder()\n return Response(status=status.HTTP_200_OK)\n else:\n raise PermissionDenied",
"def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False",
"def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete_project(arn=None):\n pass",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def remove_files(files):\n for file_name in files:\n os.remove(file_name)",
"def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete(self, filename):\n pass",
"def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()",
"def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n try:\n project.delete()\n self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)",
"def delete_files(file_prototype):\n from os import remove\n folder = getFolder()\n if folder != '/':\n for f in list_files(folder, file_prototype):\n print('Deleting: '+f)\n remove(f)\n\n return()",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))",
"def delete(self):\r\n delete_tracks(self.project, [self])",
"def test_remove_project(self):\n pass",
"def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()",
"def clean(self):\n\n if not self.__projects:\n return\n\n Console.info(\"Cleaning session...\")\n Console.indent()\n\n for project in self.__projects:\n project.clean()\n\n path = os.path.abspath(os.path.join(\".jasy\", \"locale\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up locale project...\")\n shutil.rmtree(path)\n\n path = os.path.abspath(os.path.join(\".jasy\", \"virtual\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up virtual project...\")\n shutil.rmtree(path)\n\n Console.outdent()",
"def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))",
"def clean_project(self, app_name=None, delete_all=False):\n\n if not app_name and not delete_all:\n ConuException(\"You need to specify either app_name or set delete_all=True\")\n\n if delete_all:\n args = [\"--all\"]\n logger.info('Deleting all objects in current project')\n else:\n args = \"-l app=%s\" % app_name\n logger.info('Deleting all objects with label app=%s', app_name)\n\n try:\n o = run_cmd(self._oc_command([\"delete\", \"all\", args]),\n return_output=True)\n o_lines = o.split('\\n')\n for line in o_lines:\n logger.info(line)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cleanup failed because of exception: %s\" % ex)",
"def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None"
] | [
"0.7671678",
"0.70789576",
"0.69476116",
"0.67798454",
"0.66957814",
"0.6679533",
"0.6669261",
"0.663331",
"0.6531018",
"0.6503622",
"0.6503622",
"0.64738876",
"0.6457917",
"0.64377785",
"0.6412456",
"0.6380504",
"0.6365763",
"0.63568527",
"0.635452",
"0.6349144",
"0.634238",
"0.63400716",
"0.6262896",
"0.62579924",
"0.6257533",
"0.62509465",
"0.6231207",
"0.621701",
"0.62161785",
"0.62076765"
] | 0.74385405 | 1 |
Delete a record's files. | def delete_record_files(self, record, logStat):
from corrdb.common.models import FileModel
final_result = True
for _file_id in record.resources:
_file = FileModel.objects.with_id(_file_id)
result = self.delete_record_file(_file, logStat)
if not result:
final_result = result
return final_result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_records(self, records_to_delete):\n for record in records_to_delete:\n self.records.remove(record)\n self._store_writer.remove_img_file(record)\n\n self._process_change()",
"def delete_record(records):\n delete_record()",
"def delete(self, filename):\n pass",
"def delete_record_file(self, record_file, logStat):\n result = self.storage_delete_file(record_file.group, record_file.storage)\n if result:\n logStat(deleted=True, file_obj=record_file)\n record_file.delete()\n return result",
"def delete(self, filename):\n raise NotImplementedError",
"def on_delete_record(event):\n keep_old_files = asbool(utils.setting_value(event.request, 'keep_old_files', default=False))\n\n # Retrieve attachments for these records using links.\n resource_name = event.payload['resource_name']\n filter_field = '%s_uri' % resource_name\n uri = event.payload['uri']\n utils.delete_attachment(event.request, link_field=filter_field, uri=uri,\n keep_old_files=keep_old_files)",
"def delete_file(self, msg_parameters):\n reg = self.get_regex_file_name(msg_parameters[0])\n for file_part in self.files.keys():\n if reg.search(file_part) is not None:\n # remove from computer\n os.remove(self.files[file_part])\n # do not save that the data server has it\n del self.files[file_part]",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def delete_file(sender, instance, **kwargs):\n if bool(instance.exam_file): # check if exam file exists\n try:\n instance.exam_file.delete()\n except OSError:\n pass\n # if exam file has already been deleted, then do nothing and continue\n # with deleting the exam model",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def cli_delete_record(field_list):\n try:\n api.delete_record(field_list)\n except NoRecordsFound as error:\n print \"%(error)s\" % locals()\n return",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")",
"def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass",
"def deleteFileRecordByID(file_id):\n session = Queries.createSession()\n try:\n file_db = session.query(FileTable).filter_by(id=file_id).first()\n servers = file_db.server_id[:]\n for server in servers:\n file_db.server_id.remove(server)\n session.commit()\n session.delete(file_db)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')",
"def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()",
"def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])",
"def delete_users(self, filename):\n f_id = self.face.FACES.files.find_one({ \"filename\" : filename }, { \"_id\" : 1 })\n self.face_fs.delete(f_id['_id'])",
"def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)",
"def delete(self, *, recording_id):\n\n response = openvidu().delete_recording(recording_id)\n\n if response.status_code == 204:\n return\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 409:\n abort(\n Conflict,\n query=\"The recording has started status. Stop it before deletion\",\n )\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n to_delete = [\n instance.photo,\n instance.photo2,\n instance.photo3\n ]\n for photo in to_delete:\n if photo:\n if os.path.isfile(photo.path):\n os.remove(photo.path)",
"def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)",
"def delete_file(self, lfile):\n raise NotImplementedError('delete_file')",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def delete(socket, args, config, library, cmd=False):\n files=args['<nameid>']\n ignore=args['--ignore']\n\n for nameid in files:\n receipt = library.get_receipt( nameid )\n if not receipt:\n if cmd: print \"Could not find receipt for:\",nameid\n if not ignore: return False\n continue\n\n if receipt.get_oid() == None:\n if cmd: print \"You do not have deletion permission for:\",nameid\n if not ignore: return False\n continue\n\n if cmd: print \"Delete\", receipt.get_filename(), \"?\"\n\n response = raw_input(\"Are you sure? [y/N]\")\n if response.lower() not in ['yes','y']:\n print \"File was not deleted.\"\n return False\n\n if delete_file( socket, receipt ):\n #Succeeded, so remove receipt from library\n library.remove_receipt( nameid )\n\n if cmd: print \"Deletion succeeded!\"\n elif cmd: print \"Deletion failed!\"\n\n # Return Success.\n return True",
"def _delete_datafile(sender, instance, **kwargs):\n instance.delete_datafile(save_instance=False)",
"def delete_files(files, volume_path, broker):\n\n logger.info('Deleting %i files', len(files))\n try:\n broker.delete_files(volume_path=volume_path, files=files, update_model=False)\n except ScaleError as err:\n err.log()\n sys.exit(err.exit_code)\n except Exception as ex:\n exit_code = GENERAL_FAIL_EXIT_CODE\n err = get_error_by_exception(ex.__class__.__name__)\n if err:\n err.log()\n exit_code = err.exit_code\n else:\n logger.exception('Error performing delete_files steps')\n sys.exit(exit_code)\n\n return",
"def delete_file(self, filename=None):\n return self._service.delete_object(self._datasets_id, filename)"
] | [
"0.75914454",
"0.7471837",
"0.73076427",
"0.7286397",
"0.6935407",
"0.68690795",
"0.67846966",
"0.6765541",
"0.6712611",
"0.6703749",
"0.66534185",
"0.6648763",
"0.6578517",
"0.656606",
"0.6554084",
"0.65478927",
"0.6511074",
"0.64954126",
"0.6470087",
"0.6469611",
"0.64144486",
"0.63587445",
"0.63562804",
"0.634033",
"0.6334975",
"0.6316999",
"0.63056797",
"0.6299872",
"0.6297266",
"0.62704456"
] | 0.77628917 | 0 |
Delete a record file and log the stats. | def delete_record_file(self, record_file, logStat):
result = self.storage_delete_file(record_file.group, record_file.storage)
if result:
logStat(deleted=True, file_obj=record_file)
record_file.delete()
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_file(fileName):\n os.remove(fileName)\n print (\"Deleteing file: \" + str(fileName))\n write_log()\n read_log()",
"def delete(self, filename):\n pass",
"def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()",
"def delete_record(records):\n delete_record()",
"def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def delete_record_files(self, record, logStat):\n from corrdb.common.models import FileModel\n final_result = True\n for _file_id in record.resources:\n _file = FileModel.objects.with_id(_file_id)\n result = self.delete_record_file(_file, logStat)\n if not result:\n final_result = result\n return final_result",
"def delete_file(filename):\n\tprint client.file_delete(filename)",
"def delete_records(self, records_to_delete):\n for record in records_to_delete:\n self.records.remove(record)\n self._store_writer.remove_img_file(record)\n\n self._process_change()",
"def deleteGmlLoaderLogFile(logFile, command, logger):\n \n if os.path.isfile(logFile) == True:\n reader = open(logFile)\n \n for line in reader:\n if re.search(\"TransactionHandler - Rollback transaction\", line) != None:\n logger.error(\"TransactionHandler - Rollback transaction for \" + command)\n \n reader.close()\n message = \"Delete \" + logFile + \" \" + str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.gmtime(os.path.getmtime(logFile)))) + \" \" + str(os.path.getsize(logFile)) + \" bytes\"\n logger.info(message)\n os.remove(logFile)",
"def delete_log(file_path):\n if os.path.exists(file_path):\n print('Deleting log %s...' % file_path)\n os.remove(file_path)\n else:\n raise ValueError(\"File %r doesn't exists - cannot delete.\" % file_path)",
"def delete_record(self, key):\n del self._records[key]",
"def close(self) -> None:\n logs.remove_record_handler(self.execution_uid)",
"def tests_ti_file_delete_action(self):\n file = cast(File, self.ti_helper.create_indicator())\n action = 'traffic'\n indicator_data = {\n 'confidence': randint(0, 100),\n 'ip': self.ti_helper.rand_ip(),\n 'owner': self.owner,\n 'rating': randint(0, 5),\n }\n target = self.ti.address(**indicator_data)\n target.create()\n file.add_action(action, target)\n response = file.delete_action(action, target)\n assert response.ok\n target.delete()",
"def on_delete_record(event):\n keep_old_files = asbool(utils.setting_value(event.request, 'keep_old_files', default=False))\n\n # Retrieve attachments for these records using links.\n resource_name = event.payload['resource_name']\n filter_field = '%s_uri' % resource_name\n uri = event.payload['uri']\n utils.delete_attachment(event.request, link_field=filter_field, uri=uri,\n keep_old_files=keep_old_files)",
"def delete(self, filename):\n raise NotImplementedError",
"def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')",
"def delete(self, *, recording_id):\n\n response = openvidu().delete_recording(recording_id)\n\n if response.status_code == 204:\n return\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 409:\n abort(\n Conflict,\n query=\"The recording has started status. Stop it before deletion\",\n )\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def delete(self, host, file):",
"def deleteFileRecordByID(file_id):\n session = Queries.createSession()\n try:\n file_db = session.query(FileTable).filter_by(id=file_id).first()\n servers = file_db.server_id[:]\n for server in servers:\n file_db.server_id.remove(server)\n session.commit()\n session.delete(file_db)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()",
"def delete_file(self, lfile):\n raise NotImplementedError('delete_file')",
"def delete(self, record):\n\n s = record.split()\n if len(s) != 3:\n sys.stderr.write('The format of the input should be like this: meal breakfast -50.\\\n \\nFail to delete a record.\\n')\n elif self._records.count(record) > 1:\n try:\n d = int(input(f'Which line of the record \"{record}\" is going to be deleted? '))\n testlist = []\n for i, v in enumerate(self._records):\n if v == record:\n testlist.append(i+1) # testlist contains the records that is identical to the input\n assert d in testlist\n except ValueError:\n sys.stderr.write('Invalid input. Should be an integer.\\nFail to delete a record.\\n')\n except AssertionError:\n sys.stderr.write(f'Invalid input number. No record of \"{record}\" in line {d}.\\\n \\nFail to delete a record')\n else:\n del(self._records[d-1])\n elif self._records.count(record) == 1:\n self._records.remove(record)\n else:\n sys.stderr.write(f'There\\'s no record with \"{record}\".\\nFail to delete a record.\\n')",
"def delete_record(self):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n self.records.remove(record)\n if len(self.records) > 0:\n self.write_json_file(self.records_file, self.records)\n else:\n os.remove(self.records_file)\n return True\n return False",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete(self, record):\n temp = self.hashing(record.get_key())\n if self.__buckets[temp].contains(record):\n self.__buckets[temp].delete(record)\n self.__num_records -= 1",
"def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)",
"def cli_delete_record(field_list):\n try:\n api.delete_record(field_list)\n except NoRecordsFound as error:\n print \"%(error)s\" % locals()\n return",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def delete_last_record():\n\tnewRcrds = list()\n\twith jsonlines.open('tempRecords.jsonl', mode='r') as readerOp:\n\t\tfor obj in readerOp:\n\t\t\tnewRcrds.append(obj)\n\twith jsonlines.open('tempRecords.jsonl', mode='w') as writerOp:\n\t\tif len(newRcrds) != 1:\n\t\t\t# checking if the record being removed is the last record which has file names.\n\t\t\tfor obji in newRcrds[:len(newRcrds)-1]:\n\t\t\t\twriterOp.write(obji)\n\t\telse:\n\t\t\t# if its the last record then do not delet it, as it is required for annotation data\n\t\t\tfor obji in newRcrds[:len(newRcrds)]:\n\t\t\t\twriterOp.write(obji)",
"def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")"
] | [
"0.6743752",
"0.6579642",
"0.65737575",
"0.6460211",
"0.64425284",
"0.6401115",
"0.6390069",
"0.63400424",
"0.63385975",
"0.6327259",
"0.6298369",
"0.6264551",
"0.6233331",
"0.6219438",
"0.620855",
"0.6170005",
"0.61598647",
"0.61536807",
"0.61153316",
"0.6106858",
"0.6105311",
"0.60972834",
"0.6069509",
"0.6013952",
"0.600587",
"0.5997765",
"0.5992497",
"0.59692466",
"0.5954038",
"0.5950987"
] | 0.81148934 | 0 |
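
For illustration only, a minimal, self-contained sketch of how the delete_record_file positive above might be exercised. RecordFile, FileService.storage_delete_file and log_stat are hypothetical stand-ins for the real models, storage backend and stats logger the surrounding application would provide; this is not dataset content.

# Illustrative sketch: remove the stored blob first, then log the stat and
# drop the database record only if the storage delete succeeded.
from dataclasses import dataclass

@dataclass
class RecordFile:
    group: str
    storage: str

    def delete(self):
        print(f"record row for {self.storage} removed")

class FileService:
    def storage_delete_file(self, group, path):
        # Pretend the blob was found and removed from object storage.
        print(f"deleted {path} from bucket {group}")
        return True

    def delete_record_file(self, record_file, logStat):
        # Same flow as the positive document above.
        result = self.storage_delete_file(record_file.group, record_file.storage)
        if result:
            logStat(deleted=True, file_obj=record_file)
            record_file.delete()
        return result

def log_stat(deleted, file_obj):
    print(f"stat: deleted={deleted} file={file_obj.storage}")

if __name__ == "__main__":
    FileService().delete_record_file(RecordFile("results", "a/b.csv"), log_stat)
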
Retrieve an externally hosted file. | def web_get_file(self, url):
try:
print(url)
response = requests.get(url, verify=False)
file_buffer = BytesIO(response.content)
file_buffer.seek(0)
return file_buffer
except:
print(traceback.print_exc())
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file(self, sys_id):\n url = \"{}/file\".format(self._target(sys_id))\n r = self._client.session.get(url, stream=True)\n return r",
"def get_file(URI):\n return file_fabric.get_class(URI).get_content(URI)",
"def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))",
"def get_file(self, path):\n file = self.get('data_request?id=file¶meters=%s' % path)\n return file",
"def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()",
"def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)",
"def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename",
"def get_remote_file(url, success=200, timeout=10):\n try:\n app.logger.info(\"GET: %s\" % url)\n auth = None\n res = requests.get(url, stream=True, timeout=timeout, auth=auth)\n if res.status_code == success:\n return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data\n except:\n pass\n return None, None",
"def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)",
"def get(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n try:\n file_obj = open(file_path, \"r\")\n except IOError:\n return\n else:\n return file_obj.read()",
"def get_file(self, file_id):\n LOG.debug(\"Getting a file from mattermost\")\n url = '%s/api/v4/files/%s' % (self.server_url, file_id)\n LOG.debug(\"Sending: %s\", url)\n response = self._request(self._session.get, url)\n\n if response.status_code != 200:\n raise RuntimeError(\"Server unhappy. (%s)\", response)\n\n return response.content",
"def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def get_file(self, path):\n return self.client._perform_raw(\n \"GET\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))",
"def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents",
"def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)",
"def get_remote_content(self, path):\n if path.startswith(\"http\"):\n page_path = path\n elif path.startswith(\"www\"):\n page_path = \"https://\" + path\n else:\n page_path = self.source + path\n \n print(\"Getting \" + page_path)\n \n try:\n resp = requests.get(page_path)\n except:\n print(\"Unable to get \" + page_path)\n return None\n \n if resp.status_code == 200:\n return resp.content\n else:\n print(\"Unable to get \" + page_path + \" Response = \" + str(resp.status_code))\n return None",
"def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except requests.exceptions.RequestException as e:\n # catastrophic error. bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file",
"def get_file():\n fname = get_var(request, \"fname\")\n return open(fname).read()",
"def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()",
"def fetch(file_url):\n\n tmp_file_handle = NamedTemporaryFile(delete=True)\n headers = {'User-Agent': 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n\n # download file and save to temp object\n with requests.get(file_url, headers=headers, stream=True) as r:\n tmp_file_handle.write(r.content)\n\n tmp_file_handle.flush()\n\n return tmp_file_handle",
"def get(self, filepath):\n try:\n collname = '%s.files' % self.bucketname\n coll = Collection(self.db, collname)\n if coll:\n doc = coll.find_one({'filename': str(filepath)}, sort=[('uploadDate', -1)])\n if doc:\n id = doc['_id']\n gout = self.gridfs.get(ObjectId(id))\n if gout:\n content = gout.read()\n gout.close()\n return content\n except Exception, e:\n print e\n return None",
"def get_file(self, path):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/files/{path}\"\n\n return self.connector.http_call(\"get\", _url).text",
"def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl",
"def fetch_file_from_web(server_url, path, transform_func=json.loads):\n artifact_url = \"{0}/{1}\".format(server_url, path)\n r = requests.get(artifact_url)\n r.raise_for_status()\n if transform_func:\n return transform_func(r.text)\n else:\n return r.text",
"def get_demo_file(fname):\n\n d = download_demo_files()\n if fname in d:\n return d[fname]\n else:\n return None",
"def get_remote_file_server(client, sid, path):\n with slycat.web.server.remote.get_session_server(client, sid) as session:\n return session.get_file(path)",
"def get_local_file(self, no_copy=False):\n return self.get_file(uri_type=URI_LOCAL, no_copy=no_copy)",
"def _get_file(self, path: str) -> Tuple[str, bytes]:\n self._trace(\"fetching: %s\" % path)\n meta, resp = self._connection.files_download(path)\n return (meta.rev, resp.content)",
"def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()"
] | [
"0.7363333",
"0.71599615",
"0.7076054",
"0.70494545",
"0.70425373",
"0.6997317",
"0.69079727",
"0.68707514",
"0.68504196",
"0.67665625",
"0.6739661",
"0.6723161",
"0.6715714",
"0.6701585",
"0.6683252",
"0.6590083",
"0.6564484",
"0.65569955",
"0.65451306",
"0.65384126",
"0.6485488",
"0.64797294",
"0.64671713",
"0.645547",
"0.64396316",
"0.6434205",
"0.64113724",
"0.6408906",
"0.6402094",
"0.6385535"
] | 0.7258937 | 1 |
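
As a hedged usage sketch of the web_get_file positive above: download into an in-memory buffer and persist it only on success. The URL and output filename are placeholders, and verify=False is kept only to mirror the original snippet, not as a recommendation.

# Illustrative only: fetch a remote file into a BytesIO buffer, then write it
# to disk if the request succeeded.
from io import BytesIO
from typing import Optional
import traceback
import requests

def web_get_file(url: str) -> Optional[BytesIO]:
    try:
        response = requests.get(url, verify=False)  # mirrors the original; avoid verify=False in production
        buffer = BytesIO(response.content)
        buffer.seek(0)
        return buffer
    except Exception:
        traceback.print_exc()
        return None

if __name__ == "__main__":
    buf = web_get_file("https://example.com/report.pdf")
    if buf is not None:
        # Persist the in-memory buffer once the download succeeded.
        with open("report.pdf", "wb") as fh:
            fh.write(buf.getvalue())
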
Bundle a project's environment. | def prepare_env(self, project=None, env=None):
if project == None or env == None:
return [None, '']
else:
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
if env.bundle != None and env.bundle.storage != '':
try:
bundle_buffer = StringIO()
if 'http://' in env.bundle.storage or 'https://' in env.bundle.storage:
bundle_buffer = self.web_get_file(env.bundle.storage)
else:
bundle_buffer = self.storage_get_file('bundle', env.bundle.storage)
data = zipfile.ZipInfo("bundle.%s"%(env.bundle.storage.split("/")[-1].split(".")[-1]))
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx
zf.writestr(data, bundle_buffer.read())
except:
print(traceback.print_exc())
try:
json_buffer = StringIO()
json_buffer.write(env.to_json())
json_buffer.seek(0)
data = zipfile.ZipInfo("env.json")
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
data.external_attr |= 0o777 << 16 # -rwx-rwx-rwx
zf.writestr(data, json_buffer.read())
except:
print(traceback.print_exc())
memory_file.seek(0)
return [memory_file, "project-%s-env-%s.zip"%(str(project.id), str(env.id))] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def env(config, args):\n print config.template(\"scripts/env.sh\", project=args.project)",
"def venv(session):\n # Install dependencies.\n session.install(\"--upgrade\", \"pip\", \"setuptools\")\n session.install(\"-r\", \"requirements-dev.txt\")\n session.install(\"-e\", \".\")\n\n # Customize the venv.\n env_dir = Path(session.bin)\n activate = env_dir / 'activate'\n with activate.open('a') as f:\n f.write(f'\\n[ -f {activate.resolve()}/postactivate ] && . {activate.resolve()}/postactivate\\n')\n\n {{ cookiecutter.project_name }}_complete = nox_file / 'contrib/{{ cookiecutter.project_name }}-complete.sh'\n postactivate = env_dir / 'postactivate'\n with postactivate.open('a') as f:\n f.write('export PYTHONBREAKPOINT=bpdb.set_trace\\n')\n f.write(f'source { {{ cookiecutter.project_name }}_complete.resolve() }\\n')\n\n predeactivate = env_dir / 'predeactivate'\n with predeactivate.open('a') as f:\n f.write('unset PYTHONBREAKPOINT\\n')",
"def __gitBundle(self):\n self.vcs.gitBundle(self.project.getProjectPath())",
"def create_aiida_project_environment(self):\n # mock the virtualenv activation procedure\n venv_prefix = self.env_folder / self.proj_name\n current_env = os.environ.copy()\n current_env.pop('PYTHONHOME', None)\n current_env['VIRTUAL_ENV'] = venv_prefix\n old_path = current_env['PATH']\n new_path = str(venv_prefix / 'bin') + os.pathsep + old_path\n current_env['PATH'] = new_path\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index(env=current_env)\n self.install_packages_from_source(env=current_env)\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()",
"def bundle(self, app):\r\n assert(isinstance(app, BundleCreate.App))\r\n\r\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\r\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\r\n\r\n safe_mkdir(bundledir, clean=True)\r\n\r\n classpath = OrderedSet()\r\n if not self.deployjar:\r\n libdir = os.path.join(bundledir, 'libs')\r\n os.mkdir(libdir)\r\n\r\n # Add internal dependencies to the bundle.\r\n def add_jars(target):\r\n target_jars = self.context.products.get('jars').get(target)\r\n if target_jars is not None:\r\n for basedir, jars in target_jars.items():\r\n for internaljar in jars:\r\n os.symlink(os.path.join(basedir, internaljar),\r\n os.path.join(libdir, internaljar))\r\n classpath.add(internaljar)\r\n app.binary.walk(add_jars, lambda t: t.is_internal)\r\n\r\n # Add external dependencies to the bundle.\r\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\r\n path = os.path.join(basedir, externaljar)\r\n os.symlink(path, os.path.join(libdir, externaljar))\r\n classpath.add(externaljar)\r\n\r\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\r\n if len(jars) != 1:\r\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\r\n\r\n binary = jars[0]\r\n binary_jar = os.path.join(basedir, binary)\r\n bundle_jar = os.path.join(bundledir, binary)\r\n if not classpath:\r\n os.symlink(binary_jar, bundle_jar)\r\n else:\r\n with open_zip(binary_jar, 'r') as src:\r\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\r\n for item in src.infolist():\r\n buf = src.read(item.filename)\r\n if Manifest.PATH == item.filename:\r\n manifest = Manifest(buf)\r\n manifest.addentry(Manifest.CLASS_PATH,\r\n ' '.join(os.path.join('libs', jar) for jar in classpath))\r\n buf = manifest.contents()\r\n dest.writestr(item, buf)\r\n\r\n for bundle in app.bundles:\r\n for path, relpath in bundle.filemap.items():\r\n bundlepath = os.path.join(bundledir, relpath)\r\n safe_mkdir(os.path.dirname(bundlepath))\r\n os.symlink(path, bundlepath)\r\n\r\n return bundledir",
"def setup_virtual_env(self):\n\n venv(\"{0}_env\".format(self.app_name), self.install_django_project)",
"def _prepare_publish_environments():\n env = copy.deepcopy(os.environ)\n\n project_name = os.getenv(\"AVALON_PROJECT\")\n asset_name = os.getenv(\"AVALON_ASSET\")\n\n env[\"AVALON_PROJECT\"] = project_name\n env[\"AVALON_ASSET\"] = asset_name\n env[\"AVALON_TASK\"] = os.getenv(\"AVALON_TASK\")\n env[\"AVALON_WORKDIR\"] = os.getenv(\"AVALON_WORKDIR\")\n env[\"AVALON_APP\"] = f\"hosts.{publish_host}\"\n env[\"AVALON_APP_NAME\"] = \"celaction/local\"\n\n env[\"PYBLISH_HOSTS\"] = publish_host\n\n os.environ.update(env)",
"def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'",
"def bundle(self, app):\n assert(isinstance(app, BundleCreate.App))\n\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\n\n safe_mkdir(bundledir, clean=True)\n\n classpath = OrderedSet()\n if not self.deployjar:\n libdir = os.path.join(bundledir, 'libs')\n os.mkdir(libdir)\n\n # Add external dependencies to the bundle.\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\n path = os.path.join(basedir, externaljar)\n os.symlink(path, os.path.join(libdir, externaljar))\n classpath.add(externaljar)\n\n # TODO: There should probably be a separate 'binary_jars' product type,\n # so we can more easily distinguish binary jars (that contain all the classes of their\n # transitive deps) and per-target jars.\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\n if len(jars) != 1:\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\n\n binary = jars[0]\n binary_jar = os.path.join(basedir, binary)\n bundle_jar = os.path.join(bundledir, binary)\n # Add the internal classes into the bundle_jar.\n if not classpath:\n os.symlink(binary_jar, bundle_jar)\n else:\n # TODO: Can we copy the existing jar and inject the manifest in, instead of\n # laboriously copying the contents one by one? Would that be more efficient?\n with open_zip(binary_jar, 'r') as src:\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\n for item in src.infolist():\n buf = src.read(item.filename)\n if Manifest.PATH == item.filename:\n manifest = Manifest(buf)\n manifest.addentry(Manifest.CLASS_PATH,\n ' '.join(os.path.join('libs', jar) for jar in classpath))\n buf = manifest.contents()\n dest.writestr(item, buf)\n\n for bundle in app.bundles:\n for path, relpath in bundle.filemap.items():\n bundlepath = os.path.join(bundledir, relpath)\n safe_mkdir(os.path.dirname(bundlepath))\n os.symlink(path, bundlepath)\n\n return bundledir",
"def bootstrap(environment: Environment):\n pass",
"def deploy(force_version=None):\n bundle_name = env.http_host\n bundle_root = '%s/%s' % (env.get('bundle_root', run('pwd') + '/bundles'),\n bundle_name)\n env.bundle_root = bundle_root\n run('mkdir -p %s/{log,conf,public}' % bundle_root)\n\n # virtualenv, Packages\n if not exists(bundle_root + '/env'):\n run('virtualenv --no-site-packages %s/env' % bundle_root)\n run('%s/env/bin/pip install -U pip' % bundle_root)\n\n local('python setup.py sdist')\n dists = [\n d for d in os.listdir(os.path.join(os.getcwd(),\n 'dist')) if d.endswith('.tar.gz')\n ]\n version_string = lambda d: d.rsplit('-', 1)[1][:-7]\n def int_or_s(num):\n try:\n return int(num)\n except ValueError:\n return num\n dist = sorted(dists, key=lambda d: map(int_or_s,\n version_string(d).split('.')))[-1]\n version = force_version or version_string(dist)\n dist_name = dist.rsplit('-', 1)[0]\n requirement = '%s==%s' % (dist_name, version)\n\n packages = env.bundle_root + '/packages'\n run('mkdir -p %s' % packages)\n if not exists('%s/%s' % (packages, dist)):\n put('dist/%s' % dist, '%s/%s' % (packages, dist))\n\n has_vendor = 'vendor' in os.listdir(os.getcwd())\n if has_vendor:\n local_files = set(os.listdir(os.path.join(os.getcwd(), 'vendor')))\n uploaded = set(run('ls %s' % packages).split())\n diff = local_files - uploaded\n for file_name in diff:\n put('vendor/%s' % file_name, '%s/%s' % (packages, file_name))\n\n freeze = run('%s/env/bin/pip freeze' % bundle_root).split()\n if requirement in freeze and force_version is None:\n die(\"%s is already deployed. Increment the version number to deploy \"\n \"a new release.\" % requirement)\n\n cmd = '%s/env/bin/pip install -U %s gunicorn gevent greenlet setproctitle --find-links file://%s' % (\n bundle_root, requirement, packages\n )\n if 'index_url' in env:\n cmd += ' --index-url %(index_url)s' % env\n run(cmd)\n env.path = bundle_root\n python = run('ls %s/env/lib' % bundle_root)\n template(\n 'path_extension.pth',\n '%s/env/lib/%s/site-packages/_virtualenv_path_extensions.pth' % (\n bundle_root, python\n ),\n )\n\n if 'media_url' not in env:\n env.media_url = '/media/'\n if 'media_root' not in env:\n env.media_root = bundle_root + '/public' + env.media_url\n if 'static_url' not in env:\n env.static_url = '/static/'\n if 'static_root' not in env:\n env.static_root = bundle_root + '/public' + env.static_url\n if not 'staticfiles' in env:\n env.staticfiles = True\n if not 'cache' in env:\n env.cache = 0 # redis DB\n template('settings.py', '%s/settings.py' % bundle_root)\n template('wsgi.py', '%s/wsgi.py' % bundle_root)\n\n # Do we have a DB?\n database_creation()\n database_migration()\n\n if env.staticfiles:\n manage('collectstatic')\n\n # Some things don't like dots\n env.app = env.http_host.replace('.', '')\n\n # Cron tasks\n if 'cron' in env:\n template('cron', '%(bundle_root)s/conf/cron' % env, use_sudo=True)\n sudo('chown root:root %(bundle_root)s/conf/cron' % env)\n sudo('chmod 644 %(bundle_root)s/conf/cron' % env)\n sudo('ln -sf %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' % env)\n else:\n # Make sure to deactivate tasks if the cron section is removed\n sudo('rm -f %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' % env)\n\n # Log rotation\n logrotate = '/etc/logrotate.d/%(app)s' % env\n template('logrotate', logrotate, use_sudo=True)\n sudo('chown root:root %s' % logrotate)\n\n # Nginx vhost\n changed = template('nginx.conf', '%s/conf/nginx.conf' % bundle_root)\n with cd('/etc/nginx/sites-available'):\n sudo('ln -sf %s/conf/nginx.conf %s.conf' % (bundle_root,\n 
env.http_host))\n with cd('/etc/nginx/sites-enabled'):\n sudo('ln -sf ../sites-available/%s.conf' % env.http_host)\n if env.get('ssl_cert') and env.get('ssl_key'):\n put(env.ssl_cert, '%s/conf/ssl.crt' % bundle_root)\n put(env.ssl_key, '%s/conf/ssl.key' % bundle_root)\n if changed: # TODO detect if the certs have changed\n sudo('/etc/init.d/nginx reload')\n\n # Supervisor task(s) -- gunicorn + rq\n if not 'workers' in env:\n env.workers = 2\n changed = template('supervisor.conf',\n '%s/conf/supervisor.conf' % bundle_root)\n with cd('/etc/supervisor/conf.d'):\n sudo('ln -sf %s/conf/supervisor.conf %s.conf' % (bundle_root,\n bundle_name))\n\n if 'rq' in env and env.rq:\n changed = True # Always supervisorctl update\n handle_rq(bundle_name, bundle_root, env)\n\n if 'celery' in env and env.celery:\n changed = True\n handle_celery(bundle_name, bundle_root, env)\n\n if changed:\n sudo('supervisorctl update')\n run('kill -HUP `pgrep gunicorn`')\n\n # All set, user feedback\n ip = run('curl http://ifconfig.me/')\n dns = run('nslookup %s' % env.http_host)\n if ip in dns:\n proto = 'https' if 'ssl_cert' in env else 'http'\n yay(\"Visit %s://%s\" % (proto, env.http_host))\n else:\n err(\"Deployment successful but make sure %s points to %s\" % (\n env.http_host, ip))",
"def configure_environment(config): # pragma: no cover\n base = config.get_jinja2_environment()\n config.registry[ENVIRONMENT_KEY] = create_environment(base)",
"def setenv(args: Namespace) -> None:\n env = {}\n if not args.no_langkit_support:\n env = langkit_support_env_map(args)\n\n for cwd in selected_lib_roots(args):\n d = json.loads(subprocess.check_output(\n [sys.executable,\n \"./manage.py\",\n \"setenv\",\n f\"--build-mode={args.build_mode}\",\n \"-J\"],\n cwd=cwd\n ))\n\n for k, v in d.items():\n if k in env:\n env[k] = format_path(k, [env[k], v])\n else:\n env[k] = v\n\n if args.json:\n print(json.dumps(env))\n else:\n for k, v in env.items():\n print(format_setenv(k, v))",
"def _setup_environment(environment):\n env.environment = environment\n env.project = ENVS[environment]\n env.hosts = [env.project['host']]\n env.user = env.project.get('user', env.local_user)\n env.password = env.project.get('password', None)\n # Redundant, just to easy the interpolation later on\n env.project['environment'] = environment",
"def deploy(force_version=None):\n bundle_name = env.http_host\n bundle_root = '{0}/{1}'.format(\n env.get('bundle_root', run('pwd') + '/bundles'),\n bundle_name,\n )\n env.bundle_root = bundle_root\n run('mkdir -p %s/{log,conf,public}' % bundle_root)\n\n # virtualenv, Packages\n if not exists(bundle_root + '/env'):\n run('virtualenv --no-site-packages {0}/env'.format(bundle_root))\n run('{0}/env/bin/pip install -U pip'.format(bundle_root))\n\n local('python setup.py sdist')\n dists = [\n d for d in os.listdir(os.path.join(os.getcwd(),\n 'dist')) if d.endswith('.tar.gz')\n ]\n version_string = lambda d: d.rsplit('-', 1)[1][:-7]\n\n def int_or_s(num):\n try:\n return int(num)\n except ValueError:\n return num\n dist = sorted(dists, key=lambda d: map(int_or_s,\n version_string(d).split('.')))[-1]\n version = force_version or version_string(dist)\n dist_name = dist.rsplit('-', 1)[0]\n requirement = '{0}=={1}'.format(dist_name, version)\n\n packages = env.bundle_root + '/packages'\n run('mkdir -p {0}'.format(packages))\n if not exists('{0}/{1}'.format(packages, dist)):\n put('dist/{0}'.format(dist), '{0}/{1}'.format(packages, dist))\n\n has_vendor = 'vendor' in os.listdir(os.getcwd())\n if has_vendor:\n local_files = set(os.listdir(os.path.join(os.getcwd(), 'vendor')))\n uploaded = set(run('ls {0}'.format(packages)).split())\n diff = local_files - uploaded\n for file_name in diff:\n put('vendor/{0}'.format(file_name),\n '{0}/{1}'.format(packages, file_name))\n\n freeze = run('{0}/env/bin/pip freeze'.format(bundle_root)).split()\n if requirement in freeze and force_version is None:\n die(\"{0} is already deployed. Increment the version number to deploy \"\n \"a new release.\".format(requirement))\n\n cmd = ('{0}/env/bin/pip install -U {1} gunicorn gevent greenlet '\n 'setproctitle --find-links file://{2}'.format(\n bundle_root, requirement, packages,\n ))\n if 'index_url' in env:\n cmd += ' --index-url {0}'.format(env.index_url)\n run(cmd)\n env.path = bundle_root\n\n manage_envdir(bundle_root)\n\n if not 'staticfiles' in env:\n env.staticfiles = True\n if not 'cache' in env:\n env.cache = 0 # redis DB\n\n # Do we have a DB?\n result = run('psql -U postgres -l|grep UTF8')\n if bundle_name not in result:\n if 'gis' in env and env.gis is False:\n db_template = 'template0'\n else:\n db_template = 'template_postgis'\n run('createdb -U postgres -T {0} -E UTF8 {1}').format(db_template,\n bundle_name)\n\n if 'migrations' in env:\n if env.migrations != 'nashvegas':\n die(\"{0} is not supported for migrations.\".format(env.migrations))\n manage('upgradedb -l', noinput=False) # This creates the migration\n # tables\n\n installed = run('psql -U postgres {0} -c \"select id from '\n 'nashvegas_migration limit 1;\"'.format(bundle_name))\n installed = '0 rows' not in installed\n if installed:\n manage('upgradedb -e', noinput=False)\n else:\n # 1st deploy, force syncdb and seed migrations.\n manage('syncdb')\n manage('upgradedb -s', noinput=False)\n else:\n manage('syncdb')\n\n if env.staticfiles:\n manage('collectstatic')\n\n # Some things don't like dots\n env.app = env.http_host.replace('.', '')\n\n # Cron tasks\n if 'cron' in env:\n template('cron', '%(bundle_root)s/conf/cron' % env, use_sudo=True)\n sudo('chown root:root %(bundle_root)s/conf/cron' % env)\n sudo('chmod 644 %(bundle_root)s/conf/cron' % env)\n sudo('ln -sf %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' % env)\n else:\n # Make sure to deactivate tasks if the cron section is removed\n sudo('rm -f %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' 
% env)\n\n # Log rotation\n logrotate = '/etc/logrotate.d/%(app)s' % env\n template('logrotate', logrotate, use_sudo=True)\n sudo('chown root:root %s' % logrotate)\n\n # Nginx vhost\n changed = template('nginx.conf', '%s/conf/nginx.conf' % bundle_root)\n with cd('/etc/nginx/sites-available'):\n sudo('ln -sf %s/conf/nginx.conf %s.conf' % (bundle_root,\n env.http_host))\n with cd('/etc/nginx/sites-enabled'):\n sudo('ln -sf ../sites-available/%s.conf' % env.http_host)\n if 'ssl_cert' in env and 'ssl_key' in env:\n put(env.ssl_cert, '%s/conf/ssl.crt' % bundle_root)\n put(env.ssl_key, '%s/conf/ssl.key' % bundle_root)\n if changed: # TODO detect if the certs have changed\n sudo('/etc/init.d/nginx reload')\n\n # Supervisor task(s) -- gunicorn + rq\n if not 'workers' in env:\n env.workers = 2\n changed = template('supervisor.conf',\n '%s/conf/supervisor.conf' % bundle_root)\n with cd('/etc/supervisor/conf.d'):\n sudo('ln -sf %s/conf/supervisor.conf %s.conf' % (bundle_root,\n bundle_name))\n\n if 'rq' in env and env.rq:\n changed = True # Always supervisorctl update\n\n # RQ forks processes and they load the latest version of the code.\n # No need to restart the worker **unless** RQ has been updated (TODO).\n for worker_id in range(env.rq['workers']):\n env.worker_id = worker_id\n template(\n 'rq.conf', '%s/conf/rq%s.conf' % (bundle_root, worker_id),\n )\n with cd('/etc/supervisor/conf.d'):\n sudo('ln -sf %s/conf/rq%s.conf %s_worker%s.conf' % (\n bundle_root, worker_id, bundle_name, worker_id,\n ))\n\n # Scale down workers if the number decreased\n names = '/etc/supervisor/conf.d/{0}_worker*.conf'.format(bundle_name)\n workers = run('ls {0}'.format(names))\n workers_conf = run('ls {0}/conf/rq*.conf'.format(bundle_root))\n to_delete = []\n for w in workers.split():\n if int(w.split('{0}_worker'.format(bundle_name),\n 1)[1][:-5]) >= env.rq['workers']:\n to_delete.append(w)\n for w in workers_conf.split():\n if int(w.split(bundle_name, 1)[1][8:-5]) >= env.rq['workers']:\n to_delete.append(w)\n if to_delete:\n sudo('rm {0}'.format(\" \".join(to_delete)))\n\n if changed:\n sudo('supervisorctl update')\n run('kill -HUP `pgrep gunicorn`')\n\n # All set, user feedback\n ip = run('curl http://ifconfig.me/')\n dns = run('nslookup {0}'.format(env.http_host))\n if ip in dns:\n proto = 'https' if 'ssl_cert' in env else 'http'\n yay(\"Visit {0}://{1}\".format(proto, env.http_host))\n else:\n err(\"Deployment successful but make sure {0} points to {1}\".format(\n env.http_host, ip))",
"def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()",
"def create_environment(base):\n # Build a template loader based on SEARCH_PATHS\n resolver = AssetResolver()\n searchpath = [resolver.resolve(path).abspath() for path in SEARCH_PATHS]\n loader = pyramid_jinja2.SmartAssetSpecLoader(searchpath)\n\n # Make an overlay environment from the main Jinja2 environment. See:\n #\n # http://jinja.pocoo.org/docs/dev/api/#jinja2.Environment.overlay\n return base.overlay(autoescape=True, loader=loader)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)",
"def SetupEnvironment(self):\n pass",
"def _build_env(target, *, orig=os.environ):\n overlay = dict(\n PYTHONPATH=_path_insert(orig.get('PYTHONPATH', ''), os.fspath(target)),\n PATH=_path_insert(orig.get('PATH', ''), os.fspath(target / 'bin')),\n )\n return {**orig, **overlay}",
"def bootstrap():\n local('virtualenv fabric_factory/ve')",
"def environment(args, env_name=None):\n\n chefserver = open_chef_connection(args)\n if env_name is None:\n env_name = args.get('name')\n env = chefserver.get_env(name=env_name)\n env_attrs = env.to_dict()\n backup_attributes(\n backup_dict=env_attrs,\n name='%s_Environment' % env_name\n )\n new_env = _package_upgrades(\n args=args, env_attrs=_super_munger(\n quantum_name_check(\n args, env_attrs\n )\n )\n )\n\n chefserver.put_env(old_env=env_name, new_env=new_env)",
"def development():\n env.branch = 'development'",
"def setup_environment(self, spack_env, run_env):\n run_env.prepend_path('PICARD',\n join_path(self.prefix, 'bin', 'picard.jar'))",
"def setup_project():\n _require_environment()\n\n # Checks if needed conf files for this environment already exist\n if not os.path.exists(_interpolate('%(settings)s.py')):\n abort(_interpolate('There is no settings.py for %(environment)s - create one, and commit'))\n if not os.path.exists(_interpolate('config/apache_%(environment)s.conf')):\n abort(_interpolate('There is no Apache conf for %(environment)s - use task \"generate_apache_conf\" to generate one, and commit'))\n if not os.path.exists(_interpolate('config/wsgi_%(environment)s.py')):\n abort(_interpolate('There is no WSGI conf for %(environment)s - use task \"generate_wsgi_conf\" to generate one, and commit'))\n\n # Configures virtualenv and clones git repo\n _setup_virtualenv()\n _clone_gitrepo()\n\n # Issues extra commands at project's level, if any\n extra_commands()\n\n # Sets up Apache, MySQL\n _setup_project_apache()\n _drop_database_mysql()\n _setup_project_mysql()\n\n # Finish installation\n pip_install()\n update_project()",
"def setup_environ(project_dir, i18n_dirname, set_file):\n if not os.path.exists(set_file):\n sys.stderr.write(\"Error: You are not into a Django's project\\\n directory.\\n\")\n sys.exit(1)\n\n project_name = os.path.basename(project_dir)\n i18n_app = \"%s.%s\" % (project_name, i18n_dirname) # Name of I18n app.\n # Set DJANGO_SETTINGS_MODULE appropriately.\n os.environ['DJANGO_SETTINGS_MODULE'] = \"%s.settings\" % project_name\n\n # Add parent's directory to sys.path so that the module is importable.\n sys.path.append(os.path.dirname(project_dir))\n try:\n i18n_model = __import__(\"%s.models\" % (i18n_app), {}, {}, [''])\n except ImportError, err:\n sys.stderr.write(\"Error: %s. Are you sure I18n app. is installed?\\n\" \\\n % err)\n sys.exit(1)\n sys.path.pop()\n\n # If it is not installed, it looking for the line and insert it.\n if i18n_app not in settings.INSTALLED_APPS:\n print \"Activating %s application\" % i18n_dirname\n is_header = False # Look for 'INSTALLED_APPS'.\n try:\n write_f = fileinput.input(set_file, inplace=1)\n except IOError, err:\n sys.stderr.write(\"Error: %r. %s.\\n\" % (err.filename, err.strerror))\n sys.exit(1)\n\n for line in write_f:\n if not is_header and 'INSTALLED_APPS' in line:\n is_header = True\n elif is_header and ')' in line:\n print \" '%s',\" % i18n_app\n is_header = False\n print line[:-1]\n write_f.close()\n\n # Create the tables for I18n application.\n # Could add I18n application to the variable but don't\n # since that is only necessary create the tables for I18n.\n settings.INSTALLED_APPS = []\n settings.INSTALLED_APPS.append(\"%s\" % i18n_app)\n call_command('syncdb')\n\n return i18n_model",
"def export(self, **env):\n with self.lock:\n for key, value in env.items():\n self.environment[key] = value",
"def update_environ():\n\n # Environment variables to set.\n BASE = os.getcwd()\n PLUGINS = os.path.join(BASE, 'lib')\n RESOURCES = os.path.join(BASE, 'res')\n MODELS = os.path.join(RESOURCES, 'models')\n\n # Set the vaue to '' to set the var to ''.\n # Anything else will be added to current var value.\n minimapper_env = {\n 'GAZEBO_RESOURCE_PATH': RESOURCES,\n 'GAZEBO_MODEL_PATH': MODELS,\n 'GAZEBO_PLUGIN_PATH': PLUGINS,\n 'GAZEBO_MODEL_DATABASE_URI': None\n }\n\n # Conditionally set environment variables.\n env = os.environ.copy()\n for key, val in minimapper_env.items():\n if val is None:\n env[key] = ''\n elif key not in env:\n env[key] = val\n elif key in env and val not in env[key]:\n env[key] = val + ':' + env[key]\n\n return env",
"def generate_environment(self):\n try:\n if self._environment is None:\n self._environment = Environment.fromfilepath(self._environmentName,\n self._configuration.environment_file_path)\n except Exception:\n raise",
"def set_environment(plugin_path):\n srcpath = os.path.join(plugin_path, \"scripts\")\n icnpath = os.path.join(plugin_path, \"icons\")\n melpath = os.path.join(plugin_path, \"mel\")\n modpath = os.path.join(plugin_path, \"modules\")\n tplpath = os.path.join(plugin_path, \"templates\")\n tolpath = os.path.join(plugin_path, \"scripts\", \"tools\")\n sys.path.append(modpath)\n sys.path.append(srcpath)\n sys.path.append(os.path.join(srcpath, \"ui\"))\n sys.path.append(tolpath)\n\n script_dirs = os.environ[\"MAYA_SCRIPT_PATH\"] + os.pathsep\n os.environ[\"AZUREBATCH_ICONS\"] = AzureBatchSetup.clean(icnpath)\n os.environ[\"AZUREBATCH_MODULES\"] = AzureBatchSetup.clean(modpath)\n os.environ[\"AZUREBATCH_TEMPLATES\"] = AzureBatchSetup.clean(tplpath)\n os.environ[\"AZUREBATCH_TOOLS\"] = AzureBatchSetup.clean(tolpath)\n os.environ[\"MAYA_SCRIPT_PATH\"] = script_dirs + \\\n AzureBatchSetup.clean(melpath)\n print(\"Attempting to create mod file under MAYA_MODULE_PATH\")\n mods = AzureBatchSetup.find_modules_locations(plugin_path)\n\n if not mods:\n print(\"Attempting to add custom module path to Maya.env\")\n mods = AzureBatchSetup.find_env_location(plugin_path)\n if not mods:\n print(\"Failed to setup AzureBatch mod file\")\n return os.environ[\"MAYA_MODULE_PATH\"] + os.pathsep"
] | [
"0.6130687",
"0.598805",
"0.5796788",
"0.57524836",
"0.5727751",
"0.572663",
"0.57033294",
"0.564921",
"0.5631391",
"0.56252474",
"0.560973",
"0.55973923",
"0.5581485",
"0.55788237",
"0.55742204",
"0.556539",
"0.55550957",
"0.5539426",
"0.5535784",
"0.55265415",
"0.5499274",
"0.5484143",
"0.5478806",
"0.5478427",
"0.5462823",
"0.54313487",
"0.54028165",
"0.5396542",
"0.53950524",
"0.53924125"
] | 0.60943496 | 1 |
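
A minimal sketch of the in-memory zip pattern the prepare_env positive relies on: write members into a BytesIO-backed ZipFile, then rewind the buffer before handing it to the caller. The member names and payload below are made up for illustration.

# Illustrative sketch of streaming a zip archive from memory.
import json
import time
import zipfile
from io import BytesIO

def build_env_archive(env_dict, bundle_bytes=None):
    memory_file = BytesIO()
    with zipfile.ZipFile(memory_file, "w") as zf:
        if bundle_bytes is not None:
            info = zipfile.ZipInfo("bundle.zip")
            info.date_time = time.localtime(time.time())[:6]
            info.compress_type = zipfile.ZIP_DEFLATED
            info.external_attr |= 0o777 << 16  # -rwx-rwx-rwx, as in the original
            zf.writestr(info, bundle_bytes)
        zf.writestr("env.json", json.dumps(env_dict))
    memory_file.seek(0)  # rewind so the caller can stream it
    return memory_file

if __name__ == "__main__":
    archive = build_env_archive({"name": "demo-env"}, b"fake bundle payload")
    print(zipfile.ZipFile(archive).namelist())  # ['bundle.zip', 'env.json']
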
Obtain ORI, AGENCY, CGOVTYPE, FIPS_STATE, FIPS_PLACE from the final main (9001) file | def get_final_main_cgovtype_ori_agency(file_path):
final_main_df = pd.read_csv(file_path)
final_main_fips_ori_agency = final_main_df[['ORI', 'AGENCY', 'CGOVTYPE', 'FIPS_STATE', 'FIPS_PLACE']]
"""
1. Obtain only unique records from the final main file - key: fips place + fips state
"""
final_main_fips_ori_agency_unique = final_main_fips_ori_agency.drop_duplicates(['FIPS_STATE', 'FIPS_PLACE']) # --> 11,602 rows
"""
2. Rename CGOVTYPE, FIPS_STATE, FIPS_PLACE to Govt_level, 'STATEFP', 'place_fips' to match national census file
"""
final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.rename(
{'CGOVTYPE': 'Govt_level', 'FIPS_STATE': 'STATEFP', 'FIPS_PLACE': 'place_fips'}, axis='columns')
"""
3. Get only those records from 90 final main file whose cgovtype is 1,2 or 3
"""
final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.loc[final_main_fips_ori_agency_unique['Govt_level'].isin([1, 2, 3])]
return final_main_fips_ori_agency_unique | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ogip_dictionary_arf():\n \"\"\"\n this function returns the required and optional keywords and columns\n as defined by OGIP 92-002 and 92-002a\n \"\"\"\n global status\n global REPORT\n\n \"\"\"\n FOR the ARF file:\n \"\"\"\n \"\"\"\n Define REQUIRED Keywords for SPECRESP EXTENSION (note: EXTNAME is SPECRESP)\n \"\"\"\n reqkeys = ['TELESCOP', 'INSTRUME']\n reqkeys.append('FILTER')\n reqkeys.append('CHANTYPE[PHA|PI]')\n reqkeys.append('DETCHANS')\n reqkeys.append('HDUCLASS[OGIP]')\n reqkeys.append('HDUCLAS1[RESPONSE]')\n reqkeys.append('HDUCLAS2[SPECRESP]')\n reqkeys.append('HDUVERS[1.1.0]')\n reqkeys.append('TLMIN*')\n reqkeys.append('NUMGRP')\n reqkeys.append('NUMELT')\n reqkeys.append('CCLS0001[CPF]')\n reqkeys.append('CCNM0001[SPECRESP]')\n reqkeys.append('CDTP0001[DATA]')\n reqkeys.append('CVSD0001')\n reqkeys.append('CVST0001')\n reqkeys.append('CDES0001')\n\n \"\"\"\n Define recommended Keywords\n \"\"\"\n optkeys = ['PHAFILE']\n optkeys.append('LO_THRES') # minimum probability threshold in matrix (values < this are set to 0)\n optkeys.append('HDUCLAS3[REDIST|DETECTOR|FULL]') # required if channel numbering doesn't start at 1\n optkeys.append('RMFVERSN[1992A]')\n optkeys.append('HDUVERS1[1.1.0]')\n optkeys.append('HDUVERS2[1.2.0]')\n\n \"\"\"\n Define Required Columns\n \"\"\"\n reqcols = ['ENERG_LO'] # lower energy bound of bin (keV)\n reqcols.append('ENERG_HI') # upper energy bound of bin (keV); generally ENERG_LO(J) = ENERG_HI(J-1)\n reqcols.append('SPECRESP') # the \"effective area\"\n\n\n \"\"\"\n Define Optional Columns\n \"\"\"\n optcols = [] # dispersion order for grating data\n\n specresp = {'KEYWORDS':{'REQUIRED':reqkeys,'RECOMMENDED':optkeys}, 'COLUMNS':{'REQUIRED':reqcols,'RECOMMENDED':optcols}}\n\n extns={'REQUIRED':['SPECRESP'],'OPTIONAL':[]}\n #\n # create structure for the ARF file\n #\n ogip = {'EXTENSIONS':extns,\n 'SPECRESP':specresp,\n 'REFERENCE':'OGIP/92-002',\n 'REFURL':'https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/ofwg_recomm.html',\n 'REFTITLE':'The Calibration Requirements for Spectral Analysis'}\n\n return ogip",
"def get_glevel_ori_agency(county_cens_file, crime_df, filename, cens_year, city_cens_file=False):\n\n \"\"\"\n 1. Append cities census file to counties census file\n \"\"\"\n national_census_df = pd.read_csv(county_cens_file)\n\n \"\"\"\n Checking for city census file coz we need to first append city census file to the bottom of county census file for 2000 and 2010.\n And city census file is passed only for 2000 and 2010 since for 1990 city and county census data is already together.\n \"\"\"\n if city_cens_file:\n cities_df = pd.read_csv(city_cens_file)\n national_census_df = national_census_df.append([cities_df])\n\n # Drop duplicates\n national_census_df = national_census_df.drop_duplicates(['STATEFP', 'place_fips'])\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/Census_{cens_year}_Unique.csv', index=False)\n\n\n \"\"\"\n 2.\n Merge census unique files with Crime_Major_Gov_Fips to get the correct cgovtype, CNTY based on fips state, fips place. \n Also obtain ORI, Agency columns from crime file. \n \"\"\"\n national_census_df = national_census_df.merge(crime_df, on=['STATEFP', 'place_fips'], how='right')\n\n\n \"\"\"\n 3. Create final Govt_level = Govt_level_y column which has govt_level values from crime file and get rid of _x and _y columns \n \"\"\"\n national_census_df['Govt_level'] = national_census_df['Govt_level_y']\n national_census_df['CNTY'] = national_census_df['CNTY_y']\n national_census_df.drop(['Govt_level_x', 'Govt_level_y', 'CNTY_x', 'CNTY_y'], axis=1, inplace=True)\n\n \"\"\"\n Add the year column to have year for even the missing census rows for certain ORIs\n \"\"\"\n national_census_df['YEAR'] = cens_year\n\n \"\"\"\n 4. Rearrange columns so that ORI, AGENCY, Govt_level are at the beginning\n \"\"\"\n cols = list(national_census_df.columns.values)\n cols.pop(cols.index('ORI'))\n cols.pop(cols.index('AGENCY'))\n cols.pop(cols.index('Govt_level'))\n cols.pop(cols.index('CNTY'))\n cols.pop(cols.index('YEAR'))\n\n national_census_df = national_census_df[['ORI', 'AGENCY', 'Govt_level', 'CNTY', 'YEAR'] + cols]\n #national_census_df = national_census_df[['ORI', 'AGENCY', 'YEAR'] + cols]\n\n # write the final df with updated govt_level, ori, agency etc. to a csv\n national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/{filename}.csv', index=False)",
"def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']",
"def _utm_description(self):\n # 'PROJCS' vs. 'PROJCRS' in rsplit\n if int(gdal.VersionInfo()) >= 3000000:\n ifo = self._info['coordinateSystem']['wkt'].rsplit('PROJCRS[\"', 1)[-1].split('\"')[0]\n else:\n ifo = self._info['coordinateSystem']['wkt'].rsplit('PROJCS[\"', 1)[-1].split('\"')[0]\n return ifo",
"def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)",
"def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo 
== True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)",
"def load_gaia_search_info(file):\n with np.load(file, 'rb', allow_pickle=True) as infile:\n # vels = infile['vels']\n pmra = infile['pmra']\n pmdec = infile['pmdec']\n parallax = infile['parallax']\n parallax_error = infile['parallax_error']\n ra = infile['ra']\n dec = infile['dec']\n\n return ra, dec, pmra, pmdec, parallax, parallax_error",
"def read_locations(db, openfile):\n pass",
"def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues",
"def get_gbif_occs(self):\n\n # Create a file to store occurrence data.\n self.occfile = os.path.join(self.outdir, self.params['spname'].replace(\" \", \"_\") + \".csv\")\n\n # Get the usageKey for species of interest.\n self.key = species.name_backbone(name = self.params['spname'], rank = 'species')['usageKey']\n\n # Create latitude/longitude lists.\n self.lats = []\n self.lons = []\n\n # Run a while-loop to go through all observations.\n curr_offset = 0\n end_records = False\n while not end_records:\n occ_records = occ.search(taxonKey = self.key, hasCoordinate = True, \n decimalLatitude = ','.join([str(self.params['ymin']), str(self.params['ymax'])]),\n decimalLongitude = ','.join([str(self.params['xmin']), str(self.params['xmax'])]),\n offset = curr_offset\n )\n end_records = occ_records['endOfRecords']\n curr_offset += occ_records['limit']\n\n # Add latitude/longitude results to lists.\n self.lats.extend([i['decimalLatitude'] for i in occ_records['results']])\n self.lons.extend([i['decimalLongitude'] for i in occ_records['results']])\n\n # Print a dot on each cycle to show progress.\n print(\".\", end = \"\")\n\n # When end of data is reached: build pandas dataframe from lists and remove duplicate data points.\n if occ_records['endOfRecords']:\n df = pd.DataFrame({'Latitude': self.lats, 'Longitude': self.lons})\n df = df.drop_duplicates().reset_index()\n df = df.drop('index', axis = 1)\n\n # Reform the lists by subsetting the dataframe.\n self.lats = list(df['Latitude'])\n self.lons = list(df['Longitude'])\n\n # Print final number of records.\n print(f' Found {len(self.lats)} records.')\n\n # Build array to write to CSV file. np.vstack layers arrays vertically, where each layer is species-lat-lon. \n # np.repeat copies the species names as many times as there are entries. It also combines with zip() to put\n # a newline char at the end of each layer.\n csvarr = np.vstack([np.repeat(self.params['spname'].replace(\" \", \"_\"), len(self.lats)), self.lats,\n [\"{}{}\".format(a_, b_) for a_, b_ in zip(self.lons, np.repeat('\\n', len(self.lats)))]]\n ).T\n\n # Write array to CSV file.\n with open(self.occfile, 'w') as f:\n f.write('Species,Latitude,Longitude\\n')\n for line in csvarr:\n f.write(\",\".join(line))\n\n # Transform lists to arrays for downstream application.\n self.lats = np.array(self.lats)\n self.lons = np.array(self.lons)",
"def __init__(self, raw_facil, raw_gir, raw_geo, proj):\n address1 = raw_facil.get('address1')\n address2 = raw_facil.get('address2')\n\n lon_lat = None\n if raw_geo:\n lon_lat = proj(\n raw_geo['longitude'],\n raw_geo['latitude'],\n inverse=True\n )\n\n self._init_attributes()\n self.source = 'facil-location'\n self.bldg_id = raw_facil['id']\n self.type = 'building'\n self.tags = []\n self.banner_abbreviation = raw_facil.get('abbreviation')\n self.name = raw_facil.get('name')\n self.campus = self._get_pretty_campus(raw_facil.get('campus'))\n self.address = self._get_address(address1, address2)\n self.city = raw_facil.get('city')\n self.state = raw_facil.get('state')\n self.zip = raw_facil.get('zip')\n self.geo_location = self._create_geo_location(\n lon_lat[0] if lon_lat else None,\n lon_lat[1] if lon_lat else None\n )\n self.geometry = self._create_geometry(\n raw_geo['coordinatesType'] if raw_geo else None,\n raw_geo['coordinates'] if raw_geo else None\n )\n self.gir_count = raw_gir['count'] if raw_gir else 0\n self.gir_limit = bool(raw_gir['limit'].strip()) if raw_gir and raw_gir['limit'] else None\n self.gir_locations = raw_gir['all'].strip() if raw_gir else None\n self.arcgis_abbreviation = (\n (raw_geo.get('abbreviation') if raw_geo else None)\n or (raw_gir.get('abbreviation') if raw_gir else None)\n )\n self.relationships = {'services': {'data': []}}\n self.merge = False\n self.open_hours = None\n self.description = None\n self.descriptionHtml = None\n self.images = None\n self.thumbnails = []\n self.website = None\n self.synonyms = None",
"def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])",
"def parse(line, half=1):\n line = line.strip()\n logger.debug(\"LINE '%s'\", line)\n\n global aipname, alonging, ats_chapter, coords_wrap, obj, feature\n global features, finalcoord, lastn, laste, lastv, airsport_intable\n global border, re_coord3, country\n global sectors, name_cont, cold_resp\n\n if line==LINEBREAK:\n # drop current feature, if we don't have vertl by now,\n # then this is just an overview polygon\n feature = {\"properties\":{}}\n obj = []\n alonging = False\n coords_wrap = \"\"\n lastv = None\n return\n\n\n if ad_aip and not \"ENNO\" in filename:\n if not ats_chapter:\n # skip to chapter 2.71\n if \"ATS airspace\" in line or \"ATS AIRSPACE\" in line:\n logger.debug(\"Found chapter 2.71\")\n ats_chapter=True\n return\n else:\n # then skip everything after\n if \"AD 2.\" in line or \"ATS COMM\" in line:\n #if \"ATS komm\" in line or \"Kallesignal\" in line:\n logger.debug(\"End chapter 2.71\")\n ats_chapter=False\n\n class_=re_class.search(line) or re_class2.search(line) or re_class_openair.search(line)\n if class_:\n logger.debug(\"Found class in line: %s\", line)\n class_=class_.groupdict()\n feature['properties']['class']=class_.get('class')\n if tia_aip:\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n return\n\n # SPECIAL CASE temporary workaround KRAMFORS\n if aipname and (\"KRAMFORS\" in aipname) and (\"within\" in line):\n return\n # SPECIAL CASE workaround SÄLEN/SAAB CTR sectors\n if aipname and ((\"SÄLEN\" in aipname) or (\"SAAB\" in aipname)) and (\"Sector\" in line):\n logger.debug(\"TEST: Breaking up SÄLEN/SAAB, aipname=.\"+aipname)\n sectors.append((aipname, obj))\n feature, obj = {\"properties\":{}}, []\n if \"SÄLEN\" in aipname:\n aipname = \"SÄLEN CTR \"+line\n else:\n aipname = \"SAAB CTR \"+line\n # SPECIAL CASE check for Valldal AIP names\n if valldal and 'Valldal' in line:\n aipname=\" \".join(line.strip().split()[0:2])\n logger.debug(\"Valldal aipname: '%s'\", aipname)\n feature['properties']['class']='Luftsport'\n feature['properties']['from (ft amsl)']=0\n feature['properties']['from (m amsl)'] =0\n\n coords = re_coord.search(line)\n coords2 = re_coord2.search(line)\n coords3 = re_coord3.findall(line)\n\n if (coords or coords2 or coords3):\n\n logger.debug(\"Found %i coords in line: %s\", coords3 and len(coords3) or 1, line)\n logger.debug(printj(coords3))\n if line.strip()[-1] == \"N\":\n coords_wrap += line.strip() + \" \"\n logger.debug(\"Continuing line after N coordinate: %s\", coords_wrap)\n return\n elif coords_wrap:\n nline = coords_wrap + line\n logger.debug(\"Continued line: %s\", nline)\n coords = re_coord.search(nline)\n coords2 = re_coord2.search(nline)\n coords3 = re_coord3.findall(nline)\n logger.debug(\"Found %i coords in merged line: %s\", coords3 and len(coords3) or '1', nline)\n line = nline\n coords_wrap = \"\"\n\n if coords and not (\"Lyng\" in aipname or \"Halten\" in aipname):\n coords = coords.groupdict()\n n = coords.get('cn') or coords.get('n')\n e = coords.get('ce') or coords.get('e')\n #n = coords.get('n') or coords.get('cn')\n #e = coords.get('e') or coords.get('ce')\n rad = coords.get('rad')\n if not rad:\n rad_m = coords.get('rad_m')\n if rad_m:\n rad = m2nm(rad_m)\n if not n or not e or not rad:\n coords_wrap += line.strip() + \" \"\n # FIXME: incomplete circle continuation is broken\n logger.debug(\"Continuing line after incomplete circle: %s\", coords_wrap)\n return\n lastn, laste = n, e\n logger.debug(\"Circle center is %s %s %s %s\", coords.get('n'), 
coords.get('e'), coords.get('cn'), coords.get('ce'))\n logger.debug(\"COORDS is %s\", json.dumps(coords))\n c_gen = gen_circle(n, e, rad)\n logger.debug(\"LENS %s %s\", len(obj), len(c_gen))\n obj = merge_poly(obj, c_gen)\n logger.debug(\"LENS %s %s\", len(obj), len(c_gen))\n\n elif coords2:\n coords = coords2.groupdict()\n n = coords.get('n')\n e = coords.get('e')\n if n is None and e is None:\n n,e = lastn, laste\n secfrom = coords.get('secfrom')\n secto = coords.get('secto')\n radfrom = coords.get('radfrom')\n radto = coords.get('rad')\n c_gen = gen_sector(n, e, secfrom, secto, radfrom, radto)\n\n obj = merge_poly(obj, c_gen)\n\n else:\n skip_next = 0\n for blob in coords3:\n ne,n,e,along,arc,rad,cn,ce = blob[:8]\n circle = blob[8] if len(blob)==9 else None\n logger.debug(\"Coords: %s\", (n,e,ne,along,arc,rad,cn,ce,circle))\n if skip_next > 0 and n:\n logger.debug(\"Skipped.\")\n skip_next -= 1\n continue\n if arc:\n arcdata = re_arc.search(line)\n if not arcdata:\n coords_wrap += line.strip() + \" \"\n logger.debug(\"Continuing line after incomplete arc: %s\", coords_wrap)\n return\n arcdata = arcdata.groupdict()\n logger.debug(\"Completed arc: %s\", arcdata)\n n = arcdata['n']\n e = arcdata['e']\n rad = arcdata.get('rad1') or arcdata.get('rad2')\n arc = gen_circle(n, e, rad, convert=False)\n to_n = arcdata['n2']\n to_e = arcdata['e2']\n cw = arcdata['dir']\n logger.debug(\"ARC IS \"+cw)\n fill = fill_along(obj[-1],(to_n,to_e), arc, (cw=='clockwise'))\n lastn, laste = None, None\n\n for apair in fill:\n bn, be = ll2c(apair)\n obj.insert(0,(bn,be))\n skip_next = 1\n elif circle:\n coords_wrap += line.strip() + \" \"\n # FIXME: incomplete circle continuation is broken\n logger.debug(\"Continuing line after incomplete circle (3): %s\", coords_wrap)\n return\n\n\n if alonging:\n if not n and not e:\n n, e = lastn, laste\n fill = fill_along(alonging, (n,e), border)\n alonging = False\n lastn, laste = None, None\n #HACK matching point in the wrong direction - FIXME don't select closest but next point in correct direction\n if \"Sälen TMA b\" in aipname or \"SÄLEN CTR Sector b\" in aipname:\n fill=fill[1:]\n for bpair in fill:\n bn, be = ll2c(bpair)\n obj.insert(0,(bn,be))\n\n if rad and cn and ce:\n c_gen = gen_circle(cn, ce, rad)\n logger.debug(\"Merging circle using cn, ce.\")\n obj = merge_poly(obj, c_gen)\n if n and e:\n lastn, laste = n, e\n obj.insert(0,(n,e))\n if along:\n if not n and not e:\n n, e = lastn, laste\n alonging = (n,e)\n if '(' in ne:\n finalcoord = True\n logger.debug(\"Found final coord.\")\n else:\n finalcoord = False\n if (airsport_aip or aip_sup or military_aip) and finalcoord:\n if feature['properties'].get('from (ft amsl)') is not None:\n logger.debug(\"Finalizing: finalcoord.\")\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n lastv = None\n\n if not valldal:\n return\n\n # IDENTIFY temporary restrictions\n period = re_period.search(line) or re_period2.search(line) or re_period3.search(line)\n\n if cold_resp and not feature.get('properties',{}).get('temporary'):\n logger.debug(\"Adding temporary restriction to cold response airspace.\")\n feature['properties']['temporary'] = True\n feature['properties']['dashArray'] = \"5 5\"\n feature['properties']['Date from'] = [\"14 MAR\"]\n feature['properties']['Date until'] = [\"31 MAR\"]\n feature['properties']['Time from (UTC)'] = \"0000\"\n feature['properties']['Time to (UTC)'] = \"2359\"\n\n # IDENTIFY frequencies\n freq = re_freq.search(line)\n if freq:\n 
freq = freq.groupdict()\n logger.debug(\"Found FREQUENCY: %s\", freq['freq'])\n feature['properties']['frequency'] = freq.get('freq')\n\n # IDENTIFY altitude limits\n vertl = re_vertl_upper.search(line) or re_vertl_lower.search(line) or re_vertl.search(line) or re_vertl2.search(line) or (military_aip and re_vertl3.search(line))\n\n if vertl:\n vertl = vertl.groupdict()\n logger.debug(\"Found vertl in line: %s\", vertl)\n fromamsl, toamsl = None, None\n\n v = vertl.get('ftamsl')\n flfrom = vertl.get('flfrom')\n flto = vertl.get('flto')\n fl = vertl.get('fl')\n rmk = vertl.get('rmk')\n\n if rmk is not None:\n v = 14999 # HACK: rmk = \"Lower limit of controlled airspace -> does not affect us\"\n if fl is not None:\n v = int(fl) * 100\n\n if flto is not None:\n toamsl = int(flto) * 100\n if flfrom:\n fromamsl = v or (int(flfrom) * 100)\n fl = fl or flfrom\n elif flfrom is not None:\n fromamsl = int(flfrom) * 100\n fl = fl or flfrom\n elif v is not None:\n if lastv is None:\n toamsl = v\n if fl is not None:\n flto = fl\n else:\n fromamsl = v\n else:\n fromamsl = vertl.get('msl',vertl.get('gnd',vertl.get('from')))\n if fromamsl == \"GND\": fromamsl = 0\n if fromamsl == \"MSL\": fromamsl = 0\n toamsl = vertl.get('unl',vertl.get('to'))\n if toamsl == \"UNL\": toamsl = 999999\n\n if toamsl is not None:\n lastv = toamsl\n currentv = feature['properties'].get('to (ft amsl)')\n if currentv is not None and currentv != toamsl:\n logger.warning(\"attempt to overwrite vertl_to %s with %s.\" % (currentv, toamsl))\n if int(currentv) > int(toamsl):\n logger.warning(\"skipping.\")\n return\n logger.warning(\"ok.\")\n if flto is not None:\n feature['properties']['to (fl)']=flto\n feature['properties']['to (ft amsl)']=toamsl\n feature['properties']['to (m amsl)'] = ft2m(toamsl)\n if valldal:\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n lastv = None\n if fromamsl is not None:\n currentv = feature['properties'].get('from (ft amsl)')\n if currentv is not None and currentv != fromamsl:\n logger.warning(\"attempt to overwrite vertl_from %s with %s.\" % (currentv, fromamsl))\n if int(currentv) < int(fromamsl):\n logger.warning(\"skipping.\")\n return\n logger.warning(\"ok.\")\n if fl is not None:\n feature['properties']['from (fl)']=fl\n feature['properties']['from (ft amsl)']=fromamsl\n feature['properties']['from (m amsl)'] = ft2m(fromamsl)\n lastv = None\n if (((cta_aip or airsport_aip or aip_sup or tia_aip or (aipname and (\"TIZ\" in aipname))) and (finalcoord or tia_aip_acc)) or country != 'EN'):\n logger.debug(\"Finalizing poly: Vertl complete.\")\n if aipname and ((\"SÄLEN\" in aipname) or (\"SAAB\" in aipname)) and len(sectors)>0:\n for x in sectors[1:]: # skip the first sector, which is the union of the other sectors in Swedish docs\n aipname_, obj_ = x\n logger.debug(\"Restoring \"+aipname_+\" \"+str(len(sectors)))\n feature_ = deepcopy(feature)\n logger.debug(\"Finalizing SAAB/SÄLEN: \" + aipname_)\n finalize(feature_, features, obj_, source, aipname_, cta_aip, restrict_aip, aip_sup, tia_aip)\n sectors = []\n logger.debug(\"Finalizing last poly as .\"+aipname)\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n\n logger.debug(\"From %s to %s\", feature['properties'].get('from (ft amsl)'), feature['properties'].get('to (ft amsl)'))\n return\n\n # IDENTIFY airspace naming\n name = re_name.search(line) or re_name2.search(line) or re_name3.search(line) or re_name4.search(line) or \\\n 
re_miscnames.search(line) or re_name5.search(line) or re_name_cr.search(line) or re_name6.search(line) or \\\n re_name_openair.search(line)\n\n if name_cont and not 'Real time' in line:\n aipname = aipname + \" \" + line\n logger.debug(\"Continuing name as \"+aipname)\n if line == '' or 'EN D' in aipname:\n name_cont = False\n\n if name:\n named=name.groupdict()\n if en_enr_5_1 or \"Hareid\" in line:\n logger.debug(\"RESTRICT/HAREID\")\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n lastv = None\n\n name=named.get('name')\n if 'polaris' in name.lower() and 'norway' in name.lower():\n pos = name.lower().index('norway')\n name = name[:pos]\n\n if name[:6]==\"Sector\" and \"ACC\" in aipname:\n return\n\n if named.get('name_cont'):\n name += ' '+named.get('name_cont')\n name_cont=True\n\n if (name == \"Sector a\") or (name == \"Sector b\") or (aipname and (\"Sector\" in aipname) and ((\"SÄLEN\" in aipname) or (\"SAAB\" in aipname))):\n return\n if \"ES R\" in name or \"ES D\" in name:\n name_cont=True\n if \"EN D\" in name and len(name)<8:\n name_cont=True\n\n if restrict_aip or military_aip:\n if feature['properties'].get('from (ft amsl)') is not None and (feature['properties'].get('to (ft amsl)') or \"Romerike\" in aipname or \"Oslo\" in aipname):\n logger.debug(\"RESTRICT/MILITARY + name and vertl complete\")\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n lastv = None\n else:\n logger.debug(\"RESTRICT/MILITARY + name and vertl NOT complete\")\n\n aipname = name\n logger.debug(\"Found name '%s' in line: %s\", aipname, line)\n return\n\n # The airsport document doesn't have recognizable airspace names\n # so we just assume every line that isn't otherwise parsed is the name of the next box.\n if airsport_aip and line.strip():\n logger.debug(\"Unhandled line in airsport_aip: %s\", line)\n if wstrip(line)==\"1\":\n logger.debug(\"Starting airsport_aip table\")\n airsport_intable = True\n elif wstrip(line)[0] != \"2\" and airsport_intable:\n logger.debug(\"Considering as new aipname: '%s'\", line)\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)\n aipname = wstrip(line)\n\n if line.strip()==\"-+-\":\n feature, obj = finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip)",
"def get_airports():\n iata_to_city = {}\n with open('./airports.txt') as f:\n for line in f.readlines():\n line = line.strip()\n\n if len(line) < 5:\n continue\n\n r = line.strip().split(',')[0]\n r = r.replace(' ', '')\n iata, city = r.split('-', 1)\n\n if iata_to_city.get(iata) is None:\n iata_to_city[iata] = city\n\n return iata_to_city",
"def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data",
"def openFullProfFile(self, filename):\n handle = open(filename)\n lines = handle.readlines()\n handle.close()\n atoms = []\n bonds = []\n conns = []\n for line in lines:\n if line[0:4] == \"CELL\":\n #format of line: CELL a b c alpha beta gamma\n vals = line.split()\n print vals\n a = float(vals[1])\n b = float(vals[2])\n c = float(vals[3])\n alpha = float(vals[4])\n gamma = float(vals[5])\n beta = float(vals[6])\n elif line[0:6] == \"SPACEG\":\n #this is the space group in Hermann-Mauguin notation.\n hm_spacegroup = (line[6:]).strip().upper()\n space_group = GetSpaceGroup(hm_spacegroup)\n elif line[0:3] == \"BOX\":\n #Format: xmin xmax ymin ymax zmin zmax\n #In this program, however, xmin, ymin, zmin = 0,0,0 always.\n vals = line.split()\n a_diff = float(vals[2]) - float(vals[1])\n b_diff = float(vals[4]) - float(vals[3])\n c_diff = float(vals[6]) - float(vals[5])\n a_cutoff = int(a_diff)\n b_cutoff = int(b_diff)\n c_cutoff = int(c_diff)\n if a_diff - a_cutoff > 0:\n a_cutoff += 1\n if b_diff - b_cutoff > 0:\n b_cutoff += 1\n if c_diff - c_cutoff > 0:\n c_cutoff += 1\n elif line[0:4] == \"ATOM\":\n vals = line.split()\n label = vals[1]\n symbol = vals[2]\n a_coord = float(vals[3])\n b_coord = float(vals[4])\n c_coord = float(vals[5])\n position = (a_coord, b_coord, c_coord)\n #Get the radius which is right after the word \"RADIUS\"\n for i in range(len(vals)):\n if vals[i] == \"RADIUS\":\n radius = float(vals[i+1])\n break\n else:\n radius = None\n #Get the color which is right after the word \"COLOR\"\n for i in range(len(vals)):\n if vals[i] == \"COLOR\":\n color = [float(vals[i+1]), float(vals[i+2]), float(vals[i+3])]\n break\n else:\n color = None\n #atomData format (each line):\n #label massNum aPos bPos cPos anisotropy_a anisotropy_b anistropy_c spin valence\n atoms.append([label, symbol, position, radius, color])\n elif line[0:4] == \"BOND\":\n #Format: BOND label1 label2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n bonds.append([vals[1], vals[2], vals[3], vals[4]])\n elif line[0:4] == \"CONN\":\n #Format: BOND symbol1 symbol2 min_dist max_dist RADIUS rad COLOR r g b t\n #The color and radius need not be there and will be ignored for now since\n #the color and radius of bonds is hardcoded in right now.\n vals = line.split()\n conns.append([vals[1], vals[2], vals[3], vals[4]])\n \n \n self.newCell(space_group.number, a, b, c, alpha, beta, gamma, 1, 1, 1,\n a_cutoff, b_cutoff, c_cutoff)\n \n for atom in atoms:\n #FPStudio does not seem to support isotopes\n massNum = None\n self.addAtom(atom[1], atom[2], massNum = massNum, radius = atom[3], rgb = atom[4])\n \n for bond in bonds:\n self.createBonds(label1 = bonds[0], label2 = bonds[1],\n minDist = bonds[2], maxDist = bonds[3])\n for conn in conns:\n self.createBonds(symbol1 = conns[0], symbol2 = conns[1],\n minDist = conns[2], maxDist = conns[3])\n \n self.refreshGUI()\n #self.cellChange(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.updateCell(space_group.number, a, b, c, alpha, beta, gamma, magNa = 1, magNb = 1, magNc = 1, cutNa = a_cutoff, cutNb = b_cutoff, cutNc = c_cutoff, atomData = atoms)\n #self.refreshGUI()\n \n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\n send(signal = \"File Load\", sender = 
\"Session\",\n spaceGroup = space_group.number, a = a, b = b, c = c,\n alpha = alpha, beta = beta, gamma = gamma, magNa = a_cutoff,\n magNb = b_cutoff, magNc = c_cutoff, cutNa = a_cutoff,\n cutNb = b_cutoff, cutNc = c_cutoff)\n \n \n #TODO: use these values extracted. You could combine the three file opening functions.\n #Each function would have to extract values form it's format and then a single function\n #could be used for all three to construct the model from the extracted values.e",
"def open_igra_metadata(filename):\n import pandas as pd\n infos = \"\"\"\n IGRAID 1- 11 Character\n WMOID 13- 17 Integer\n NAME 19- 48 Character\n NAMFLAG 50- 50 Character\n LATITUDE 52- 60 Real\n LATFLAG 62- 62 Character\n LONGITUDE 64- 72 Real\n LONFLAG 74- 74 Character\n ELEVATION 76- 81 Real\n ELVFLAG 83- 83 Character\n YEAR 85- 88 Integer\n MONTH 90- 91 Integer\n DAY 93- 94 Integer\n HOUR 96- 97 Integer\n DATEIND 99- 99 Integer\n EVENT 101-119 Character\n ALTIND 121-122 Character\n BEFINFO 124-163 Character\n BEFFLAG 164-164 Character\n LINK 166-167 Character\n AFTINFO 169-208 Character\n AFTFLAG 209-209 Character\n REFERENCE 211-235 Character\n COMMENT 236-315 Character\n UPDCOM 316-346 Character\n UPDDATE 348-354 Character\n \"\"\"\n import numpy as np\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n elif it == 'Real':\n it = 'float'\n else:\n it = 'int'\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data",
"def vars_ifile(ifile):\n\n site = None\n year = None\n actual = None\n doy = None\n Ndays = None\n params = None\n Nsteps = None\n models = None\n resolution = None\n fopt = None\n window = None\n tag = None\n photo = None\n plot = None\n project = None\n\n with open(ifile, 'r') as f:\n\n for line in f:\n\n ll = line.strip()\n\n if not ll.startswith(\"#\"):\n ll = ll.rstrip()\n\n if site is None:\n site = read_var('site', ll)\n\n if year is None:\n year = read_var('year', ll)\n\n if actual is None:\n actual = read_var('actual', ll)\n\n if doy is None:\n doy = read_var('doy', ll)\n\n if Ndays is None:\n Ndays = read_var('Ndays', ll)\n\n if params is None:\n params = read_var('params', ll)\n\n if Nsteps is None:\n Nsteps = read_var('Nsteps', ll)\n\n if models is None:\n models = read_var('models', ll)\n\n if resolution is None:\n resolution = read_var('resolution', ll)\n\n if fopt is None:\n fopt = read_var('fopt', ll)\n\n if window is None:\n window = read_var('window', ll)\n\n if tag is None:\n tag = read_var('tag', ll)\n\n if photo is None:\n photo = read_var('photo', ll)\n\n if plot is None:\n plot = read_var('plot', ll)\n\n if project is None:\n project = read_var('project', ll)\n\n if actual is None: # make sure the spinup only runs for the Control\n models = 'Control'\n\n return (site, year, actual, doy, Ndays, params, Nsteps, models, resolution,\n fopt, window, tag, photo, plot, project)",
"def _read_onr2_3(self, data: bytes, ndata: int):\n op2 = self.op2\n op2.nonlinear_factor = np.nan\n op2.is_table_1 = False\n op2.is_table_2 = True\n unused_three = op2.parse_approach_code(data)\n op2.words = [\n 'aCode', 'tCode', 'eTotal', 'isubcase',\n '???', '???', 'element_name', 'load_set',\n 'format_code', 'num_wide', 'cvalres', 'setID',\n 'setID', 'eigenReal', 'eigenImag', 'rmssf',\n 'etotpos', 'etotneg', 'thresh', '???',\n '???', '???', '???', '???',\n '???', 'Title', 'subtitle', 'label']\n\n self._onr_element_name(data)\n\n #: Load set or zero\n op2.load_set = op2.add_data_parameter(data, 'load_set', b'i', 8, False)\n\n #: format code\n op2.format_code = op2.add_data_parameter(data, 'format_code', b'i', 9, False)\n\n #: number of words per entry in record\n #: .. note:: is this needed for this table ???\n op2.num_wide = op2.add_data_parameter(data, 'num_wide', b'i', 10, False)\n ## C\n op2.cvalres = op2.add_data_parameter(data, 'cvalres', b'i', 11, False)\n\n #: Set identification number Number\n op2.set_id = op2.add_data_parameter(data, 'set_id', b'i', 13, False)\n\n #: Natural eigenvalue - real part\n op2.eigen_real = op2.add_data_parameter(data, 'eigen_real', b'i', 14, False)\n\n #: Natural eigenvalue - imaginary part\n op2.eigen_imag = op2.add_data_parameter(data, 'eigen_imag', b'i', 15, False)\n\n #: Natural frequency\n op2.freq = op2.add_data_parameter(data, 'freq', b'f', 16, False)\n\n #: RMS and CRMS scale factor - NX\n op2.rmssf = op2.add_data_parameter(data, 'rmssf', b'f', 17)\n\n #: Total positive energy\n op2.etotpos = op2.add_data_parameter(data, 'etotpos', b'f', 18)\n\n #: Total negative energy\n op2.etotneg = op2.add_data_parameter(data, 'etotneg', b'f', 19, False)\n\n #: Energy Threshold - NX\n op2.thresh = op2.add_data_parameter(data, 'thresh', b'f', 17)\n\n op2.element_id = op2.add_data_parameter(data, 'node_id', b'i', 5, fix_device_code=True)\n #if op2.analysis_code == 1: # statics / displacement / heat flux\n ## load set number\n #op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5, False)\n #op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n #op2.setNullNonlinearFactor()\n\n if op2.analysis_code == 1: # static...because reasons.\n op2._analysis_code_fmt = b'i'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'N/A')\n elif op2.analysis_code == 2: # real eigenvalues\n ## mode number\n op2.mode = op2.add_data_parameter(data, 'mode', b'i', 5)\n op2._analysis_code_fmt = b'i'\n ## real eigenvalue\n op2.eigr = op2.add_data_parameter(data, 'eigr', b'f', 6, False)\n ## mode or cycle .. 
todo:: confused on the type - F1???\n op2.mode_cycle = op2.add_data_parameter(data, 'mode_cycle', b'f', 7, False)\n op2.data_names = op2.apply_data_code_value('data_names',\n ['node_id', 'eigr', 'mode_cycle'])\n op2.apply_data_code_value('analysis_method', 'mode')\n #elif op2.analysis_code == 3: # differential stiffness\n #op2.lsdvmn = self.get_values(data, b'i', 5) ## load set number\n #op2.data_names = op2.data_code['lsdvmn'] = op2.lsdvmn\n #elif op2.analysis_code == 4: # differential stiffness\n #op2.lsdvmn = self.get_values(data, b'i', 5) ## load set number\n elif op2.analysis_code == 5: # frequency\n ## frequency\n #op2.freq = op2.add_data_parameter(data, 'freq', b'f', 5)\n op2._analysis_code_fmt = b'f'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'freq')\n elif op2.analysis_code == 6: # transient\n ## time step\n #op2.dt = op2.add_data_parameter(data, 'dt', b'f', 5)\n op2._analysis_code_fmt = b'f'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'dt')\n elif op2.analysis_code == 7: # pre-buckling\n ## load set number\n #op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5)\n op2._analysis_code_fmt = b'i'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'lsdvmn')\n elif op2.analysis_code == 8: # post-buckling\n ## load set number\n #op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5)\n op2._analysis_code_fmt = b'i'\n ## real eigenvalue\n op2.eigr = op2.add_data_parameter(data, 'eigr', b'f', 6, False)\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id', 'eigr'])\n op2.apply_data_code_value('analysis_method', 'eigr')\n elif op2.analysis_code == 9: # complex eigenvalues\n ## mode number\n op2.mode = op2.add_data_parameter(data, 'mode', b'i', 5)\n op2._analysis_code_fmt = b'i'\n ## real eigenvalue\n #op2.eigr = op2.add_data_parameter(data, 'eigr', b'f', 6, False)\n ## imaginary eigenvalue\n op2.eigi = op2.add_data_parameter(data, 'eigi', b'f', 7, False)\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id', 'eigr', 'eigi'])\n op2.apply_data_code_value('analysis_method', 'mode')\n elif op2.analysis_code == 10: # nonlinear statics\n ## load step\n #self.lftsfq = op2.add_data_parameter(data, 'lftsfq', b'f', 5)\n op2._analysis_code_fmt = b'f'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'lftsfq')\n elif op2.analysis_code == 11: # old geometric nonlinear statics\n ## load set number\n #op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5)\n op2._analysis_code_fmt = b'f'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n elif op2.analysis_code == 12:\n # contran ? 
(may appear as aCode=6) --> straight from DMAP...grrr...\n ## load set number\n #op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5)\n op2._analysis_code_fmt = b'i'\n op2.data_names = op2.apply_data_code_value('data_names', ['node_id'])\n op2.apply_data_code_value('analysis_method', 'lsdvmn')\n else:\n msg = 'invalid analysis_code...analysis_code=%s' % op2.analysis_code\n raise RuntimeError(msg)\n\n op2.fix_format_code()\n if op2.num_wide == 8:\n op2.format_code = 1\n op2.data_code['format_code'] = 1\n else:\n #op2.fix_format_code()\n if op2.format_code == 1:\n op2.format_code = 2\n op2.data_code['format_code'] = 2\n assert op2.format_code in [2, 3], op2.code_information()\n\n if op2.is_debug_file:\n op2.binary_debug.write(' approach_code = %r\\n' % op2.approach_code)\n op2.binary_debug.write(' tCode = %r\\n' % op2.tCode)\n op2.binary_debug.write(' isubcase = %r\\n' % op2.isubcase)\n op2._read_title(data)\n op2._write_debug_bits()",
"def osp2():\n return dict(\n kloc= range(75,125),\n docu = [3,4], ltex = [2,5],\n sced = [2,3,4], Pmat = [4,5],\n Prec = [3,4, 5],\n Resl = [4], Team = [3],\n acap = [4], aexp = [4],\n cplx = [4], data = [4],\n Flex = [3], pcap = [3],\n pcon = [3], pexp = [4],\n pvol = [3], rely = [5],\n ruse = [4], site = [6],\n stor = [3], time = [3],\n tool = [5])",
"def test_parses_map_3(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"X\", p.faa_mode)\n self.assertEquals(\"11\", p.checksum)",
"def _scan_axograph_file(self):\n\n self.info = {}\n\n with open(self.filename, 'rb') as fid:\n f = StructFile(fid)\n\n self.logger.debug('filename: {}'.format(self.filename))\n self.logger.debug('')\n\n # the first 4 bytes are always a 4-character file type identifier\n # - for early versions of AxoGraph, this identifier was 'AxGr'\n # - starting with AxoGraph X, the identifier is 'axgx'\n header_id = f.read(4).decode('utf-8')\n self.info['header_id'] = header_id\n assert header_id in ['AxGr', 'axgx'], \\\n 'not an AxoGraph binary file! \"{}\"'.format(self.filename)\n\n self.logger.debug('header_id: {}'.format(header_id))\n\n # the next two numbers store the format version number and the\n # number of data columns to follow\n # - for 'AxGr' files, these numbers are 2-byte unsigned short ints\n # - for 'axgx' files, these numbers are 4-byte long ints\n # - the 4-character identifier changed from 'AxGr' to 'axgx' with\n # format version 3\n if header_id == 'AxGr':\n format_ver, n_cols = f.read_f('HH')\n assert format_ver == 1 or format_ver == 2, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n elif header_id == 'axgx':\n format_ver, n_cols = f.read_f('ll')\n assert format_ver >= 3, \\\n 'mismatch between header identifier \"{}\" and format ' \\\n 'version \"{}\"!'.format(header_id, format_ver)\n else:\n raise NotImplementedError(\n 'unimplemented file header identifier \"{}\"!'.format(\n header_id))\n self.info['format_ver'] = format_ver\n self.info['n_cols'] = n_cols\n\n self.logger.debug('format_ver: {}'.format(format_ver))\n self.logger.debug('n_cols: {}'.format(n_cols))\n self.logger.debug('')\n\n ##############################################\n # BEGIN COLUMNS\n\n sig_memmaps = []\n sig_channels = []\n for i in range(n_cols):\n\n self.logger.debug('== COLUMN INDEX {} =='.format(i))\n\n ##############################################\n # NUMBER OF DATA POINTS IN COLUMN\n\n n_points = f.read_f('l')\n\n self.logger.debug('n_points: {}'.format(n_points))\n\n ##############################################\n # COLUMN TYPE\n\n # depending on the format version, data columns may have a type\n # - prior to version 3, column types did not exist and data was\n # stored in a fixed pattern\n # - beginning with version 3, several data types are available\n # as documented in AxoGraph_ReadWrite.h\n if format_ver == 1 or format_ver == 2:\n col_type = None\n elif format_ver >= 3:\n col_type = f.read_f('l')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('col_type: {}'.format(col_type))\n\n ##############################################\n # COLUMN NAME AND UNITS\n\n # depending on the format version, column titles are stored\n # differently\n # - prior to version 3, column titles were stored as\n # fixed-length 80-byte Pascal strings\n # - beginning with version 3, column titles are stored as\n # variable-length strings (see StructFile.read_string for\n # details)\n if format_ver == 1 or format_ver == 2:\n title = f.read_f('80p').decode('utf-8')\n elif format_ver >= 3:\n title = f.read_f('S')\n else:\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n self.logger.debug('title: {}'.format(title))\n\n # units are given in parentheses at the end of a column title,\n # unless units are absent\n if len(title.split()) > 0 and title.split()[-1][0] == '(' and \\\n title.split()[-1][-1] == ')':\n name = ' 
'.join(title.split()[:-1])\n units = title.split()[-1].strip('()')\n else:\n name = title\n units = ''\n\n self.logger.debug('name: {}'.format(name))\n self.logger.debug('units: {}'.format(units))\n\n ##############################################\n # COLUMN DTYPE, SCALE, OFFSET\n\n if format_ver == 1:\n\n # for format version 1, all columns are arrays of floats\n\n dtype = 'f'\n gain, offset = 1, 0 # data is neither scaled nor off-set\n\n elif format_ver == 2:\n\n # for format version 2, the first column is a \"series\" of\n # regularly spaced values specified merely by a first value\n # and an increment, and all subsequent columns are arrays\n # of shorts with a scaling factor\n\n if i == 0:\n\n # series\n first_value, increment = f.read_f('ff')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n # scaled short\n dtype = 'h'\n gain, offset = \\\n f.read_f('f'), 0 # data is scaled without offset\n\n elif format_ver >= 3:\n\n # for format versions 3 and later, the column type\n # determines how the data should be read\n # - column types 1, 2, 3, and 8 are not defined in\n # AxoGraph_ReadWrite.h\n # - column type 9 is different from the others in that it\n # represents regularly spaced values\n # (such as times at a fixed frequency) specified by a\n # first value and an increment, without storing a large\n # data array\n\n if col_type == 9:\n\n # series\n first_value, increment = f.read_f('dd')\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n if i == 0:\n\n # assume this is the time column\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip memmap, chan info for time col\n\n else:\n\n raise NotImplementedError(\n 'series data are supported only for the first '\n 'data column (time)!')\n\n elif col_type == 4:\n\n # short\n dtype = 'h'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 5:\n\n # long\n dtype = 'l'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 6:\n\n # float\n dtype = 'f'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 7:\n\n # double\n dtype = 'd'\n gain, offset = 1, 0 # data neither scaled nor off-set\n\n elif col_type == 10:\n\n # scaled short\n dtype = 'h'\n gain, offset = f.read_f('dd') # data scaled w/ offset\n\n else:\n\n raise NotImplementedError(\n 'unimplemented column type \"{}\"!'.format(col_type))\n\n else:\n\n raise NotImplementedError(\n 'unimplemented file format version \"{}\"!'.format(\n format_ver))\n\n ##############################################\n # COLUMN MEMMAP AND CHANNEL INFO\n\n # create a memory map that allows accessing parts of the file\n # without loading it all into memory\n array = np.memmap(\n self.filename,\n mode='r',\n dtype=f.byte_order + dtype,\n offset=f.tell(),\n shape=n_points)\n\n # advance the file position to after the data array\n f.seek(array.nbytes, 1)\n\n if i == 
0:\n # assume this is the time column containing n_points values\n\n # verify times are spaced regularly\n diffs = np.diff(array)\n increment = np.median(diffs)\n max_frac_step_deviation = np.max(np.abs(\n diffs / increment - 1))\n tolerance = 1e-3\n if max_frac_step_deviation > tolerance:\n self.logger.debug('largest proportional deviation '\n 'from median step size in the first '\n 'column exceeds the tolerance '\n 'of ' + str(tolerance) + ':'\n ' ' + str(max_frac_step_deviation))\n raise ValueError('first data column (assumed to be '\n 'time) is not regularly spaced')\n\n first_value = array[0]\n\n self.logger.debug(\n 'interval: {}, freq: {}'.format(\n increment, 1 / increment))\n self.logger.debug(\n 'start: {}, end: {}'.format(\n first_value,\n first_value + increment * (n_points - 1)))\n\n t_start, sampling_period = first_value, increment\n self.info['t_start'] = t_start\n self.info['sampling_period'] = sampling_period\n\n self.logger.debug('')\n\n continue # skip saving memmap, chan info for time col\n\n else:\n # not a time column\n\n self.logger.debug('gain: {}, offset: {}'.format(gain, offset))\n self.logger.debug('initial data: {}'.format(\n array[:5] * gain + offset))\n\n # channel_info will be cast to _signal_channel_dtype\n channel_info = (\n name, str(i), 1 / sampling_period, f.byte_order + dtype,\n units, gain, offset, '0')\n\n self.logger.debug('channel_info: {}'.format(channel_info))\n self.logger.debug('')\n\n sig_memmaps.append(array)\n sig_channels.append(channel_info)\n\n # END COLUMNS\n ##############################################\n\n # initialize lists for events and epochs\n raw_event_timestamps = []\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n event_labels = []\n epoch_labels = []\n\n # the remainder of the file may contain metadata, events and epochs\n try:\n\n ##############################################\n # COMMENT\n\n self.logger.debug('== COMMENT ==')\n\n comment = f.read_f('S')\n self.info['comment'] = comment\n\n self.logger.debug(comment if comment else 'no comment!')\n self.logger.debug('')\n\n ##############################################\n # NOTES\n\n self.logger.debug('== NOTES ==')\n\n notes = f.read_f('S')\n self.info['notes'] = notes\n\n self.logger.debug(notes if notes else 'no notes!')\n self.logger.debug('')\n\n ##############################################\n # TRACES\n\n self.logger.debug('== TRACES ==')\n\n n_traces = f.read_f('l')\n self.info['n_traces'] = n_traces\n\n self.logger.debug('n_traces: {}'.format(n_traces))\n self.logger.debug('')\n\n trace_header_info_list = {}\n group_ids = []\n for i in range(n_traces):\n\n # AxoGraph traces are 1-indexed in GUI, so use i+1 below\n self.logger.debug('== TRACE #{} =='.format(i + 1))\n\n trace_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n trace_header_info['trace_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n trace_header_info['trace_header_version'] = \\\n f.read_f('l')\n\n if trace_header_info['trace_header_version'] == 1:\n TraceHeaderDescription = TraceHeaderDescriptionV1\n elif trace_header_info['trace_header_version'] == 2:\n TraceHeaderDescription = TraceHeaderDescriptionV2\n else:\n raise NotImplementedError(\n 'unimplemented trace header version \"{}\"!'.format(\n trace_header_info['trace_header_version']))\n\n for key, fmt in TraceHeaderDescription:\n trace_header_info[key] = f.read_f(fmt)\n # AxoGraph 
traces are 1-indexed in GUI, so use i+1 below\n trace_header_info_list[i + 1] = trace_header_info\n group_ids.append(\n trace_header_info['group_id_for_this_trace'])\n\n self.logger.debug(trace_header_info)\n self.logger.debug('')\n self.info['trace_header_info_list'] = trace_header_info_list\n\n ##############################################\n # GROUPS\n\n self.logger.debug('== GROUPS ==')\n\n n_groups = f.read_f('l')\n self.info['n_groups'] = n_groups\n group_ids = \\\n np.sort(list(set(group_ids))) # remove duplicates and sort\n assert n_groups == len(group_ids), \\\n 'expected group_ids to have length {}: {}'.format(\n n_groups, group_ids)\n\n self.logger.debug('n_groups: {}'.format(n_groups))\n self.logger.debug('group_ids: {}'.format(group_ids))\n self.logger.debug('')\n\n group_header_info_list = {}\n for i in group_ids:\n\n # AxoGraph groups are 0-indexed in GUI, so use i below\n self.logger.debug('== GROUP #{} =='.format(i))\n\n group_header_info = {}\n\n if format_ver < 6:\n # before format version 6, there was only one version\n # of the header, and version numbers were not provided\n group_header_info['group_header_version'] = 1\n else:\n # for format versions 6 and later, the header version\n # must be read\n group_header_info['group_header_version'] = \\\n f.read_f('l')\n\n if group_header_info['group_header_version'] == 1:\n GroupHeaderDescription = GroupHeaderDescriptionV1\n else:\n raise NotImplementedError(\n 'unimplemented group header version \"{}\"!'.format(\n group_header_info['group_header_version']))\n\n for key, fmt in GroupHeaderDescription:\n group_header_info[key] = f.read_f(fmt)\n # AxoGraph groups are 0-indexed in GUI, so use i below\n group_header_info_list[i] = group_header_info\n\n self.logger.debug(group_header_info)\n self.logger.debug('')\n self.info['group_header_info_list'] = group_header_info_list\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 1 <<')\n\n # 36 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('9l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPISODES\n\n self.logger.debug('== EPISODES ==')\n\n # a subset of episodes can be selected for \"review\", or\n # episodes can be paged through one by one, and the indexes of\n # those currently in review appear in this list\n episodes_in_review = []\n n_episodes = f.read_f('l')\n self.info['n_episodes'] = n_episodes\n for i in range(n_episodes):\n episode_bool = f.read_f('Z')\n if episode_bool:\n episodes_in_review.append(i + 1)\n self.info['episodes_in_review'] = episodes_in_review\n\n self.logger.debug('n_episodes: {}'.format(n_episodes))\n self.logger.debug('episodes_in_review: {}'.format(\n episodes_in_review))\n\n if format_ver == 5:\n\n # the test file for version 5 contains this extra list of\n # episode indexes with unknown purpose\n old_unknown_episode_list = []\n n_episodes2 = f.read_f('l')\n for i in range(n_episodes2):\n episode_bool = f.read_f('Z')\n if episode_bool:\n old_unknown_episode_list.append(i + 1)\n\n self.logger.debug('old_unknown_episode_list: {}'.format(\n old_unknown_episode_list))\n if n_episodes2 != n_episodes:\n self.logger.debug(\n 'n_episodes2 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes2, n_episodes))\n\n # another list of episode indexes with unknown purpose\n unknown_episode_list = []\n n_episodes3 = f.read_f('l')\n for i in range(n_episodes3):\n episode_bool = f.read_f('Z')\n if episode_bool:\n 
unknown_episode_list.append(i + 1)\n\n self.logger.debug('unknown_episode_list: {}'.format(\n unknown_episode_list))\n if n_episodes3 != n_episodes:\n self.logger.debug(\n 'n_episodes3 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes3, n_episodes))\n\n # episodes can be masked to be removed from the pool of\n # reviewable episodes completely until unmasked, and the\n # indexes of those currently masked appear in this list\n masked_episodes = []\n n_episodes4 = f.read_f('l')\n for i in range(n_episodes4):\n episode_bool = f.read_f('Z')\n if episode_bool:\n masked_episodes.append(i + 1)\n self.info['masked_episodes'] = masked_episodes\n\n self.logger.debug('masked_episodes: {}'.format(\n masked_episodes))\n if n_episodes4 != n_episodes:\n self.logger.debug(\n 'n_episodes4 ({}) and n_episodes ({}) '\n 'differ!'.format(n_episodes4, n_episodes))\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 2 <<')\n\n # 68 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('d 9l d 4l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # FONTS\n\n if format_ver >= 6:\n font_categories = ['axis titles', 'axis labels (ticks)',\n 'notes', 'graph title']\n else:\n # would need an old version of AxoGraph to determine how it\n # used these settings\n font_categories = ['everything (?)']\n\n font_settings_info_list = {}\n for i in font_categories:\n\n self.logger.debug('== FONT SETTINGS FOR {} =='.format(i))\n\n font_settings_info = {}\n for key, fmt in FontSettingsDescription:\n font_settings_info[key] = f.read_f(fmt)\n\n # I don't know why two arbitrary values were selected to\n # represent this switch, but it seems they were\n # - setting1 could contain other undeciphered data as a\n # bitmask, like setting2\n assert font_settings_info['setting1'] in \\\n [FONT_BOLD, FONT_NOT_BOLD], \\\n 'expected setting1 ({}) to have value FONT_BOLD ' \\\n '({}) or FONT_NOT_BOLD ({})'.format(\n font_settings_info['setting1'],\n FONT_BOLD,\n FONT_NOT_BOLD)\n\n # size is stored 10 times bigger than real value\n font_settings_info['size'] = \\\n font_settings_info['size'] / 10.0\n font_settings_info['bold'] = \\\n bool(font_settings_info['setting1'] == FONT_BOLD)\n font_settings_info['italics'] = \\\n bool(font_settings_info['setting2'] & FONT_ITALICS)\n font_settings_info['underline'] = \\\n bool(font_settings_info['setting2'] & FONT_UNDERLINE)\n font_settings_info['strikeout'] = \\\n bool(font_settings_info['setting2'] & FONT_STRIKEOUT)\n font_settings_info_list[i] = font_settings_info\n\n self.logger.debug(font_settings_info)\n self.logger.debug('')\n self.info['font_settings_info_list'] = font_settings_info_list\n\n ##############################################\n # X-AXIS SETTINGS\n\n self.logger.debug('== X-AXIS SETTINGS ==')\n\n x_axis_settings_info = {}\n for key, fmt in XAxisSettingsDescription:\n x_axis_settings_info[key] = f.read_f(fmt)\n self.info['x_axis_settings_info'] = x_axis_settings_info\n\n self.logger.debug(x_axis_settings_info)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 3 <<')\n\n # 108 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('8l 3d 13l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EVENTS / TAGS\n\n self.logger.debug('=== EVENTS / TAGS ===')\n\n n_events, n_events_again = 
f.read_f('ll')\n self.info['n_events'] = n_events\n\n self.logger.debug('n_events: {}'.format(n_events))\n\n # event / tag timing is stored as an index into time\n raw_event_timestamps = []\n event_labels = []\n for i in range(n_events_again):\n event_index = f.read_f('l')\n raw_event_timestamps.append(event_index)\n n_events_yet_again = f.read_f('l')\n for i in range(n_events_yet_again):\n title = f.read_f('S')\n event_labels.append(title)\n\n event_list = []\n for event_label, event_index in \\\n zip(event_labels, raw_event_timestamps):\n # t_start shouldn't be added here\n event_time = event_index * sampling_period\n event_list.append({\n 'title': event_label,\n 'index': event_index,\n 'time': event_time})\n self.info['event_list'] = event_list\n for event in event_list:\n self.logger.debug(event)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug('>> UNKNOWN 4 <<')\n\n # 28 bytes of undeciphered data (types here are guesses)\n unknowns = f.read_f('7l')\n\n self.logger.debug(unknowns)\n self.logger.debug('')\n\n ##############################################\n # EPOCHS / INTERVAL BARS\n\n self.logger.debug('=== EPOCHS / INTERVAL BARS ===')\n\n n_epochs = f.read_f('l')\n self.info['n_epochs'] = n_epochs\n\n self.logger.debug('n_epochs: {}'.format(n_epochs))\n\n epoch_list = []\n for i in range(n_epochs):\n epoch_info = {}\n for key, fmt in EpochInfoDescription:\n epoch_info[key] = f.read_f(fmt)\n epoch_list.append(epoch_info)\n self.info['epoch_list'] = epoch_list\n\n # epoch / interval bar timing and duration are stored in\n # seconds, so here they are converted to (possibly non-integer)\n # indexes into time to fit into the procrustean beds of\n # _rescale_event_timestamp and _rescale_epoch_duration\n raw_epoch_timestamps = []\n raw_epoch_durations = []\n epoch_labels = []\n for epoch in epoch_list:\n raw_epoch_timestamps.append(\n epoch['t_start'] / sampling_period)\n raw_epoch_durations.append(\n (epoch['t_stop'] - epoch['t_start']) / sampling_period)\n epoch_labels.append(epoch['title'])\n self.logger.debug(epoch)\n self.logger.debug('')\n\n ##############################################\n # UNKNOWN\n\n self.logger.debug(\n '>> UNKNOWN 5 (includes y-axis plot ranges) <<')\n\n # lots of undeciphered data\n rest_of_the_file = f.read()\n\n self.logger.debug(rest_of_the_file)\n self.logger.debug('')\n\n self.logger.debug('End of file reached (expected)')\n\n except EOFError as e:\n if format_ver == 1 or format_ver == 2:\n # for format versions 1 and 2, metadata like graph display\n # information was stored separately in the \"resource fork\"\n # of the file, so reaching the end of the file before all\n # metadata is parsed is expected\n self.logger.debug('End of file reached (expected)')\n pass\n else:\n # for format versions 3 and later, there should be metadata\n # stored at the end of the file, so warn that something may\n # have gone wrong, but try to continue anyway\n self.logger.warning('End of file reached unexpectedly '\n 'while parsing metadata, will attempt '\n 'to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n except UnicodeDecodeError as e:\n # warn that something went wrong with reading a string, but try\n # to continue anyway\n self.logger.warning('Problem decoding text while parsing '\n 'metadata, will ignore any remaining '\n 'metadata and attempt to continue')\n self.logger.debug(e, exc_info=True)\n pass\n\n self.logger.debug('')\n\n ##############################################\n # RAWIO HEADER\n\n 
# event_channels will be cast to _event_channel_dtype\n event_channels = []\n event_channels.append(('AxoGraph Tags', '', 'event'))\n event_channels.append(('AxoGraph Intervals', '', 'epoch'))\n\n if len(sig_channels) > 0:\n signal_streams = [('Signals', '0')]\n else:\n signal_streams = []\n\n # organize header\n self.header['nb_block'] = 1\n self.header['nb_segment'] = [1]\n self.header['signal_streams'] = np.array(signal_streams, dtype=_signal_stream_dtype)\n self.header['signal_channels'] = np.array(sig_channels, dtype=_signal_channel_dtype)\n self.header['event_channels'] = np.array(event_channels, dtype=_event_channel_dtype)\n self.header['spike_channels'] = np.array([], dtype=_spike_channel_dtype)\n\n ##############################################\n # DATA OBJECTS\n\n # organize data\n self._sampling_period = sampling_period\n self._t_start = t_start\n self._raw_signals = [sig_memmaps] # first index is seg_index\n self._raw_event_epoch_timestamps = [\n np.array(raw_event_timestamps),\n np.array(raw_epoch_timestamps)]\n self._raw_event_epoch_durations = [\n None,\n np.array(raw_epoch_durations)]\n self._event_epoch_labels = [\n np.array(event_labels, dtype='U'),\n np.array(epoch_labels, dtype='U')]",
"def extract_calib_info(fname):\n\n # read in the text file\n f = open(fname, 'r')\n lines = f.readlines()\n\n # reading criteria\n k1 = 'fitting method'\n k2 = 'function evals'\n k3 = 'data points'\n k4 = 'Bayesian info crit'\n k5 = ' (' # calibrated parameters\n k6 = '(init' # calibrated parameters\n k7 = '+/-' # calibrated parameters\n k8 = ':' # calibrated parameters\n k9 = '(fixed' # calibrated parameters\n k10 = '==' # calibrated parameters\n\n # info to keep\n info = [e.split('=') if (k1 in e) else [e.split('=')[1]] if ((k2 in e) or\n (k3 in e) or (k4 in e)) else\n [(e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[0]),\n (e.split(k6)[0].split(k5)[0].split(k7)[0].split(k8)[1]),\n e.split(k6)[0].split(k5)[0].split(k7)[1]] if (k7 in e) else\n [e.split(k6)[0].split(':')[0], e.split(k6)[0].split(':')[1], 'nan']\n if (k6 in e) else [e.split(k9)[0].split(':')[0],\n e.split(k9)[0].split(':')[1], 'nan']\n if (k9 in e) else [e.split(k10)[0].split(':')[0],\n e.split(k10)[0].split(':')[1], 'nan']\n if (k10 in e) else [''] for e in lines]\n\n # remove end lines and formatting issues\n info = [e.strip('\\n') for sub in info for e in sub if e != '']\n info = [e.replace(' ', '') if (':' in e) else e.strip() for e in info]\n\n # split into sublists containing each solver's info\n info = [list(sub) for e, sub in groupby(info, lambda x: k1 not in x) if e]\n\n return info",
"def read_cli(fname):\n \n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'r')\n meta['CLIGEN Version'] = fid.readline().strip()\n fid.readline()\n meta['Station'] = ' '.join(fid.readline().strip().split())\n\n fid.readline()\n line = fid.readline().strip().split()\n meta['Latitude'] = float(line[0])\n meta['Longitude'] = float(line[1])\n meta['Elevation'] = float(line[2])\n meta['Obs. Years'] = float(line[3])\n meta['Beginning Year'] = float(line[4])\n meta['Years Simulated'] = float(line[5])\n meta['Command Line'] = ' '.join(line[6:])\n\n fid.readline()\n meta['Observed monthly ave max temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave min temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave solar radiation (Langleys/day)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave precipitation (mm)'] = \\\n list(map(float, fid.readline().split()))\n\n header = fid.readline().strip().split()\n \n fid.readline()\n\n _data = []\n for line in fid.readlines():\n cells = line.split()\n\n if len(cells) != len(header):\n break\n\n _data.append([float(c) for c in cells])\n \n data = {}\n for h,v in zip(header, zip(*_data)):\n data[h] = v\n\n del _data\n del header\n\n return (meta,data)",
"def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 
'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)",
"def read_cbf_file(inputfilename):\n \n \n with open(inputfilename, 'rb') as fid:\n BD = np.fromfile(fid, np.float64)\n \n # https://www.mathworks.com/help/matlab/ref/fwrite.html\n # https://www.mathworks.com/help/matlab/numeric-types.html\n \n k=0;\n # Static data (100 places)\n SD=BD[k:k+100]\n k=k+100\n # Priors (50 places)\n PR=BD[k:k+50];\n k=k+50;\n # Priorunc (50 places)\n PRU=BD[k:k+50]\n k=k+50 \n \n # O. Priors (50 places)\n OPR=BD[k:k+50]\n k=k+50\n # O. Priorunc (50 places)\n OPRU=BD[k:k+50]\n k=k+50\n \n CBF = {}\n CBF['PARPRIORS'] = np.expand_dims(PR,axis=1)\n CBF['PARPRIORUNC'] = np.expand_dims(PRU,axis=1)\n CBF=read_other_obs_constraints(CBF,OPR,OPRU)\n \n CBF['ID'] = SD[0] # ID (not used)\n CBF['LAT'] = SD[1] # Latitude\n CBF['nodays'] = int(SD[2]) # Number of days\n CBF['nomet'] = int(SD[3])\n CBF['noobs'] =int(SD[4])\n CBF['EDC'] = SD[5]\n CBF['EDCDIAG'] = SD[6]\n# CBF = {'PARPRIORS':np.expand_dims(PR,axis=1), \n# 'PARPRIORUNC':np.expand_dims(PRU,axis=1), \n# 'OTHERPRIORS':np.expand_dims(OPR,axis=1), #\n# 'OTHERPRIORSUNC':np.expand_dims(OPRU,axis=1),\n# 'ID':SD[0], # ID (not used)\n# 'LAT':SD[1], # Latitude\n# 'nodays':int(SD[2]), # Number of days\n# 'nomet':int(SD[3]), \n# 'noobs':int(SD[4]),\n# 'EDC':SD[5],\n# 'EDCDIAG':SD[6],\n# 'gppabs':SD[7],\n# 'rc_random_search':SD[10]==1,\n# 'nbe_annual_unc':SD[13],\n# 'etiav':SD[14],\n# 'nbe_seasonal_unc':SD[15]}\n \n #MCMC start searching EDCs from anywhere (1) or from prescribed starting\n #point(0). this is outdated - consider deleting\n CBF['rc_random_search'] = SD[10]==1\n \n #NEE IAV options\n CBF=read_obs_uncertainty_fields(CBF,SD,OPRU)\n \n \n TEMPDATA=BD[k:k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']].reshape(CBF['nodays'],\n (CBF['nomet']+CBF['noobs']))\n #All met data\n CBF['MET'] = TEMPDATA[0:CBF['nodays'],0:CBF['nomet']] # Add in new meteorology here\n# CBF['OBS'] = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBFOBS = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBF=define_cbf_obs_fields(CBF,CBFOBS)\n \n #Removing redundant fields\n# CBF=rmfield(CBF,'noobs');\n# # CBF=rmfield(CBF,'nomet');\n# # CBF=rmfield(CBF,'nodays');\n \n \n # Read prescribed mean meteorology\n \n if len(BD) - (k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']) == CBF['nomet'] + CBF['noobs']:\n \n kmmet= k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']\n CBF['mmet'] = BD[kmmet:kmmet+CBF['nomet']]\n \n \n #Retaining \"OTHERPRIORS\" for now\n CBF['RAW'] = {}\n CBF['RAW']['OTHERPRIORS']=OPR;\n CBF['RAW']['OTHERPRIORSUNC']=OPRU;\n CBF['RAW']['info']='Raw inputs/outputs as stored in CBF binary structure';\n CBF['RAW']['details']='For completeness & development purpose only; When re-writing CBF to file, these are over-written by CBF.OBS, etc.';\n\n \n \n \n \n return CBF\n #disp(sprintf('CHECK: .cbf file \"%s\" successfully read into matlab.',filename)) ",
"def raw_fields(self):\n (x1, x2, x3) = self.get_x_g0_defaults()\n\n # offt doesn't exist in NX nastran\n offt = set_blank_if_default(self.offt, 'GGG')\n\n list_fields = ['CBAR', self.eid, self.Pid(), self.Ga(), self.Gb(), x1, x2,\n x3, offt, self.pa, self.pb] + list(self.wa) + list(self.wb)\n return list_fields",
"def getRoiInfo(self, fh):\n fn = fh.name()\n rf = open(fn[:-4]+'.roi', 'r')\n rois = np.loadtxt(rf)\n return rois",
"def read_pfeatures(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n nif = db['nif']\n year = db['year']\n pfeatures = db['pfeatures']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, nif, year, pfeatures, methodvalues",
"def gen_info():\n # Carga la metainfo de departamentos de covidstas y filtramos departamentos de Santa Fe\n covidstats_meta_df = pd.read_csv('covidstats.csv',sep=';')\n covidstats_meta_df['LOCATION']='ARGENTINA/'+covidstats_meta_df['Provincia'].apply(normalize_str)+'/'+covidstats_meta_df['Departamento'].apply(normalize_str)\n covidstats_meta_df=covidstats_meta_df[covidstats_meta_df['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n covidstats_meta_df\n\n # Cargamos la info poblacional y chequemos que tengamos toda la info\n info_df=pd.read_csv('info_general.csv')\n s = set(info_df['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO DE: {}'.format(l))\n\n # Cargamos la info geografica y chequemos que tengamos toda la info\n gdf = gpd.read_file('maps_general.geojson')\n gdf=gdf[gdf['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n s = set(gdf['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO GEOGRAFICA DE: {}'.format(l))\n return covidstats_meta_df, info_df, gdf"
] | [
"0.60176206",
"0.56555265",
"0.5474328",
"0.5407819",
"0.5401295",
"0.5373873",
"0.5334079",
"0.5289622",
"0.52885354",
"0.5262731",
"0.5220959",
"0.5170142",
"0.51691955",
"0.5163392",
"0.5162932",
"0.5147522",
"0.5127302",
"0.5127064",
"0.51110655",
"0.5105965",
"0.5087524",
"0.5077613",
"0.50769275",
"0.5067576",
"0.50663203",
"0.50540555",
"0.5023909",
"0.50144446",
"0.50134283",
"0.5010237"
] | 0.5832184 | 1 |
Merge CGOVTYPE, ORI, AGENCY from final main file into census files based on state and place fips. | def get_glevel_ori_agency(county_cens_file, crime_df, filename, cens_year, city_cens_file=False):
"""
1. Append the cities census file to the counties census file.
"""
national_census_df = pd.read_csv(county_cens_file)
"""
Check for a city census file because, for 2000 and 2010, the city census file must first be appended to the bottom of the county census file.
The city census file is passed only for 2000 and 2010, since the 1990 data already contains city and county census records together.
"""
if city_cens_file:
cities_df = pd.read_csv(city_cens_file)
national_census_df = national_census_df.append([cities_df])
# Drop duplicate state/place FIPS combinations; the default keep='first' retains the county record over a later-appended city record.
national_census_df = national_census_df.drop_duplicates(['STATEFP', 'place_fips'])
national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/Census_{cens_year}_Unique.csv', index=False)
"""
2. Merge the unique census file with Crime_Major_Gov_Fips to get the correct CGOVTYPE and CNTY based on FIPS state and FIPS place.
Also obtain the ORI and AGENCY columns from the crime file.
"""
national_census_df = national_census_df.merge(crime_df, on=['STATEFP', 'place_fips'], how='right')
"""
3. Create the final Govt_level and CNTY columns from the _y versions (the values coming from the crime file) and drop the leftover _x and _y columns.
"""
national_census_df['Govt_level'] = national_census_df['Govt_level_y']
national_census_df['CNTY'] = national_census_df['CNTY_y']
national_census_df.drop(['Govt_level_x', 'Govt_level_y', 'CNTY_x', 'CNTY_y'], axis=1, inplace=True)
"""
Add the YEAR column so that even rows with missing census data for certain ORIs still carry the year.
"""
national_census_df['YEAR'] = cens_year
"""
4. Rearrange columns so that ORI, AGENCY, Govt_level, CNTY and YEAR are at the beginning.
"""
cols = list(national_census_df.columns.values)
cols.pop(cols.index('ORI'))
cols.pop(cols.index('AGENCY'))
cols.pop(cols.index('Govt_level'))
cols.pop(cols.index('CNTY'))
cols.pop(cols.index('YEAR'))
national_census_df = national_census_df[['ORI', 'AGENCY', 'Govt_level', 'CNTY', 'YEAR'] + cols]
#national_census_df = national_census_df[['ORI', 'AGENCY', 'YEAR'] + cols]
# write the final df with updated govt_level, ori, agency etc. to a csv
national_census_df.to_csv(f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/US Crime Analytics/data/cen_00/{filename}.csv', index=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_final_main_cgovtype_ori_agency(file_path):\n final_main_df = pd.read_csv(file_path)\n final_main_fips_ori_agency = final_main_df[['ORI', 'AGENCY', 'CGOVTYPE', 'FIPS_STATE', 'FIPS_PLACE']]\n\n \"\"\"\n 1. Obtain only unique records from the final main file - key: fips place + fips state\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency.drop_duplicates(['FIPS_STATE', 'FIPS_PLACE']) # --> 11,602 rows\n\n \"\"\"\n 2. Rename CGOVTYPE, FIPS_STATE, FIPS_PLACE to Govt_level, 'STATEFP', 'place_fips' to match national census file\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.rename(\n {'CGOVTYPE': 'Govt_level', 'FIPS_STATE': 'STATEFP', 'FIPS_PLACE': 'place_fips'}, axis='columns')\n\n \"\"\"\n 3. Get only those records from 90 final main file whose cgovtype is 1,2 or 3\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.loc[final_main_fips_ori_agency_unique['Govt_level'].isin([1, 2, 3])]\n\n return final_main_fips_ori_agency_unique",
"def main():\n files = init.file_list\n citations = load_citations.load_files(files)\n citations_a = citations[0]\n citations_b = citations[1]\n\n common_citations, num_common = compare_citations.common_citations(citations_a, citations_b)\n save_citations.save_citations(common_citations)",
"def datamerge_run(filenames, outdir, roc_cols):\n \n tbldict = collect2dict(filenames, outdir)\n tbldict = cogtest_manipulation(tbldict, roc_cols)\n \n #count number of tps\n tbldict['cogtests'] = count_instances(tbldict['cogtests'], 'codeb', 'NP_NoTps')\n tbldict['aseg_change'] = count_instances(tbldict['aseg_change'], 'codea', 'MRI_NoTps')\n tbldict['pibparams'] = count_instances(tbldict['pibparams'], 'codea', 'PIB_NoTps')\n \n new_tbldict = {}\n for key, tbl in tbldict.iteritems():\n tpcol = [s for s in tbl.columns if ('_Tp' in s)]\n if tpcol:\n tpcol = tpcol[0]\n tblflat, tblflatnm = flatten(tbl, tpcol, key, [1, '1'])\n new_tbldict[tblflatnm] = tblflat\n tbldict.update(new_tbldict)\n \n #make sure each table contains SubjID and BAC# fields\n for key, tbl in tbldict.iteritems():\n tbl = addcodes(tbl, tbldict['codetranslator'])\n tbldict[key] = tbl\n \n #merge tables\n tblstojoin = ['cogtests_flat','pibparams_flat','aseg_change_flat','fdg_metaroi_flat','subjinfo']\n joincol = ['codea','codeb']\n subjtbl = mergelots(tbldict, tblstojoin, joincol)\n \n #merge tables\n tblstojoin = ['cogtests','subjinfo','pibparams_flat','aseg_change_flat','fdg_metaroi_flat']\n joincol = ['codea','codeb']\n NPtbl = mergelots(tbldict, tblstojoin, joincol)\n \n cf.save_xls_and_pkl(subjtbl, 'subjtbl', outdir)\n cf.save_xls_and_pkl(NPtbl, 'NPtbl', outdir)\n \n return tbldict, NPtbl, subjtbl",
"def process_merging(lvcfs, ltoolnames, list_tool_precedence_order, dico_map_tool_acronym, lossless, merge_vcf_outfilename, l_contigs_ref_genome_fasta_dict, cmdline):\n\n\toutputFilename = merge_vcf_outfilename\n\ttuple_objs = ()\n\tl_snames = []\n\tl_contigs = []\n\n\n\tListFieldsToProcessForOurFORMATColumn = [\"GT\", \"DP\", \"AR\", \"AD\"] ## HARDCODED;\n\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\t## SECTION CHECKING PRECEDENCE ORDER if necessary\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\tlog.info(\"is list_tool_precedence empty? \".format(str(list_tool_precedence_order)))\n\tif list_tool_precedence_order is not None:\n\t\t'''here we sort and reassigned ltoolnames and lvcfs based on list_tool_precedence_order ; names of the \n\t\ttools have to match 100%\n\t\t'''\n\t\tif len(list_tool_precedence_order) != len(ltoolnames):\n\t\t\texit(\"ERROR: Tool Names in list precedence do not match 100% names in list toolnames ; check your \"\n\t\t\t \"input\\n\" + \"sorted_list_tool_precedence -> \" + str(sorted(list_tool_precedence_order)) +\n\t\t\t \"\\nsorted_list_tool_names ------> \"\n\t\t\t + str(sorted(ltoolnames)))\n\t\t## REORDERING the list of PRECEDENCE of the TOOLs\n\t\tindices = []\n\t\tfor toolname in list_tool_precedence_order:\n\t\t\tindices.append(ltoolnames.index(toolname))\n\t\t## we reallocate/reorder the vcfs files the same order of the list_tool_precedence_order\n\t\tlvcfs = [lvcfs[i] for i in indices]\n\t\tltoolnames = list_tool_precedence_order; ## we re-assigned the list\n\t\tlog.info(str(type(list_tool_precedence_order)))\n\t\tlog.info(\"Re-Ordering the Toolnames and the list of VCFs based on the given precedence list: {} \".format(\n\t\t\tlist_tool_precedence_order))\n\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\t## SECTION STARTING PROCESSING FIELDS\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\tvcfMerger_Format_Fields_Specific = [\n\t\t'##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n\t\t'##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read depth at locus in Sample\">',\n\t\t'##FORMAT=<ID=AD,Number=.,Type=Integer,Description=\"Allelic depths for the ref and alt alleles in the order listed from chosen prevalent tool\">',\n\t\t'##FORMAT=<ID=AR,Number=1,Type=Float,Description=\"Allele frequency of ALT allele from chosen prevalent tool\">'\n\t]\n\n\tTN_FLAGS = []\n\tfor tool in ltoolnames:\n\t\tTN_FLAG = str(''.join([\n\t\t\t'##INFO=<ID=' + tool + ',Number=0,Type=Flag,Description=\"Toolname Flag means that position got '\n\t\t\t 'called by this tool\">']))\n\t\tTN_FLAGS.append(TN_FLAG)\n\tAdditional_FLAGS = [\n\t\t'##INFO=<ID=CC,Number=1,Type=Integer,Description=\"CALLERS_COUNT,Number of tools calling this variant event '\n\t\t'out of a total of ' + str(len(ltoolnames)) + ' tools\">',\n\t\t''.join(['##INFO=<ID=TPCE,Number=1,Type=String,Description=\"Tool that got precedence for called position; '\n\t\t 'user gave the following order for tool precedence: ', ', '.join([str(t) for t in\n\t\t ltoolnames]),\n\t\t '\">']),\n\t\t'##INFO=<ID=VTYPE,Number=1,Type=String,Description=\"Type of Variant (snv, ins, del)\">'\n\t]\n\n\tvcfMerger_Info_Fields_Specific = TN_FLAGS + Additional_FLAGS\n\n\t# the trick is here for the Tool Precedence!!! 
The user has given us an ordered list of\n\t# vcfs and toolnames in order of precedence or a specific PRECEDENCE order was given via --precedence\n\t# and we sort the vcf and add them to the tuple accordingly\n\tfor i in range(len(lvcfs)):\n\t\to = vcfToDict.vcfToDict(lvcfs[i], ltoolnames[i]) ## here we map the toolname and the vcf associated\n\t\ttuple_objs = tuple_objs + (o,) ## we add instances of object vcfToDict to the tuple ; order FIFO is\n\t\t# equivalent to the order of precedence\n\t\tl_snames.append(o.samplenames) ## we add tuples of samplenames to the list l_snames as a list of tuples\n\t\tl_contigs.append(sorted(o.contigs))\n\n\t# performing checks before processing data further\n\tdvm.compareTuples(l_snames,\n\t \"SampleNames\") ## we cannot skip that one. If not matching, then modify vcf to get samples in\n\t# correct columns or with the same names across ALL the vcf files ;\n\tlog.info(\"list of vcf-within-captured Sample Names:\")\n\tlog.info(set(l_snames))\n\tlog.info(\"Number of sample in set: {}\".format(len(set(l_snames))))\n\n\t## UNCOMMENT NEXT LINE TO PUT THE CONTIGS CHECK BACK ON\n#########\tdvm.compareTuples(l_contigs, \"CONTIGS\") ## we may add an option to skip that check ; even though we do not know\n\t# what could be the consequences of having different contigs ; we cannot think any so far.\n\n\t\"\"\"\n\t### we check here the presence of the expected MANDATORY fields in the FORMAT columns ;\n\t### Unfortunately as we do not read the entire VCFs file and therefore we do not have the object VCF created yet,\n\t### we cannot use the cyvcf2 API to check if an ID is defined in the VCF header or not, or in the variant or not;\n\t### So for now, we rely on our own vcf header capture as string; we therefore check the string;\n\t### BUT: this does not mean that the ID fields are necessary present in each variant;\n\t### If we want to check that presence, we will have to read the vcf files entirely see below \"tuple_dicts = () loop\" ;\n\t### and check every variant.\n\t### Or, second option, we will check while we merge and raise ERROR and either stop merging or skip that variant, or put NULL value for that field ;\n\t### for example: if AR does not exist, we set AR=.\n\t\"\"\"\n\n\tcheck_fields_definitions_in_header = True\n\tif check_fields_definitions_in_header:\n\t\tfor flagid in ListFieldsToProcessForOurFORMATColumn:\n\t\t\tlog.info(\"common flag to be processed in FORMAT: {}\".format(flagid))\n\t\t\tfor tpo in tuple_objs:\n\t\t\t\t'''Check if flag we want to put in the format field have been defined in the VCF header'''\n\t\t\t\tres_search = search(\"\".join([\"ID=\", flagid]), tpo.headers)\n\t\t\t\tif res_search is None:\n\t\t\t\t\texit(\n\t\t\t\t\t\t\"Id Flag \" + flagid + \" not Defined in header of vcf file \" + tpo.fvcf + \".\\nPlease bring the VCF up to specs before running this merging tool. Use a wrapper specific to your tool which has already been created by the Author of the current tool. 
Aborting!\")\n\n\n\t# we process the files entirely after all the checks have PASSED successfully\n\t# we may make parallel this step But If we do, we lose the precedence order in the tuple_dicts variable and\n\t# this defies the purpose of that script\n\ttuple_dicts = ()\n\tfor tpo in tuple_objs:\n\t\ttuple_dicts = tuple_dicts + (tpo.dictOfLoci(tpo.readVCF()),)\n\n\t# we merge the Loci from all the VCFs [Key + Value, where Key is defined as CHROM_POS_REF_ALT as assigned in the function \"dictOfLoci\" of class vcfToDict ]\n\t\tdd = defaultdict(list)\n\n\tlog.debug(\"-\" * 41);\n\tlog.debug(str(type(tuple_dicts)))\n\n\tfor d in tuple_dicts:\n\t\tfor key, value in d.items():\n\t\t\ttry:\n\t\t\t\tdd[key].append(value)\n\t\t\texcept KeyError: ## I do not see why we should have an error here because we just list the Keys\n\t\t\t\t# from d dicts we created ; I put it probably because it happened?\n\t\t\t\tlog.warning(\"KEY ERROR Detected - Skipping this values ; It should not have happened; please \"\n\t\t\t\t \"report that to the Author\")\n\t# NOTE: in the loop above, to get your .attrib, just change append(value) to append(value.attrib)\n\t# You may then want to make a normal dict out of the defaultdict so you have normal dict behavior for non-existent keys etc: dd = dict(dd)\n\n\t# 1) first we managed the Headers from all the tools\n\tlog.info(\"processing headers of all the vcf files ...\")\n\tlist_lines_header = dvm.create_new_header_for_merged_vcf(tuple_objs,\n\t cmdline,\n\t vcfMerger_Format_Fields_Specific,\n\t vcfMerger_Info_Fields_Specific,\n\t dico_map_tool_acronym,\n\t l_contigs_ref_genome_fasta_dict\n\t )\n\t# 2) we add the modified header lines to the output merger file\n\tlog.info(\"adding the header to the out vcf file ...\")\n\tdvm.add_new_header_to_merged_file(outputFilename, list_lines_header, tuple_objs[0].header_chrom_line + \"\\n\")\n\n\t# 3) we process all the variants\n\tlog.info(\"looping over variant calls, merging and writing back to file ... \")\n\n\ttry:\n\n\t\tof = open(outputFilename, 'a') # we open the output file with merged information here\n\t\t# sort dico by keys before iterating over it ... 
## normally the Keys are not sorted because we deal with a dictionary which do not keep the order\n\n\t\t# dd = OrderedDict(sorted(dd.items()))\n\t\t# if flag_natsorted : ## if necessary, and requested by users later, we will activate the sorting of teh variants themselves by contigs order as in fastadict file\n\t\t# \tsorted_keys = natsorted(dd.keys())\n\t\t# else:\n\t\t## in this next line, the variants are sorted in the same order the contigs are in the HEADER (the line above aka \"sorted_keys = natsorted(dd.keys())\" sorts the key in natural order that can be different from contgis order in header.\n\t\tsorted_keys = dvm.output_list_variant_sorted_by_contigs_as_same_order_as_in_fastdict_file(dd, l_contigs_ref_genome_fasta_dict)\n\t\t# dd.keys --> they are the KEYS that are represented by the PATTERN --> CHROM_POS_REF_ALT\n\t\t# dd.values --> represents the calls and their information from each tool having call the variant at position CHROM_POS\n\t\t# (the number of list in values may go from 1 to len(lvcfs); where len(lvcfs) represents the total number\n\t\t# of inputs vcfs and therefore ALL the tools would have called that variant )\n\t\t# wtv stands for Winning Tool Variant ; It always is the first one, as the tools have been sorted by\n\t\t# precedence given by the user\n\t\t# 3a) get the total number variants to process in order to calculate on the fly the value for the counter\n\t\t# steps\n\t\ttot_variants_count = len(dd)\n\t\ttotnum_samples = len(list(set(l_snames))[0]) ## get the number of sample detected within the VCF ; We already check if same number of samples within each vcf so no need here; But we deal with tuples of strings so we need to extract the unique tuple from the set; because we expect only ONE tuple\n\t\tlog.info(\"Expected number of Samples in each VCF: \"+str(totnum_samples))\n\t\tlog.info(\"Set of sample(s) found:: \" + str(set(l_snames)))\n\t\tlog.info(\"Total Count of Variants to be merged (aka union of variant here): \" + str(tot_variants_count))\n\n\n\t\tcounter = 0\n\t\t# step is ~10% of tot_variants and round to the nearest nth value\n\t\tstep = int(round(tot_variants_count / 10, -(len(str(round(tot_variants_count / 10))) - 1)))\n\t\tfor K in [k for k in sorted_keys]: # sub is list__list__o.ovcf_variant ;\n\t\t\tcounter += 1;\n\t\t\tif step > 1 and counter % step == 0:\n\t\t\t\tlog.info(\"processed {} variants ...\".format(counter))\n\t\t\trebuilt_variant = dvm.rebuiltVariantLine(dd[K],\n\t\t\t dico_map_tool_acronym,\n\t\t\t lossless,\n\t\t\t ListFieldsToProcessForOurFORMATColumn,\n\t\t\t totnum_samples); ## dd[K} represent a List of Variants (LV)\n\t\t\tof.write(rebuilt_variant + linesep)\n\t\tlog.info(\"total processed variants: {}\".format(counter))\n\n\n\texcept IOError as e:\n\t\tlog.info(\"Error I/O({0}): {1}\".format(e.errno, e.strerror))\n\t\tof.close()\n\telse:\n\t\tof.close()",
"def GenotypeGVCFs():\n #creates sbatch files to merge batches of batch_size genomics vcf\n cwd = os.getcwd()\n sbatch_files = []\n if not os.path.isdir(os.path.join(cwd, \"01_CombineGVCFs\")):\n sys.exit(\"Directory 01_CombineGVCFs does not exits exists, something went wrong here.\")\n if os.path.isdir(os.path.join(cwd, \"02_GenotypeGVCFs\")):\n print \"WARNING: 02_GenotypeGVCFs already present, assuming this step has been completed with success.\"\n return sbatch_files\n else:\n #create the folder structure\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"sbatch\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_err\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_out\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"VCF\"))\n #Build the sbatch files for the join calling step\n working_dir = os.path.join(cwd, \"02_GenotypeGVCFs\")\n #now retrive the VCF stored in 01_CombineGVCFs/VCF/\n combined_gvcfs_to_process = []\n if len(CONFIG[\"intervals_list\"]) == 0:\n #no intervals, I have one file for each batch\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n combined_gvcfs_to_process.append(combined_gvcf_files)\n else:\n for interval in CONFIG[\"intervals_list\"]:\n interval_name = os.path.basename(interval).split(\".\")[0]\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}_{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch, interval_name)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n #now ceate a list with interval file and all gvcf to be combines\n interval_plus_gvcfs = [interval ,combined_gvcf_files]\n combined_gvcfs_to_process.append(interval_plus_gvcfs)\n for interval_plus_gvcfs in combined_gvcfs_to_process:\n interval = interval_plus_gvcfs[0]\n combined_gvcf_files = interval_plus_gvcfs[1]\n sbatch_file = build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, CONFIG[\"scratch\"], interval)\n sbatch_files.append(sbatch_file)\n return sbatch_files",
"def main():\n \n print('=== nyc taxi to airport - step 3 clean data')\n\n if os.path.exists(output_file):\n print(\"output file exists:\", output_file)\n print(\"skipping\")\n return\n\n df = load_data(input_file)\n df = clean_data(df)\n save_as_pickle_gz(df, output_file)\n \n print('done')",
"def main(argv):\n\n \n\n if validate_argv(argv) is False:\n print \"Usage: mergeFiles.py <search_term>\"\n sys.exit()\n\n input_directory_name = 'data_raw'\n search_term = argv[0]\n output_file_name = search_term + '_merged.tsv'\n output_directory_name = 'merged'\n\n\n output_path = fp.set_output_file_path(output_file_name, output_directory_name) \n output = open(output_path, 'a')\n for h1 in range(3):\n for h2 in range(10):\n for m1 in range(6):\n for m2 in range(10):\n file_name = search_term + '_' + str(h1) + str(h2) + str(m1) + str(m2) + '.tsv'\n file_path = fp.get_file_path(file_name, input_directory_name)\n if fp.filename_exists(file_path):\n file = open(file_path, 'r')\n file.next()\n for line in file:\n output.write(line)\n file.close()\n output.close()",
"def createTerritoryGeometries(config, start_time):\n # get the correct names for all of the provinces within each territory\n file_name = config['shape_files_path'] + config['county_shape_file_name']\n names_df = gpd.read_file(file_name)\n names_df.rename(columns={'NAMELSAD':'NAME'})\n names_df = names_df[['GEOID', 'NAME']]\n\n df_holder = []\n # read in block files for the 4 excluded US territories\n for territory in ['60','66','69','78']:\n try:\n temp_time = time.localtime()\n # open the appropriate block file for the given territory\n file_name = config['shape_files_path'] +\\\n \"block/tl_%s_%s_tabblock%s.shp\" %\\\n (config['census_vintage'],territory,config['census_vintage'][2:])\n temp_df = gpd.read_file(file_name)\n # modify the column names so they match what we expect in the tract and \n # county geojson files\n change_columns = { 'STATEFP%s' % config['census_vintage'][2:]:'state_fips', \n 'COUNTYFP%s' % config['census_vintage'][2:]: 'county_fips',\n 'GEOID%s' % config['census_vintage'][2:]:'block_fips',\n 'ALAND%s' % config['census_vintage'][2:]:'aland'}\n temp_df.rename(columns=change_columns, inplace=True)\n\n # create the tract file for the given territory\n tract_df = temp_df[['block_fips', 'aland', 'geometry']]\n tract_df['GEOID'] = tract_df['block_fips'].str[:11]\n tract_df['NAME']=tract_df['GEOID'].str[5:11]\n tract_df['NAME'] = np.where(tract_df['NAME'].str[4:6] != '00', \n tract_df['NAME'].str[:4] + \".\" + tract_df['NAME'].str[4:6], \n tract_df['NAME'].str[:4])\n\n # dissolve the blocks into tract level detail\n tract_df=tract_df[['GEOID', 'NAME', 'geometry']].loc[tract_df['aland']>0].dissolve(by='GEOID')\n tract_df.reset_index(inplace=True)\n\n # save the newly created tracts for the territory into a shape file\n # for later use by processes\n file_name = config['shape_files_path'] +\\\n \"tract/gz_%s_%s_140_00_500k.shp\" %\\\n (config['census_vintage'],territory)\n tract_df.to_file(file_name)\n\n # provide status or data processing\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - FINISHED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False\n\n try:\n # create the dataframe for capturing county level data\n temp_time = time.localtime()\n county_df = temp_df[['state_fips', 'county_fips', 'aland', 'geometry']]\n county_df['GEOID'] = county_df['state_fips'] + county_df['county_fips']\n\n # merge the block level data at the county level to get the geometry\n county_df=county_df[['GEOID', 'geometry']].loc[county_df['aland']>0].dissolve(by='GEOID')\n\n # the county records for US states include names. The names cannot\n # be easily constructed following a set of rules, so instead we just\n # merge the names of the territories that are listed in the tiger line\n # files with the geometries we just calculated. 
This ends up giving\n # us the information we need to create the equivalent of a fully \n # populated 2010 county cartographic file that includes territories\n county_df = county_df.merge(names_df, left_on='GEOID', right_on='GEOID')\n county_df = county_df[['GEOID', 'NAME', 'geometry']]\n\n # append the information to a list that we will process later\n df_holder.append(county_df)\n\n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - PROCESSED COUNTY DATA FOR\n US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED PROCESSING COUNTY DATA\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False \n\n try:\n # now that we have the county level data for the territories, we need to merge\n # it with the US county data and create a single file for subsequent processing\n # open the county cartographic bounday file\n file_name = config['shape_files_path'] + config['county_cb_shape_file_name']\n county = gpd.read_file(file_name)\n\n # the cartographic boundary files do not have full names, so concatenate the \n # name and lsad columns and overwrite the original name\n county['NAME']=county['NAME'] + ' ' + county['LSAD']\n\n # extract the county fips from the non-standard county fips identifier in the\n # 2010 cartographic boundary file and then preserve only the necessary columns\n county['GEOID']=county['GEO_ID'].str[9:]\n county = county[['GEOID', 'NAME','geometry']]\n\n # append the county data to the list to be used to build the single file\n df_holder.append(county)\n\n # merge all of the dataframes into a single dataframe, sort it, and then \n # write the file out as a shape file so it can be used later for subsequent\n # data processing\n counties = pd.concat([x for x in df_holder])\n counties.sort_values(by='GEOID',inplace=True)\n file_name = config['shape_files_path'] + config['county_gzm_shape_file_name']\n counties.to_file(file_name)\n \n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - COMPLETED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time))) \n return True \n\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False",
"def merge(parent_folder):\n parent_folder = Path(parent_folder)\n\n address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))\n\n frames = []\n\n #: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL\n for address_csv_file in address_csv_files:\n temp = pd.read_csv(\n address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']\n )\n\n frames.append(temp)\n\n #: merge all csv's\n merged = pd.concat(frames)\n merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')",
"def main():\n\n # Accept up to three command-line arguments\n input_terms = \"<input_GO_terms_file>\"\n input_annotations = \"<input_gene_associations_file>\"\n output_filename = \"<output_filename>\"\n\n\n # The first two arguments are required GO terms file ending with .obo\n # and gene association GAF file ending with .gaf\n if len(sys.argv) < 3:\n sys.exit(\"Please provide required GO terms .obo file and gene \" +\n \"assocatiion .gaf file.\")\n elif not sys.argv[1].endswith(\".obo\"):\n sys.exit(\"Please provide a GO terms .obo file.\")\n elif not sys.argv[2].endswith(\".gaf\"):\n sys.exit(\"Please provide a gene association .gaf file.\")\n else:\n input_terms = sys.argv[1]\n input_annotations = sys.argv[2]\n\n\n # Check if the provided import .obo or .gaf files exist\n if not input_terms:\n sys.exit(input_terms + \" not found. Check the file path and try again.\")\n elif not input_annotations:\n sys.exit(input_annotations + \" not found. Check the file path and try again.\")\n elif len(sys.argv) == 3:\n output_filename = \"results.tsv\"\n sys.stdout = open(\"results.tsv\", \"w\")\n elif len(sys.argv) == 4:\n output_filename = sys.argv[3] + \".tsv\"\n sys.stdout = open(output_filename, \"w\")\n\n\n # parse id and is_valeus and make a go_dict\n split_input_terms = split_terms(input_terms)\n go_dict = {}\n for record in split_input_terms:\n (go_id, is_a) = parse_go_term(record)\n key_go_dict = \"\".join(go_id)\n go_dict[key_go_dict] = is_a\n\n\n # Export an annotation gene information to tsv format into the output file\n gene_association_map = map_protein_to_go(input_annotations)\n for protein, go_ids in sorted(gene_association_map.items()):\n print(protein, end=\"\")\n\n for go_id in sorted(go_ids):\n parent_go_ids = find_parent_terms(go_id, go_dict)\n\n count = 0\n for parent_go_id in sorted(parent_go_ids):\n\n if count == 0:\n print(\"\\t\", go_id, \"\\t\", parent_go_id)\n count += 1\n else:\n print(\"\\t\", parent_go_id, sep=\"\\t\")\n\n sys.stdout.close()",
"def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)",
"def finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip):\n global completed\n global country\n global end_notam\n\n feature['properties']['source_href']=source\n feature['properties']['country']=country\n feature['geometry'] = obj\n aipname = wstrip(str(aipname))\n if aipname == 'EN D476':\n aipname = 'EN D476 R og B 1'\n if aipname == 'EN D477':\n aipname = 'EN D477 R og B 2'\n\n if 'ACC' in aipname and country==\"ES\":\n return {\"properties\":{}}, []\n for ignore in ['ADS','AOR','FAB',' FIR','HTZ']:\n if ignore in aipname:\n logger.debug(\"Ignoring: %s\", aipname)\n return {\"properties\":{}}, []\n feature['properties']['name']=aipname\n if cta_aip or aip_sup or tia_aip or 'ACC' in aipname:\n recount = len([f for f in features if aipname in f['properties']['name']])\n recount = recount or len([f for f in accsectors if aipname in f['properties']['name']])\n if recount>0:\n separator = \" \"\n if re.search('\\d$', aipname):\n separator=\"-\"\n # special handling Farris TMA skipping counters\n if \"Farris\" in aipname:\n if recount > 4:\n recount += 2\n else:\n recount += 1\n logger.debug(\"RECOUNT renamed \" + aipname + \" INTO \" + aipname + separator + str(recount+1))\n feature['properties']['name']=aipname + separator + str(recount+1)\n if 'TIZ' in aipname or 'TIA' in aipname:\n feature['properties']['class']='G'\n elif 'CTR' in aipname:\n feature['properties']['class']='D'\n elif 'TRIDENT' in aipname \\\n or 'EN D' in aipname or 'END' in aipname \\\n or 'ES D' in aipname:\n feature['properties']['class']='D'\n elif 'EN R' in aipname \\\n or 'ES R' in aipname or 'ESTRA' in aipname \\\n or 'EUCBA' in aipname or 'RPAS' in aipname:\n feature['properties']['class']='R'\n elif 'TMA' in aipname or 'CTA' in aipname or 'FIR' in aipname \\\n or 'ACC' in aipname or 'ATZ' in aipname or 'FAB' in aipname \\\n or 'Sector' in aipname:\n feature['properties']['class']='C'\n elif '5.5' in source or \"Hareid\" in aipname:\n if \"Nidaros\" in aipname:\n #skip old Nidaros airspace\n return {\"properties\":{}}, []\n feature['properties']['class']='Luftsport'\n index = len(collection)+len(features)\n\n if names.get(aipname):\n logger.debug(\"DUPLICATE NAME: %s\", aipname)\n\n if len(obj)>100:\n logger.debug(\"COMPLEX POLYGON %s with %i points\", feature['properties'].get('name'), len(obj))\n obj=simplify_poly(obj, 100)\n feature['geometry'] = obj\n\n if len(obj)>3:\n logger.debug(\"Finalizing polygon #%i %s with %i points.\", index, feature['properties'].get('name'), len(obj))\n\n name = feature['properties'].get('name')\n source = feature['properties'].get('source_href')\n from_ = feature['properties'].get('from (ft amsl)')\n to_ = feature['properties'].get('to (ft amsl)')\n class_ = feature['properties'].get('class')\n\n\n if name in completed:\n logger.info(\"ERROR Duplicate feature name: #%i %s\", index, name)\n return {\"properties\":{}}, []\n #sys.exit(1)\n else:\n if 'ACC' in aipname:\n logger.debug(\"Writing ACC sector to separate file: %s\", aipname)\n accsectors.append(feature)\n else:\n features.append(feature)\n\n # SANITY CHECK\n if name is None:\n logger.error(\"Feature without name: #%i\", index)\n sys.exit(1)\n if \"None\" in name:\n logger.error(\"Feature without name: #%i\", index)\n sys.exit(1)\n completed[name]=True\n if source is None:\n logger.error(\"Feature without source: #%i\", index)\n sys.exit(1)\n if feature['properties'].get('name') is None:\n logger.error(\"Feature without name: #%i (%s)\", index, source)\n sys.exit(1)\n if class_ is 
None:\n logger.error(\"Feature without class (boo): #%i (%s)\", index, source)\n sys.exit(1)\n # SPECIAL CASE NOTAM reserved ENR in Oslo area\n if \"EN R\" in aipname and \"Kongsvinger\" in aipname:\n feature['properties']['notam_only'] = 'true'\n if \"EN R\" in aipname and (\"Romerike\" in aipname or (\"Oslo\" in aipname and not \"102\" in aipname)):\n feature['properties']['notam_only'] = 'true'\n feature['properties']['from (ft amsl)'] = '0'\n feature['properties']['to (ft amsl)'] = '99999' # unspecified\n feature['properties']['from (m amsl)'] = '0'\n feature['properties']['to (m amsl)'] = '99999'\n from_ = '0'\n to_ = '0'\n if (\"EN D\" in aipname or \"END\" in aipname) and end_notam:\n feature['properties']['notam_only'] = 'true'\n if from_ is None:\n if \"en_sup_a_2018_015_en\" in source:\n feature['properties']['from (ft amsl)']='0'\n feature['properties']['from (m amsl)']='0'\n from_ = '0'\n else:\n logger.error(\"Feature without lower limit: #%i (%s)\", index, source)\n sys.exit(1)\n if to_ is None:\n if \"en_sup_a_2018_015_en\" in source:\n feature['properties']['to (ft amsl)']='99999'\n feature['properties']['to (m amsl)']='9999'\n to_ = '99999'\n else:\n logger.error(\"Feature without upper limit: #%i (%s)\", index, source)\n sys.exit(1)\n if int(from_) >= int(to_):\n # SPECIAL CASE NOTAM reserved ENR in Oslo area\n if \"en_sup_a_2018_015_en\" in source or \"Romerike\" in aipname or \"Oslo\" in aipname:\n feature['properties']['from (ft amsl)']=to_\n feature['properties']['to (ft amsl)']=from_\n else:\n logger.error(\"Lower limit %s > upper limit %s: #%i (%s)\", from_, to_, index, source)\n sys.exit(1)\n elif len(obj)>0:\n logger.error(\"ERROR Finalizing incomplete polygon #%i (%i points)\", index, len(obj))\n\n names[aipname]=True\n logger.debug(\"OK polygon #%i %s with %i points (%s-%s).\", index, feature['properties'].get('name'),\n len(obj),\n feature['properties'].get('from (ft amsl)'),\n feature['properties'].get('to (ft amsl)'))\n return {\"properties\":{}}, []",
"def reduce_and_save():\n ### Get the signature information\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n ### Columns are:\n ### Index([u'sig_id', u'pert_id', u'pert_iname', u'pert_type', u'cell_id',\n ### u'pert_dose', u'pert_dose_unit', u'pert_idose', u'pert_time',\n ### u'pert_time_unit', u'pert_itime', u'distil_id'],\n ### dtype='object')\n\n ### Filter for signature ids for small molecule pertubagens\n small_mol_sigs = sig_info['sig_id'][sig_info['pert_type'] == \"trt_cp\"]\n ### Results in 205034 signatures\n\n ### Read in the gene info\n gene_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_gene_info.txt\"), sep='\\t')\n ### Index([u'pr_gene_id', u'pr_gene_symbol', u'pr_gene_title', u'pr_is_lm',\n ### u'pr_is_bing'],\n ### dtype='object')\n\n landmark_gene_ids = gene_info['pr_gene_id'][gene_info['pr_is_lm'] == 1] #Filters for directly measured transcripts\n ### Results in the 978 landmark pr_gene_ids\n\n ### LOAD in the main file filtering the columns so that only the small molecules signatures are loaded and the\n ### rows such that only the landmark genes are loaded into their custom gctoo container type\n relevent_sigs_gctoo = parse(join(FILE_PATH, \"GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_n473647x12328.gctx\"),\n cid=small_mol_sigs, rid=landmark_gene_ids)\n # print small_mol_sigs.data_df.shape\n ### Should write an intermediate file with dimensions (978, 205034)\n write_gctx.write(relevent_sigs_gctoo, join(FILE_PATH, \"lm_sm_aggz\"))",
"def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! 
\"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')",
"def _upload_biodata(gbuild, target, all_dirs):\n if target == \"seq\":\n want_dirs = set([\"coverage\", \"editing\", \"prioritization\", \"rnaseq\",\n \"seq\", \"snpeff\", \"srnaseq\", \"validation\",\n \"variation\", \"vep\"])\n target_dirs = [x for x in all_dirs if x in want_dirs]\n else:\n target_dirs = [x for x in all_dirs if x == target]\n target_dirs = [os.path.join(gbuild, x) for x in target_dirs]\n fname = objectstore.BIODATA_INFO[\"s3\"].format(build=gbuild, target=target)\n remotef = objectstore.parse_remote(fname)\n conn = objectstore.connect(fname)\n bucket = conn.get_bucket(remotef.bucket)\n key = bucket.get_key(remotef.key)\n if not key:\n keyname = remotef.key\n bucketname = remotef.bucket\n target_dirs = \" \".join(target_dirs)\n cmd = (\"tar -cvpf - {target_dirs} | pigz -c | \"\n \"gof3r put --no-md5 -k {keyname} -b {bucketname} \"\n \"-m x-amz-storage-class:REDUCED_REDUNDANCY -m x-amz-acl:public-read\")\n do.run(cmd.format(**locals()), \"Upload pre-prepared genome data: %s %s\" % (gbuild, target))",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def main():\n location = os.getcwd()\n header = \"Date,Time,Voltage,Current,Isolation,Range,SoC,Distance,Fan rpm,Fan Torque,Hyd. Pump rpm,Hyd. Pump Torque,SW Pump rpm,SW Pump Torque,Nozzle,Sidebrushes,WideSweepBrush,TempIGBT-Fan,Fan motor temp, Traction rpm, Traction torque,BMS1 Volts, BMS2 Volts\"\n header = header+\"\\n\"\n\n of =\"outFile.csv\"\n outFile = open(of, \"w\")\n outFile.write(header)\n\n for file in os.listdir(location ):\n try:\n if file.endswith(\".csv\") and not(file.startswith(\"outFile\")):\n print(\"...reading {}\".format(file))\n fcsv = csv.reader(open(file, newline=''), delimiter=' ', quotechar='|') \n for row in fcsv:\n line = ', '.join(row)\n if line[:4] == \"Date\":\n d = line[5:13]\n dd = d[6:9]+\"/\"+d[4:6]+\"/\"+d[:4]\n next\n elif line[12] == \"*\" or line[0] == \"*\":\n next\n elif line[0] == \"T\":\n next\n else:\n L = dd + \",\" + line + \"\\n\"\n outFile.write(L)\n except Exception as e:\n raise e\n print(\"No CSV files in here!\")\n\n try: \n print(\"\\nAll files have been merged into: {}\".format(of))\n outFile.close()\n \n except Exception as ee:\n raise ee",
"def merge(self, step_index):\n if (self.drip_config is not None\n and self.drip_cal_config is not None):\n from sofia_redux.instruments.forcast.getpar import getpar\n header = fits.Header()\n\n cormerge = getpar(header, 'CORMERGE', dtype=str,\n default='NOSHIFT')\n\n # modify by obstype or chop/nod mode\n if self.drip_cal_config['obstype'] == 'STANDARD_FLUX' \\\n or 'npc' in (self.drip_cal_config['cnmode'].lower()):\n cormerge = 'CENTROID'\n\n # set parameter values in current set\n if cormerge.upper() in self.merge_opt:\n idx = self.merge_opt.index(cormerge)\n self.current[step_index].set_value('cormerge',\n option_index=idx)",
"def get_clean_data(path = 'ucr_offenses_known_monthly_1960_2016_dta/', \n identifier_variables = ['fips_state_county_code', 'state', 'date', 'year', 'zip_code', 'month'], \n crime_category = ['act_aggravated_assault', 'act_simple_assault', 'act_murder', 'act_robbery_total', \n 'act_manslaughter', 'act_theft_total', 'act_mtr_vhc_theft_total', 'act_burglary_total', 'act_rape_total'], \n start_year = 1980, end_year = 2009, selected_area = 'all'):\n all_df = []\n for i in get_filenames(start_year, end_year):\n file = path + i\n print(file)\n each_df = pd.read_stata(file)\n each_df = each_df[identifier_variables + crime_category]\n each_df = each_df[each_df['fips_state_county_code'] == '06001']\n each_df['zipcode'] = each_df['zip_code'].apply(lambda x: str(x)[0:5])\n #split Alameda into West and East Alameda according to zip code\n if selected_area == 'east':\n each_df = each_df[(each_df['zipcode'] == '94550') | (each_df['zipcode'] == '94566') | \n (each_df['zipcode'] == '94586') | (each_df['zipcode'] == '94568') | \n (each_df['zipcode'] == '94588') | (each_df['zipcode'] == '94551')]\n elif selected_area == 'west':\n each_df = each_df[(each_df['zipcode'] != '94550') & (each_df['zipcode'] != '94566') & \n (each_df['zipcode'] != '94586') & (each_df['zipcode'] != '94568') & \n (each_df['zipcode'] != '94588') & (each_df['zipcode'] != '94551') &\n (each_df['zipcode'] != '0') & (each_df['zipcode'] != '0.0') & \n (each_df['zipcode'] != 'not r') & (each_df['zipcode'] != 'missi')]\n each_df.loc[:, 'YearMonth'] = [int(re.sub('-', '', date)[0:6]) for date in each_df.loc[:, 'date']]\n #sum up amount of crimes taken place in each category for each month\n each_df = each_df.groupby(['YearMonth'])[crime_category].sum()\n each_df['crime_sum'] = each_df.sum(axis = 1)\n each_df = each_df['crime_sum'].reset_index()\n all_df.append(each_df)\n df = pd.concat(all_df).fillna(0)\n df = df.sort_values('YearMonth').reset_index()\n #split variable 'YearMonth\" into two variables 'year' and \"month' for Poission regression\n del df['index']\n df['year'] = df['YearMonth'].apply(lambda x: str(x)[:4])\n df['month'] = df['YearMonth'].apply(lambda x: str(x)[4:])\n if selected_area == 'east':\n df.to_csv('east_alameda_crime.csv')\n elif selected_area == 'west':\n df.to_csv('west_alameda_crime.csv')\n else:\n df.to_csv('all_alameda_crime.csv')\n return(df)",
"def read_cbf_file(inputfilename):\n \n \n with open(inputfilename, 'rb') as fid:\n BD = np.fromfile(fid, np.float64)\n \n # https://www.mathworks.com/help/matlab/ref/fwrite.html\n # https://www.mathworks.com/help/matlab/numeric-types.html\n \n k=0;\n # Static data (100 places)\n SD=BD[k:k+100]\n k=k+100\n # Priors (50 places)\n PR=BD[k:k+50];\n k=k+50;\n # Priorunc (50 places)\n PRU=BD[k:k+50]\n k=k+50 \n \n # O. Priors (50 places)\n OPR=BD[k:k+50]\n k=k+50\n # O. Priorunc (50 places)\n OPRU=BD[k:k+50]\n k=k+50\n \n CBF = {}\n CBF['PARPRIORS'] = np.expand_dims(PR,axis=1)\n CBF['PARPRIORUNC'] = np.expand_dims(PRU,axis=1)\n CBF=read_other_obs_constraints(CBF,OPR,OPRU)\n \n CBF['ID'] = SD[0] # ID (not used)\n CBF['LAT'] = SD[1] # Latitude\n CBF['nodays'] = int(SD[2]) # Number of days\n CBF['nomet'] = int(SD[3])\n CBF['noobs'] =int(SD[4])\n CBF['EDC'] = SD[5]\n CBF['EDCDIAG'] = SD[6]\n# CBF = {'PARPRIORS':np.expand_dims(PR,axis=1), \n# 'PARPRIORUNC':np.expand_dims(PRU,axis=1), \n# 'OTHERPRIORS':np.expand_dims(OPR,axis=1), #\n# 'OTHERPRIORSUNC':np.expand_dims(OPRU,axis=1),\n# 'ID':SD[0], # ID (not used)\n# 'LAT':SD[1], # Latitude\n# 'nodays':int(SD[2]), # Number of days\n# 'nomet':int(SD[3]), \n# 'noobs':int(SD[4]),\n# 'EDC':SD[5],\n# 'EDCDIAG':SD[6],\n# 'gppabs':SD[7],\n# 'rc_random_search':SD[10]==1,\n# 'nbe_annual_unc':SD[13],\n# 'etiav':SD[14],\n# 'nbe_seasonal_unc':SD[15]}\n \n #MCMC start searching EDCs from anywhere (1) or from prescribed starting\n #point(0). this is outdated - consider deleting\n CBF['rc_random_search'] = SD[10]==1\n \n #NEE IAV options\n CBF=read_obs_uncertainty_fields(CBF,SD,OPRU)\n \n \n TEMPDATA=BD[k:k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']].reshape(CBF['nodays'],\n (CBF['nomet']+CBF['noobs']))\n #All met data\n CBF['MET'] = TEMPDATA[0:CBF['nodays'],0:CBF['nomet']] # Add in new meteorology here\n# CBF['OBS'] = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBFOBS = TEMPDATA[0:CBF['nodays'],CBF['nomet']:]\n CBF=define_cbf_obs_fields(CBF,CBFOBS)\n \n #Removing redundant fields\n# CBF=rmfield(CBF,'noobs');\n# # CBF=rmfield(CBF,'nomet');\n# # CBF=rmfield(CBF,'nodays');\n \n \n # Read prescribed mean meteorology\n \n if len(BD) - (k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']) == CBF['nomet'] + CBF['noobs']:\n \n kmmet= k+(CBF['nomet']+CBF['noobs'])*CBF['nodays']\n CBF['mmet'] = BD[kmmet:kmmet+CBF['nomet']]\n \n \n #Retaining \"OTHERPRIORS\" for now\n CBF['RAW'] = {}\n CBF['RAW']['OTHERPRIORS']=OPR;\n CBF['RAW']['OTHERPRIORSUNC']=OPRU;\n CBF['RAW']['info']='Raw inputs/outputs as stored in CBF binary structure';\n CBF['RAW']['details']='For completeness & development purpose only; When re-writing CBF to file, these are over-written by CBF.OBS, etc.';\n\n \n \n \n \n return CBF\n #disp(sprintf('CHECK: .cbf file \"%s\" successfully read into matlab.',filename)) ",
"def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")",
"def combine_files(output_filename, *passes):\n all_columns = {}\n for x in passes:\n sp = pyvyu.load_opf(x)\n column_list = sp.get_column_list()\n for c in column_list:\n all_columns[c] = sp.get_column(c)\n sp = pyvyu.Spreadsheet()\n sp.name = output_filename\n sp.columns = all_columns\n pyvyu.save_opf(sp, output_filename, True, *all_columns.keys())\n return output_filename",
"def post_process():\n for route in os.listdir(GFR_ROUTES_LOCATION):\n if os.path.isfile(MISSING_LOCATION + route):\n # If the route is missing, output the reference data with correct OSM tags.\n\n copyfile(MISSING_LOCATION + route, OUTPUT_LOCATION + route)\n add_property(OUTPUT_LOCATION + route, 'error_type', 'missing')\n elif os.path.isfile(DIFF_MISSING_LOCATION + route) and os.path.isfile(DIFF_WRONG_LOCATION + route) \\\n and merge_differences(route, DIFF_MISSING_LOCATION + route, DIFF_WRONG_LOCATION + route,\n OUTPUT_LOCATION + route):\n # If there's a geometrical difference, combine the two difference files and output it.\n\n add_property(OUTPUT_LOCATION + route, 'error_type', 'difference')\n elif os.path.isfile(TAGS_LOCATION + route):\n # When there's no geometrical difference, output the OSM data possibly containing missing tags.\n\n copyfile(TAGS_LOCATION + route, OUTPUT_LOCATION + route)\n else:\n raise Exception(\"No output file could be generated for route: \" + route)\n\n copy_to_site()\n\n # Export a last updated timestamp\n with open('last_updated', 'w') as fp:\n fp.write(str(int(time.time() * 1000)))",
"def main(args):\n input_file = args[1]\n output_occupations = args[2]\n output_states = args[3]\n\n print(\"Analyzing input file:\")\n summary = process_data.Summary(input_file)\n print(\"Reading input data\")\n summary.read_file()\n\n print(\"Computing summaries\")\n occupations = summary.get_results(input_format.Concept.SOC_NAME)\n states = summary.get_results(input_format.Concept.WORK_STATE)\n\n print(\"Writing results\")\n occupations.to_file(output_occupations)\n states.to_file(output_states)",
"def ingest_all(self):\n\t\tfor place in self.district_codes():\n\t\t\tself.sequence_ingest(place)\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_cases',self.edition)",
"def combine_gvcf(self, reference, gvcf_list, output, input_is_sorted=False, extension_list=[\"g.vcf\",],\n tmp_dir=\"./tmp_combine_gvcf/\", max_files_per_merging=50, iteration=0, threads=None,\n remove_intermediate_files=False):\n\n filtered_gvcf_list = []\n for filename in gvcf_list:\n for extension in extension_list:\n if extension == filename[-len(extension):]:\n filtered_gvcf_list.append(filename)\n break\n \n if len(filtered_gvcf_list) <= max_files_per_merging:\n options = self.parse_options(reference, filtered_gvcf_list, output, input_is_sorted, extension_list=extension_list)\n self.execute(options, runtype=\"cp\")\n if remove_intermediate_files:\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n else:\n self.safe_mkdir(tmp_dir)\n iteration_dir = \"%s/iteration_%i/\" % (tmp_dir, iteration)\n self.safe_mkdir(iteration_dir)\n\n number_of_files = len(filtered_gvcf_list)\n\n bins = np.arange(0, number_of_files, max_files_per_merging)\n #print(bins)\n if bins[-1] != number_of_files:\n if number_of_files - bins[-1] < 2:\n bins[-1] = number_of_files\n else:\n bins = np.append(bins, number_of_files)\n\n output_file_list = []\n options_list = []\n\n merged_files = 0\n for i in range(0, len(bins)-1):\n output_file = \"%s/%i.g.vcf\" % (iteration_dir, i)\n output_file_list.append(output_file)\n #print(bins[i], bins[i+1])\n\n merged_files += bins[i+1] - bins[i]\n options_list.append(self.parse_options(reference,\n filtered_gvcf_list[bins[i]:bins[i+1]],\n output_file,\n input_is_sorted, extension_list=extension_list))\n print(\"%i/%i files will be merged\" % (merged_files, number_of_files))\n\n self.parallel_execute(options_list, threads=threads, runtype=\"cp\")\n\n self.combine_gvcf(reference, output_file_list, output, input_is_sorted=input_is_sorted,\n extension_list=extension_list,\n tmp_dir=tmp_dir,\n max_files_per_merging=max_files_per_merging, iteration=iteration+1)",
"def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()",
"def write_shapefile_branch1(self, shpname):\r\n inarrays = self.read_traveltime()\r\n \r\n Narrays = len(inarrays) \r\n \r\n \r\n westlats = []\r\n westlons = []\r\n eastlats = []\r\n eastlons = [] \r\n lines1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat, westlon = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat, eastlon = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon, westlat], [eastlon, eastlat]])\r\n westlats.append(westlat)\r\n westlons.append(westlon)\r\n eastlats.append(eastlat)\r\n eastlons.append(eastlon)\r\n \r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbLineString)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Travel_T', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, density, lines, segs, westlon, westlat, eastlon, eastlat, Ttime):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n line = osgeo.ogr.Geometry(osgeo.ogr.wkbLineString)\r\n # Add points individually to the line\r\n xy = lines[i]\r\n \r\n line.AddPoint_2D(xy[0][0],xy[0][1])\r\n line.AddPoint_2D(xy[1][0],xy[1][1])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(line)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(line)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID)) \r\n feature.SetField('Density', int(density[i]))\r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Travel_T', \"{:.1f}\".format(Ttime[i]))\r\n feature.SetField('Lon_west', \"{:.3f}\".format(westlon[i]))\r\n feature.SetField('Lat_west', \"{:.3f}\".format(westlat[i]))\r\n feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n Ttime = inarrays[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n lines1 = [lines1[i] for i in ind]*Narrays\r\n westlats = [westlats[i] for i in ind]*Narrays\r\n westlons = 
[westlons[i] for i in ind]*Narrays\r\n eastlats = [eastlats[i] for i in ind]*Narrays\r\n eastlons = [eastlons[i] for i in ind]*Narrays\r\n \r\n inarrays_new = [inarrays[i][ind,:] for i in range(Narrays)]\r\n inarrays_stack = np.vstack(inarrays_new)\r\n \r\n add_feature(layer, 1, inarrays_stack[:,3], np.asarray(lines1), inarrays_stack[:,1], \r\n np.asarray(westlons), np.asarray(westlats), \r\n np.asarray(eastlats), np.asarray(eastlons), inarrays_stack[:,2])",
"def load_census_data(data_path):\n census_cols = {\n \"DP03_0051E\": \"total-households\",\n \"DP04_0047E\": \"total-renter-occupied-households\",\n \"DP04_0046E\": \"total-owner-occupied-households\",\n \"S2506_C01_001E\": \"total-owner-occupied-households-mortgage\",\n \"B25064_001E\": \"median-gross-rent\",\n \"DP03_0062E\": \"median-household-income\",\n \"B25077_001E\": \"median-property-value\",\n \"S2506_C01_039E\": \"median-monthly-housing-cost\",\n \"S2502_C01_002E\": \"pct-white\",\n \"S2502_C01_003E\": \"pct-af-am\",\n \"S2502_C01_009E\": \"pct-hispanic\",\n \"S2502_C01_004E\": \"pct-am-indian\",\n \"S2502_C01_005E\": \"pct-asian\",\n \"S2502_C01_006E\": \"pct-nh-pi\",\n \"S2502_C01_008E\": \"pct-multiple\",\n \"S2502_C01_007E\": \"pct-other\",\n \"DP03_0119E\": \"pct-below-poverty-level\",\n \"DP03_0099E\": \"without-health-insurance\",\n \"DP03_0096E\": \"with-health-insurance\",\n \"DP02_0003E\": \"households-children\",\n \"DP02_0009E\": \"single-parent-household\",\n \"DP02_0012E\": \"older-adult-alone\",\n \"DP02_0058E\": \"level-of-education\",\n \"DP02_0095E\": \"immigrant-status\",\n \"DP02_0112E\": \"english-fluency\",\n \"DP03_0019E\": \"drive-to-work\",\n \"DP03_0021E\": \"public-transport-to-work\",\n \"DP04_0003E\": \"vacant-properties\",\n \"DP04_0014E\": \"live-in-mobile-home\",\n \"B25035_001E\": \"median-year-structure-built\",\n }\n\n census_df = pd.read_csv(data_path, dtype={\"GEOID\": str})[\n [\"GEOID\"] + list(census_cols.keys())\n ].rename(columns=census_cols)\n\n census_df[\"pct-renter-occupied\"] = (\n census_df[\"total-renter-occupied-households\"] / census_df[\"total-households\"]\n ) * 100\n census_df[\"pct-owner-occupied\"] = (\n census_df[\"total-owner-occupied-households\"] / census_df[\"total-households\"]\n ) * 100\n census_df[\"pct-owner-occupied-mortgage\"] = (\n census_df[\"total-owner-occupied-households-mortgage\"]\n / census_df[\"total-households\"]\n ) * 100\n census_df[\"pct-owner-occupied-without-mortgage\"] = (\n (\n census_df[\"total-owner-occupied-households\"]\n - census_df[\"total-owner-occupied-households-mortgage\"]\n )\n / census_df[\"total-households\"]\n * 100\n )\n census_df[\"median-house-age\"] = (\n datetime.datetime.now().year - census_df[\"median-year-structure-built\"]\n )\n census_df[\"pct-non-white\"] = 100 - census_df[\"pct-white\"]\n census_df[\"pct-without-health-insurance\"] = (\n census_df[\"without-health-insurance\"]\n / (census_df[\"without-health-insurance\"] + census_df[\"with-health-insurance\"])\n * 100\n )\n return census_df.drop(\n [\n \"without-health-insurance\",\n \"with-health-insurance\",\n \"median-year-structure-built\",\n ],\n axis=1,\n )",
"def merge_gw(type, gameweek_path): \n count_directory(gameweek_path)\n if type == 'Understat':\n prefix = 'US_gw'\n if type == 'FPL':\n prefix = 'gw'\n num_gws = 38\n filepath = gameweek_path + f'merged_{prefix}.csv'\n if os.path.exists(filepath):\n os.remove(filepath)\n for gw in range(1, num_gws + 1): # + 1 because range is exclusive\n merged_gw_filename = f\"merged_{prefix}.csv\" # Output file\n gw_filename = prefix + str(gw) + \".csv\" \n gw_path = os.path.join(gameweek_path, gw_filename)\n fin = open(gw_path, encoding=\"utf-8\")\n reader = csv.DictReader(fin)\n fieldnames = reader.fieldnames\n fieldnames += [\"GW\"]\n rows = []\n for row in reader:\n row[\"GW\"] = gw\n rows += [row]\n out_path = os.path.join(gameweek_path, merged_gw_filename)\n fout = open(file = out_path,\n mode='a', \n encoding=\"utf-8\")\n writer = csv.DictWriter(fout, fieldnames=fieldnames, lineterminator='\\n')\n if gw == 1:\n writer.writeheader()\n for row in rows:\n writer.writerow(row)\n print(f'Succesfully wrote the {prefix} gameweek files from {gameweek_path} into a merged gameweek file to {out_path}')"
] | [
"0.63951296",
"0.5647709",
"0.5632972",
"0.5529909",
"0.5512112",
"0.5358087",
"0.53524876",
"0.534505",
"0.53141177",
"0.53104985",
"0.5308745",
"0.52762294",
"0.5263449",
"0.5253142",
"0.52261686",
"0.522328",
"0.51910317",
"0.51679087",
"0.5163909",
"0.5161484",
"0.5154538",
"0.5148629",
"0.512723",
"0.5108288",
"0.51012427",
"0.5078491",
"0.50725204",
"0.506505",
"0.5061358",
"0.5056744"
] | 0.629315 | 1 |
Return a dictionary of a location's properties. | def to_dict(self):
return {
'location_id': self.location_id,
'location_name': self.location_name
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_properties():",
"def get_locations():\n return STATUS['locations']",
"def _get_location_details(self, location):\n resp = requests.get(\n self.base_url,\n params = {\n 'address': ''.join(location.split(' ')),\n 'key': GOOGLE_API_KEY,\n }\n )\n return resp.json()",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'locations') and self.locations is not None:\n _dict['locations'] = [x.to_dict() for x in self.locations]\n return _dict",
"def get_location_dict(self) -> SubDirDict[Optional[str]]:\n location_dict: SubDirDict[Optional[str]] = SubDirDict()\n for directory, location_time in self.camera_placements.items():\n location_dict[Path(directory)] = location_time.location\n\n return location_dict",
"def location(self):\r\n return self._get('location', {})",
"def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties",
"def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties",
"def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")",
"def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")",
"def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})",
"def get_properties(self):\n return self.properties",
"def getProperties():",
"def properties(self) -> Any:\n return pulumi.get(self, \"properties\")",
"def getStandardGeoProperties():\n geoproperties = {\n 'Hsin': {\n 'tl': {\n 'lat': 21.108,\n 'long': -158.584,\n },\n 'br': {\n 'lat': 21.9,\n 'long': -157.392,\n }\n },\n 'Hsout':{\n 'tl': {\n 'lat': 20.33,\n 'long': -159.87,\n },\n 'br': {\n 'lat': 22.7,\n 'long': -156.3,\n }\n },\n }\n return geoproperties",
"def get_properties(self):\n return self.properties",
"def properties_get(self):\n return self._get('properties')",
"def location(self):\n return self.properties.get(\"location\", Location())",
"def to_dict(self):\n return self.properties",
"def to_dict(self):\n return self.properties",
"def getcarlocation(self) -> dict:\n return self.carlocation",
"def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))",
"def location_info(self) -> LocationInfoIm:\n return self._location_info",
"def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )",
"def getProperties(self):\n return self.properties",
"def get_provider_properties_dict(self):\n pass",
"def locations(self):\n return self.data.get(\"locations\", [])",
"def get_individual_json_data( location ):\n\ttemp = {\n\t\t'name' \t\t\t: location.get_name(),\n\t\t'url' \t\t\t: location.get_url(),\n\t\t'population' \t: location.get_population(),\n\t\t'type'\t\t\t: location.get_location_type()\n\t}\n\t# data[ str( idx ) ] = temp\n\treturn temp",
"def get_cloud_info(location):\n params = dict()\n # Read in the file\n with open(location, 'r') as myfile: data=myfile.read()\n obj = json.loads(data)\n for o in obj:\n params[o] = obj[o]['value']\n return params",
"async def get_location(self, location_id: str) -> dict:\r\n return await self.get(API_LOCATION.format(location_id=location_id))"
] | [
"0.66905564",
"0.6581534",
"0.65401554",
"0.65313935",
"0.65221065",
"0.650296",
"0.6466247",
"0.6466247",
"0.6427804",
"0.6427804",
"0.6396865",
"0.6388714",
"0.637269",
"0.63603663",
"0.6325594",
"0.631466",
"0.62228143",
"0.62096256",
"0.6171209",
"0.6171209",
"0.6163851",
"0.6071737",
"0.60679716",
"0.60533834",
"0.60463434",
"0.60410637",
"0.6026964",
"0.598782",
"0.59838617",
"0.5971634"
] | 0.7027157 | 0 |
Method used to sort the objects from low > highest of the currenthealth | def sort_currenthealth(cls):
CloudCtx.objCloudCtx.sort(key=lambda x: x.currenthealth)
for elem in CloudCtx.objCloudCtx:
print(elem.display_cloud_ctx()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sort(self):\n self.population.sort()\n self.population.reverse()",
"def order_by_fitness(self):\n self.fauna_list['Herbivore'].sort(key=operator.\n attrgetter('animal_fitness'))\n self.fauna_list['Carnivore'].sort(key=operator.\n attrgetter('animal_fitness'),\n reverse=True)",
"def sort_my_hands(self):\n self.hands_list.sort(reverse=True)",
"def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return",
"def sort_population(self):\n self.population.sort(key=lambda x: x.fitness, reverse=True)",
"def sortPopulation(self):\n self.population = sorted(self.population, key=attrgetter('fitness'), reverse=True)",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sort(self):\n self.cards.sort()",
"def sort(self):\n self.cards.sort()",
"def sort():\n return -1",
"def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()",
"def sortby(self):\n ...",
"def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)",
"def sort(self):\r\n\t\treturn sorted(self.sample)",
"def test_list_healthmonitors_sort(self):\r\n resources = \"health_monitors\"\r\n cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list",
"def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. \n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()",
"def sort_height(sprite):\n return sprite.height",
"def tiles_by_tissue_percentage(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.tissue_percentage, reverse=True)\n return sorted_list",
"def sort(self):\n\n self.models.sort(key=methodcaller('get_age'))",
"def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;",
"def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]",
"def sorting(list_object): # Takes in a ListItem object and returns the\r\n # priority value - from w3schools.com\r\n return list_object.priority",
"def run_sort_home_by_score(self):\n self.homes = self.python_sort(self.homes)",
"def sort(self):\n self.deckcards.sort()",
"def hsort(h1, h2) -> int:\r\n\r\n if h1.angle < h2.angle:\r\n return -1\r\n elif h1.angle > h2.angle:\r\n return 1\r\n else:\r\n return 0",
"def sortPopulation(self, population):\n\t\tpopulation.sort(lambda a, b: cmp(b.fitness, a.fitness))",
"def sort_area(sprite):\n return sprite.area",
"def sort(self, desc):\n self.__sortByIndex(0, desc)",
"def sorted(self, key=None, reverse=True, **kwargs):\n def hv_improvement(kernel):\n if kernel.objective_values is None:\n return float('-inf')\n return self._UHVI_indicator(kernel)(kernel.objective_values)\n if key is None:\n key = hv_improvement\n return sorted(self, key=key, reverse=reverse, **kwargs)"
] | [
"0.6516192",
"0.62642473",
"0.61162984",
"0.60903406",
"0.60502887",
"0.6042862",
"0.6019754",
"0.60002744",
"0.60002744",
"0.59838814",
"0.591684",
"0.59040284",
"0.5897962",
"0.5884195",
"0.5877887",
"0.58615667",
"0.5833206",
"0.5787725",
"0.57582235",
"0.57577944",
"0.5753502",
"0.57514465",
"0.57456267",
"0.5737126",
"0.57153136",
"0.5672754",
"0.5654743",
"0.561344",
"0.5603321",
"0.5589278"
] | 0.6984742 | 0 |
Sequence init with other seq should preserve name and info. | def test_init_other_seq(self):
r = self.RNA("UCAGG", name="x", info={"z": 3})
s = Sequence(r)
self.assertEqual(s._seq, "UCAGG")
self.assertEqual(s.name, "x")
self.assertEqual(s.info.z, 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, name, seq):",
"def __init__(self, name, sequence, description=None, seq_type=None):\n self.name = name\n self.description = description\n self.seq_type = seq_type\n self._sequence = sequence",
"def __init__(self, seq_name, seed=0, hashable=False):\n self.__name = seq_name\n self.__cur_value = seed\n return",
"def __init__(self):\n\n self.sequence = []",
"def __init__(self, seq: str, debug=False):\n self.debug = debug\n self.seq = self._validate_input_sequence(seq)\n self.suffixes = []\n self.stage = 1",
"def test_init(self):\n orig = \"TC---\"\n seq = self.SequenceClass(orig)\n self.assertEqual(str(seq), orig)",
"def __init__(self, sequence):\n self._seq = sequence # Copy of the given data.\n # Reference to the underlying data, will increment to 0 on first call\n # to next element.\n self._k = -1",
"def __init__(self,sequence,label=\"\"):\n\n self.sequence = sequence\n self.label = label",
"def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation",
"def __init__(self, seq1, seq2, algo):\n self.seq1 = seq1\n self.seq2 = seq2\n self.algo = algo",
"def __init__(self, seq=None, _factory=dict, **kwargs):\n\n self._factory = _factory\n self.__dict__ = _factory()\n\n self.__dict__.update(seq or [], **kwargs)",
"def _init_sequence(self, substr_match: SubstrMatch):\n\n basename = self._base.name\n base_frame = substr_match.groups[0]\n\n frame_pos = substr_match.pos\n\n pos1, pos2 = frame_pos.start, frame_pos.end\n\n # This is used later for creating a Concrete Sequence\n seq_str_parts = SequenceStrParts(\n prefix=basename[:pos1],\n suffix=basename[pos2:],\n pad_len=len(base_frame)\n )\n\n self._frames.add(int(base_frame))\n\n self._seq_str_parts = seq_str_parts\n self._is_sequence = True",
"def __init__(self, src_seq, trg_seq, index_seq, src_word2id, trg_word2id, max_len, conv_seq, ent, ID,ans_seq,ir_seq,max_r_ans,entity_cal,entity_nav,entity_wet):\r\n self.src_seqs = src_seq\r\n self.trg_seqs = trg_seq\r\n self.index_seqs = index_seq\r\n\r\n self.num_total_seqs = len(self.src_seqs)\r\n self.src_word2id = src_word2id\r\n self.trg_word2id = trg_word2id\r\n self.max_len = max_len\r\n self.conv_seq = conv_seq\r\n self.ent = ent\r\n self.ID = ID\r\n self.ans_seq=ans_seq\r\n # print(self.ans_seq)\r\n self.ir_seq=ir_seq\r\n self.max_r_ans=max_r_ans\r\n self.entity_cal = entity_cal\r\n self.entity_nav = entity_nav\r\n self.entity_wet = entity_wet",
"def __init__(self, seq_record=None):\n\t\tself._record = seq_record",
"def __init__(self, seq_name, first, last, score):\n self.sequence_name = seq_name\n self.first = int(first)\n self.last = int(last)\n self.score = int(score)",
"def __init__(self, seq):\n # Check the type of seq. Only strings are accepted\n if type(seq) == type(\"string\"):\n self.sequence = seq.upper()\n else:\n raise Exception(\"Invalid typesequence of nucleotides for Sequence class.\")",
"def __init__(self, length, alphabet=IUPAC.unambiguous_dna):\n seq_str = self.SampleLetters(alphabet.letters, length)\n \n Seq.__init__(self, seq_str.upper(), alphabet)",
"def test_init(self):\n orig = \"\"\n r = self.SequenceClass(orig)\n self.assertEqual(str(r), orig)\n\n orig = \"TCAGGA\"\n r = self.SequenceClass(orig)\n self.assertEqual(r._data, array([0, 1, 2, 3, 3, 2]))\n self.assertEqual(str(r), orig)",
"def __init__(self, vocab_src:nlp.Vocab, vocab_tgt:nlp.Vocab, embedding_dim:int, hidden_dim:int, dev,\n num_layers:int=1, bos_idx=2, eos_idx=3, use_attention=True):\n super(Seq2Seq, self).__init__()\n\n self._encoder = Encoder(vocab_src, embedding_dim, hidden_dim)\n self._decoder = Decoder(vocab_tgt, embedding_dim, hidden_dim)\n\n self._dev = dev\n self._hidden_dim = hidden_dim\n self._bos_idx = bos_idx\n self._eos_idx = eos_idx\n self._pad_idx = self._encoder.pad_idx\n self._mask = None\n\n # global attention related\n self._use_attention = use_attention\n self._attn = Attention(self._hidden_dim) if self._use_attention else None\n\n # teacher forcing related\n self._use_teacher_forcing = None\n self._teacher_forcing_ratio = None",
"def _set_seq(self, seq, seq_type):\n assert seq_type in (\"hit\", \"query\")\n if seq is None:\n return seq # return immediately if seq is None\n else:\n if not isinstance(seq, (str, SeqRecord)):\n raise TypeError(\n \"%s sequence must be a string or a SeqRecord object.\" % seq_type\n )\n # check length if the opposite sequence is not None\n opp_type = \"hit\" if seq_type == \"query\" else \"query\"\n opp_seq = getattr(self, \"_%s\" % opp_type, None)\n if opp_seq is not None:\n if len(seq) != len(opp_seq):\n raise ValueError(\n \"Sequence lengths do not match. Expected: %r (%s); found: %r (%s).\"\n % (len(opp_seq), opp_type, len(seq), seq_type)\n )\n\n seq_id = getattr(self, \"%s_id\" % seq_type)\n seq_desc = getattr(self, \"%s_description\" % seq_type)\n seq_feats = getattr(self, \"%s_features\" % seq_type)\n seq_name = \"aligned %s sequence\" % seq_type\n\n if isinstance(seq, SeqRecord):\n seq.id = seq_id\n seq.description = seq_desc\n seq.name = seq_name\n seq.features = seq_feats\n seq.annotations[\"molecule_type\"] = self.molecule_type\n elif isinstance(seq, str):\n seq = SeqRecord(\n Seq(seq),\n id=seq_id,\n name=seq_name,\n description=seq_desc,\n features=seq_feats,\n annotations={\"molecule_type\": self.molecule_type},\n )\n\n return seq",
"def __init__(self, sequence_number):\n super(FakeIdentifier, self).__init__()\n self.sequence_number = sequence_number",
"def __init__(self, sequence, alpha = None, name = \"\", seqinfo = \"\"):\n self.name = name\n self.info = seqinfo\n if type(sequence) is str:\n self.data = tuple(sequence)\n elif type(sequence) is tuple:\n self.data = sequence\n elif type(sequence) is list:\n self.data = tuple(sequence)\n else:\n raise RuntimeError(\"Sequence data is not specified correctly: must be string or tuple\")\n # Resolve choice of alphabet\n if alpha == None:\n # Alphabet is not set, attempt to set it automatically...\n alpha = alphabet.getBySeq(self.data)\n if alpha == None:\n raise RuntimeError(\"Could not identify alphabet from sequence\")\n elif isinstance(alpha, str):\n alphaname = alpha\n alpha = alphabet.getByName(alphaname)\n if alpha == None:\n raise RuntimeError(\"No predefined alphabet with name \\\"\" + alphaname + \"\\\"\")\n if not(alpha.isValidString(self.data)):\n raise RuntimeError(\"Invalid alphabet specified: \"+\"\".join(alpha.getSymbols())+\" is not compatible with sequence '\"+\"\".join(self.data)+\"'\")\n elif isinstance(alpha, alphabet.Alphabet):\n if not(alpha.isValidString(self.data)):\n raise RuntimeError(\"Invalid alphabet specified: \"+\"\".join(alpha.getSymbols())+\" is not compatible with sequence '\"+\"\".join(self.data)+\"'\")\n else:\n raise RuntimeError(\"Unexpected type for alpha\")",
"def test_init(self):\n orig = \"\"\n r = self.SequenceClass(orig)\n self.assertEqual(str(r), orig)\n\n orig = \"TCAGGA\"\n r = self.SequenceClass(orig)\n self.assertEqual(r._data, array([6, 62]))\n self.assertEqual(str(r), orig)",
"def set_SEQUENCE(self, newSeq):\n\t\tself.SEQUENCE = newSeq\n\t\tself.LENGTH = len(newSeq)",
"def __init__(self, sequence_string, rollover=False):\r\n self.sequence_string = sequence_string\r\n self.sequence_length = len(sequence_string[0])\r\n self.rollover = rollover\r\n self.last_item = sequence_string[-1]\r\n self.__name__ = \"%s('%s')\" % (self.__class__.__name__, sequence_string)",
"def testSeqDefaults(self):\n self.assertEqual(\n None,\n self.mr._is_seq\n )\n\n self.assertEqual(\n None,\n self.mr._sequences\n )",
"def set_seqs(self, a, b):\n\n self.set_seq1(a)\n self.set_seq2(b)",
"def __add__(self, other: Seq) -> Seq:\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)",
"def __init__(self, Label=\"\", Sequence=\"\"):\n self.Label = Label\n self.Sequence = Sequence",
"def sequence_params(self):"
] | [
"0.73695916",
"0.64641666",
"0.6406861",
"0.63860846",
"0.6326368",
"0.6289234",
"0.62387466",
"0.62214434",
"0.61863655",
"0.6182251",
"0.6173626",
"0.608901",
"0.6055515",
"0.6053556",
"0.60087323",
"0.59916127",
"0.5978379",
"0.5969416",
"0.59500825",
"0.59398437",
"0.5890074",
"0.5884882",
"0.58841205",
"0.5881188",
"0.58157134",
"0.5810202",
"0.580665",
"0.57223576",
"0.57177997",
"0.5684395"
] | 0.7811066 | 0 |
Sequence to_fasta() should return Fastaformat string | def test_to_fasta(self):
even = "TCAGAT"
odd = even + "AAA"
even_dna = self.SEQ(even, name="even")
odd_dna = self.SEQ(odd, name="odd")
self.assertEqual(even_dna.to_fasta(), ">even\nTCAGAT\n")
# set line wrap to small number so we can test that it works
self.assertEqual(even_dna.to_fasta(block_size=2), ">even\nTC\nAG\nAT\n")
self.assertEqual(odd_dna.to_fasta(block_size=2), ">odd\nTC\nAG\nAT\nAA\nA\n")
# check that changing the linewrap again works
self.assertEqual(even_dna.to_fasta(block_size=4), ">even\nTCAG\nAT\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def to_string(fasta):\n\n # remove header\n fasta_nh = fasta.readlines()[1:]\n\n # make into single string\n fasta_str = ''.join(fasta_nh)\n\n # remove newline characters\n seq = fasta_str.replace(\"\\n\", \"\")\n\n return seq",
"def make_fasta(data):\n result = data\n if not data.startswith(\">\"):\n result = \"\"\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += \">seq{}\\n\".format(cnt)\n result += line\n result += \"\\n\"\n cnt += 1\n return result.strip()",
"async def generate_sequence_fasta(db, sequence_id):\n sequence = await db.sequences.find_one(sequence_id, [\"sequence\", \"otu_id\", \"isolate_id\"])\n\n if not sequence:\n raise virtool.errors.DatabaseError(\"Sequence does not exist\")\n\n otu_name, isolate_name = await get_otu_and_isolate_names(db, sequence[\"otu_id\"], sequence[\"isolate_id\"])\n\n fasta = format_fasta_entry(\n otu_name,\n isolate_name,\n sequence_id,\n sequence[\"sequence\"]\n )\n\n return format_fasta_filename(otu_name, isolate_name, sequence[\"_id\"]), fasta",
"def Seq2fasta(idsSeqs):\n for data in idsSeqs:\n if data[0] != '':\n print(\">\" + data[0], end = '\\n')\n tmp = 0\n for c in range(len(data[1])+1):\n if data[1] == '':\n break \n else:\n if c % 60 == 0 and c != 0:\n print(data[1][tmp:c] + '')\n tmp = c\n elif c == len(data[1]): \n print(data[1][tmp:] + '')\n break\n else:\n break",
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def fasta_format(self, line_width=None):\n return fasta_formatted_string(self.name, self._sequence,\n description=self.description,\n line_width=line_width)",
"def format_fasta(title, sequence):\n fasta_width = 70 # Number of characters in one line\n\n n_lines = 1 + len(sequence) // fasta_width # Number of lines\n\n lines = [ sequence[i*fasta_width: (i+1)*fasta_width] for i in range(n_lines)]\n lines = \"\\n\".join(lines)\n \n formatted = f\"> {title}\\n{lines}\\n\\n\"\n return formatted",
"def df2fasta(df, fn, sep='.', columns=None):\n if columns is None:\n columns = list(df.columns)\n if 'seq' in columns:\n columns.remove('seq')\n with open(fn, 'w') as fh:\n for ind, row in df.iterrows():\n label = '>%s' % ind\n for col in columns:\n label += '%s%s' % (sep, row[col])\n fh.write('%s\\n' % label)\n fh.write('%s\\n' % row['seq'])",
"def generate_fasta(sequences, fasta_path):\n\n with open(fasta_path, 'w+') as f:\n for i in range(len(sequences)):\n f.write('>seq '+str(i))\n f.write('\\n')\n f.write(sequences[i])\n f.write('\\n')",
"def transeq(seq):\n \n temp_file = 'PATH/TO/ROOT/Database_Outputs/temp.fasta'\n temp = open(temp_file, 'w')\n temp.write(\">Just a formality \\n\"+seq)\n temp.close()\n \n trans = \"PATH/TO/ROOT/BLISTR_support_programs/./transeq -sequence \"+temp_file+\" -outseq \"+temp_file[:-6]+\".faa\"\n proc = subprocess.Popen(trans, shell=True)\n proc.wait()\n \n temp = open(temp_file[:-6]+\".faa\", 'r')\n new_seq = \"\"\n for line in temp:\n if line.startswith(\">\"):\n continue\n new_seq += line\n \n os.remove(temp_file)\n os.remove(temp_file[:-6]+\".faa\")\n \n return new_seq",
"def test_fasta(self):\n aln2fasta = hhsuite.AlignmentToFasta()\n self.assertEqual(\n aln2fasta.fasta(self.hit, \"A-E----\"),\n \">Query\\nJKLMNOP\\n>Template\\nA-E----\\n\")",
"def fasta2align(fn,uniqueIndex=True):\n return fasta2df(fn, sep=None, columns=['name'], index='name', uniqueIndex=uniqueIndex).seq",
"def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")",
"def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein",
"def convertFastqToFasta(inputFastq, outputFasta):\n out = open(outputFasta, \"w\")\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n out.write(\">%s\\n%s\\n\" % (titleStr, seqStr))",
"def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):\n warnings.warn(\n \"`fasta_from_sequences` is deprecated and will be removed in \"\n \"scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.\",\n DeprecationWarning)\n\n fasta_list = []\n for i, seq in enumerate(seqs):\n # Check if it has a label, or one is to be created\n label = str(i)\n if make_seqlabel is not None:\n label = make_seqlabel(seq)\n elif hasattr(seq, 'id') and seq.id:\n label = seq.id\n elif hasattr(seq, 'Label') and seq.Label:\n label = seq.Label\n elif hasattr(seq, 'Name') and seq.Name:\n label = seq.Name\n\n # wrap sequence lines\n seq_str = str(seq)\n if line_wrap is not None:\n numlines, remainder = divmod(len(seq_str), line_wrap)\n if remainder:\n numlines += 1\n body = [seq_str[j * line_wrap:(j + 1) * line_wrap]\n for j in range(numlines)]\n else:\n body = [seq_str]\n\n fasta_list.append('>' + label)\n fasta_list += body\n\n return '\\n'.join(fasta_list)",
"def format_fasta(name, seq, wrap=60):\n return \">{}\\n{}\".format(name, textwrap.fill(seq, width=wrap))",
"def toString(self, format_='fasta', structureSuffix=':structure'):\n if format_ == 'fasta':\n return '>%s\\n%s\\n>%s%s\\n%s\\n' % (\n self.id, self.sequence, self.id, structureSuffix,\n self.structure)\n else:\n raise ValueError(\"Format must be 'fasta'.\")",
"def pdb_to_fasta(pdb_input):\n p = PDBParser(PERMISSIVE=1)\n structure = p.get_structure(pdb_input, pdb_input)\n file_name = pdb_input[0:-4] + \".fasta\"\n fasta_file = open(file_name, 'w')\n for model in structure:\n for chain in model:\n seq = list()\n chainID = chain.get_id()\n\n for residue in chain:\n if is_aa(residue.get_resname(), standard=True):\n seq.append(three_to_one(residue.get_resname()))\n else:\n seq.append(\"X\")\n chain_line = \">Chain_\" + chainID + \"\\n\" + str(\"\".join(seq)) + \"\\n\" + \"\\n\"\n fasta_file.write(chain_line)\n\n fasta_file.close()",
"def align2fasta(align, fn, applyPadding = True):\n align = padAlignment(align, applyPadding)\n\n with open(fn, 'w') as fh:\n for i in np.arange(align.shape[0]):\n ind = align.index[i]\n fh.write('>%s\\n' % ind)\n fh.write('%s\\n' % align.iloc[i])",
"def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs",
"def generate_fasta(seq_file, out_dir):\n\n LOGGER.info(\"Generating fasta file\", seq_file)\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, \"missing_seqs.log\")\n logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)\n\n cnx = RfamDB.connect()\n cursor = cnx.cursor(raw=True)\n\n # fetch clan specific family full_region data and sequence description\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"ORDER BY fr.rfam_acc\")\n\n cursor.execute(query)\n\n for region in cursor:\n\n # new family\n if str(region[RFAM_ACC]) != rfam_acc:\n # check if there's no open file\n if fp_out is not None:\n fp_out.close()\n\n # open new fasta file\n fp_out = gzip.open(\n os.path.join(out_dir, str(region[RFAM_ACC]) + \".fa.gz\"), 'w')\n\n rfam_acc = region[RFAM_ACC]\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(sequence)\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def get_fasta(self):\n\t\tif not self.fastas:\n\t\t\treturn None\n\t\telif self.fastas.get('twodirections') is not None:\n\t\t\treturn self.fastas.get('twodirections')\n\t\telif self.fastas.get('template') is not None:\n\t\t\treturn self.fastas.get('template')\n\t\telif self.fastas.get('complement') is not None:\n\t\t\treturn self.fastas.get('complement')",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)",
"def getSequence(ref, fasta):\n\n fasta_header = \"\"\n\n fh_fasta = open(fasta, \"r\")\n entry = (x[1] for x in groupby(fh_fasta, lambda line: line[0] == \">\"))\n\n for header in entry:\n headerStr = header.__next__()[1:].strip()\n\n seq = \"\".join(s.strip() for s in entry.__next__())\n\n if ref == headerStr.replace('>',''):\n filename = os.path.join(os.getcwd(), ref.replace('/','_').split('|')[0])\n fasta_header = replace_char(headerStr)\n\n with open(filename + '.fa', \"w\") as output_file:\n output_file.write(\">\" + fasta_header + \"\\\\n\" + seq.upper() + \"\\\\n\")\n\n fh_fasta.close()\n return fasta_header",
"def read_fasta_sequences_to_str(filename):\n with open(filename) as f:\n lines = [line.strip() for line in f.readlines()]\n sequences = []\n text = ''\n\n for line in lines:\n if line[0] == '>':\n if len(text) > 0:\n sequences.append(text)\n text = ''\n else:\n if len(line):\n text += line\n if len(text) > 0:\n sequences.append(text)\n\n return sequences"
] | [
"0.74931866",
"0.7302847",
"0.70808065",
"0.6741352",
"0.67153966",
"0.67040503",
"0.6695643",
"0.66434294",
"0.66212684",
"0.6602914",
"0.6531266",
"0.65176135",
"0.65008026",
"0.6492443",
"0.648414",
"0.64614666",
"0.638201",
"0.6333177",
"0.6327045",
"0.62993723",
"0.62658256",
"0.622666",
"0.62186885",
"0.62136865",
"0.62016344",
"0.6164991",
"0.6160582",
"0.6150265",
"0.6123191",
"0.6081215"
] | 0.74317557 | 1 |
correctly annotates a Sequence from a gff file | def test_annotate_from_gff(self):
from cogent3.parse.fasta import FastaParser
fasta_path = os.path.join("data/c_elegans_WS199_dna_shortened.fasta")
gff3_path = os.path.join("data/c_elegans_WS199_shortened_gff.gff3")
name, seq = next(FastaParser(fasta_path))
sequence = Sequence(seq)
sequence.annotate_from_gff(gff3_path)
matches = [m for m in sequence.get_annotations_matching("*", extend_query=True)]
# 13 features with one having 2 parents, so 14 instances should be found
self.assertEqual(len(matches), 14) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs",
"def annotate(m, ss_seq): # -> None:\n ...",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def build_gff(annotations, faa):\n with open(faa, \"rt\") as faa_file:\n for line in faa_file:\n if \">\" not in line:\n continue\n\n # each fasta is suffixed on the annotated faa if a prefix _INT (_1 .. _n)\n contig_name, start, end, strand = parse_fasta_header(line)\n if None in (contig_name, start, end, strand):\n print(\n \"It was not possible to parse the \" + line, end=\"\", file=sys.stderr\n )\n continue\n\n clean_name = Annotation.clean_seq_name(contig_name)\n\n row_annotations = Annotation.merge(\n [ann.get() for ann in annotations.get(contig_name, [])]\n )\n\n ann_string = \";\".join(\n [\n \"{}={}\".format(k, \",\".join(v).strip())\n for k, v in row_annotations.items()\n ]\n )\n\n eggNOGScore = \"\".join(row_annotations.get(\"eggNOG_score\", []))\n\n if len(ann_string):\n yield [\n clean_name,\n \"eggNOG-v2\",\n \"CDS\",\n start,\n end,\n eggNOGScore or \".\",\n \"+\" if strand == \"1\" else \"-\",\n \".\",\n \"ID=\" + clean_name + \";\" + ann_string,\n ]",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def parse_anno_from_gff3(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### initial run to get the transcript to gene mapping\n if options.verbose:\n print >> sys.stderr, \"... init structure\"\n\n trans2gene = dict() ### dict with: keys = transcript IDs, values = gene IDs\n for line in open(options.anno, 'r'):\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n if sl[2] in ['mRNA', 'transcript', 'mrna', 'miRNA', 'tRNA', 'snRNA', 'snoRNA', 'ncRNA', 'mRNA_TE_gene', 'rRNA', 'pseudogenic_transcript', 'transposon_fragment']:\n tags = get_tags_gff(sl[8])\n trans2gene[tags['ID']] = tags['Parent']\n\n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for contig %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1,), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict() # contains the exon list per transcript, only need this for mask_alternative_overlap\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n tags = get_tags_gff(sl[8])\n if sl[2] == 'exon':\n trans_id = tags['Parent']\n gene_id = trans2gene[trans_id]\n else:\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n ### store for each position of the transcriptome a tuple containing all overlapping gene IDs\n ### assume positions are 1 based and in closed intervals\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n if not sl[0] in exons:\n exons[sl[0]] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[sl[0]][trans_id].append([start, stop])\n except KeyError:\n exons[sl[0]][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[sl[0]][start:stop] > 0):\n for p in range(start, stop):\n if anno[sl[0]][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[sl[0]][p]]) | set([gene_id]))\n try:\n anno[sl[0]][p] = gene2idx[new_set]\n except KeyError:\n anno[sl[0]][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[sl[0]][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n \n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def test_feature_without_id():\n annot = seq.Annotation(\n [seq.Feature(\n key = \"CDS\",\n locs = [seq.Location(1,2), seq.Location(4,5)],\n qual = {\"some\" : \"qualifiers\"}\n )]\n )\n file = gff.GFFFile()\n with pytest.raises(ValueError):\n gff.set_annotation(file, annot)",
"def parse_gff(g):\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)",
"def parse_b2go(annot_f, annot_d=None):\n with open(annot_f, \"r\") as f:\n\n for line in f:\n line = line.strip().split(\"\\t\")\n seq_id = line[0]\n\n # Create a dict entry if new seqid\n # This is weird (should it be created out of the for loop ?)\n if not annot_d:\n annot_d = {}\n\n if seq_id not in annot_d:\n annot_d[seq_id] = {}\n\n # Check and sort by annotation type\n for i in line[1:]:\n\n if i.startswith(\"GO:\"):\n # Method for creating or appending to existing list\n annot_d[ seq_id ].setdefault(\"GOs\", []).append(i)\n\n elif i.startswith(\"EC:\"):\n annot_d[ seq_id ].setdefault(\"ECs\", []).append(i)\n\n # Should not have more than 1 annot but for doublec hecking\n else:\n annot_d[ seq_id ].setdefault(\"annot\",[]).append(i)\n\n return annot_d",
"def parse_KEGG(kaas_f, annot_d):\n\n with open(kaas_f,\"r\") as f:\n\n for line in f:\n line = line.strip().split(\"\\t\")\n \n # If there is a kegg annotation (I think KEGGS should be unique)\n if not len(line) == 1:\n seqid = line[0]\n kegg = line[1]\n \n annot_d[seqid][\"KEGG\"] = kegg\n \n return annot_d",
"def read_gff(gff):\n genome = getseq(args.genome)\n dictoftranscripts = {}\n for k in open(gff):\n if not k.startswith(\"#\"):\n lines = k.strip().split(\"\\t\")\n if lines[2] == \"exon\":\n strand = lines[6]\n chromosome = lines[0]\n start = lines[3]\n end = lines[4]\n transcriptid = re.search(\"Parent=transcript:(.*)\", lines[8]).group(1)\n if transcriptid + \"#\" + chromosome in dictoftranscripts:\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n else:\n dictoftranscripts[transcriptid + \"#\" + chromosome] = []\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n\n for key, value in dictoftranscripts.iteritems():\n value.sort()\n print value\n for coord1 in value:\n\n for coord2 in value[1:]:\n #print coord1, coord2\n if int(coord1) != int(value[-1]) and value.index(coord2) != value.index(coord1)+1 and value.index(coord2) > value.index(coord1):\n\n exon1_start = int(coord1)\n exon1_end = int(coord2)\n #print exon1_start, exon1_end\n #print key.split(\"#\")[1]\n #print value.index(coord1), value.index(coord2)\n exon_seq = genome.get(key.split(\"#\")[1],\"NA\")\n\n if exon_seq != \"NA\":\n sequence_exon = exon_seq[exon1_start:exon1_end+1]\n #print exon1_start, exon1_end, sequence_exon\n for start, end, strand, frame, pro in translate(sequence_exon):\n junction =\n print start, end, strand, frame, pro",
"def parse_sequences_from_gff_metadata( file ):\n import pandas\n result = []\n for line in file:\n if line.startswith( '##sequence-region' ):\n parts = line.strip().split( \" \" )\n nameStartEnd = parts[-3:] # last 3 elements\n result.append({\n \"seqid\": nameStartEnd[0],\n \"start\": int( nameStartEnd[1] ),\n \"end\": int( nameStartEnd[2] )\n })\n elif not line[0] == '#':\n # quit when we meet the first non-metadata line\n break\n return pandas.DataFrame( result )",
"def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0",
"def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0",
"def parse_anno_from_gtf(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for chr %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1, ), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict()\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n if sl[2] != 'exon':\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n tags = get_tags_gtf(sl[8])\n gene_id = tags['gene_id']\n trans_id = tags['transcript_id']\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n chrm = sl[0]\n if chrm == 'chrM_rCRS':\n chrm = 'chrM'\n\n if not chrm in exons:\n exons[chrm] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[chrm][trans_id].append([start, stop])\n except KeyError:\n exons[chrm][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[chrm][start:stop] > 0):\n for p in range(start, stop):\n if anno[chrm][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[chrm][p]]) | set([gene_id]))\n try:\n anno[chrm][p] = gene2idx[new_set]\n except KeyError:\n anno[chrm][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[chrm][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n #print >> sys.stderr, 'found %i positions' % p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def get_sequin_annots(sequin_path, ref_contigs, quiet=False):\n annots = defaultdict(list)\n \n # We need a dummy class to hold the current state while parsing\n # (otherwise the below private functions can't modify it; there's no \"nonlocal\" in python 2.x)\n class _:\n in_contig = None\n in_feature = None\n gene_name = None\n desc = None\n chrom_start = None\n chrom_end = None\n strand = None\n feature_seq_str = \"\"\n coding_blocks = []\n \n def _save_sequin_feature():\n # The only features we care about are the CDS features. Others get discarded during parsing.\n if _.in_feature == \"CDS\":\n if len(_.feature_seq_str) == 0:\n if not quiet: sys.stderr.write(\"WARN: 0-length CDS in contig %s\" % _.in_contig)\n elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:\n if not quiet: sys.stderr.write(\"WARN: invalid CDS feature in contig %s\" % _.in_contig)\n else:\n gene_seq = Seq(_.feature_seq_str, generic_dna)\n if _.strand == '-':\n gene_seq = gene_seq.reverse_complement()\n gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)\n annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record, \n _.coding_blocks)\n annots[contig_to_vcf_chrom(_.in_contig)].append(annot)\n _.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None\n _.feature_seq_str = \"\"\n _.coding_blocks = []\n \n def _update_sequin_feature(fields):\n if fields[0] != \"\" and fields[1] != \"\":\n # If the first two fields are present, this specifies a sequence range\n if not (fields[0].isdigit() and fields[1].isdigit()):\n # We will only attempt to utilize *complete* CDS features\n # (None of the start or end positions can be qualified by \">\" or \"<\")\n _.in_feature = \"CDS-partial\"\n return\n\n # Append the specified sequence to the `_.feature_seq_str`.\n # Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.\n start = int(fields[0])\n end = int(fields[1])\n if _.strand is None: \n _.strand = '+' if start <= end else '-'\n elif _.strand != ('+' if start <= end else '-'):\n sys.stderr.write(\"WARN: strand changed direction, invalid CDS\")\n _.in_feature = \"CDS-partial\"\n return\n if _.strand == '-':\n start, end = end, start\n start -= 1\n ref_contig = ref_contigs[_.in_contig]\n seg = str(ref_contig.seq)[start:end]\n _.coding_blocks.append((start, end))\n _.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg\n _.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))\n _.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))\n \n elif len(fields) >= 5:\n # If the first three fields are blank, this specifies a qualifier key + value\n if fields[3] == \"gene\":\n _.gene_name = fields[4]\n elif fields[3] == \"product\":\n _.desc = fields[4]\n \n with open(sequin_path) as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\", 4)\n if len(line.strip()) == 0:\n # Whitespace-only lines signal the end of feature data for a contig.\n # They may be followed by INFO: lines from the annotator, which we ignore.\n _save_sequin_feature()\n _.in_contig = None\n elif _.in_contig is None and line[0] == '>':\n # Lines that begin with \">Feature \" signal the start of feature data for a contig\n # Fields are separated by spaces; the second field is the full contig ID\n _save_sequin_feature()\n sp_fields = line[1:].split(' ')\n if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:\n 
if ref_contigs.has_key(sp_fields[1]):\n _.in_contig = sp_fields[1]\n elif not quiet:\n sys.stderr.write(\"WARN: unknown contig in Sequin file: %s\" % sp_fields[1])\n elif _.in_contig is not None:\n if len(fields) < 3: \n if not quiet: sys.stderr.write(\"WARN: incomplete Sequin line: %s\" % line)\n next\n in_new_feature = fields[2].strip() != \"\"\n if _.in_feature is None or in_new_feature:\n _save_sequin_feature()\n _.in_feature = fields[2].strip()\n if _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n elif _.in_feature == \"CDS\":\n _update_sequin_feature(fields)\n \n return annots",
"def read_annotations(filename, documents_are_sequences=False, useBERT=False):\n\n\twith open(filename, encoding=\"utf-8\") as f:\n\t\tsentence = []\n\t\tsentences = []\n\t\tsentenceID=0\n\t\tfor line in f:\n\t\t\tif len(line) > 0:\n\t\t\t\tif line == '\\n':\n\t\t\t\t\tsentenceID+=1\n\t\n\t\t\t\t\tif documents_are_sequences == False:\t# each individual sentences is its own sequence\n\t\t\t\t\t\tsentences.append(sentence)\n\t\t\t\t\t\tsentence = []\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\telse:\n\t\t\t\t\tdata=[]\n\t\t\t\t\tsplit_line = line.rstrip().split('\\t')\n\n\t\t\t\t\tdata.append(split_line[0])\n\t\t\t\t\tdata.append(1 if split_line[1] == \"EVENT\" else 0)\n\n\t\t\t\t\tdata.append(sentenceID)\n\t\t\t\t\tdata.append(filename)\n\n\t\t\t\t\tif useBERT:\n\n\t\t\t\t\t\tbert=np.array(split_line[2].split(\" \"), dtype=float)\n\t\t\t\t\t\tdata.append(bert)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata.append(None)\n\n\t\t\t\t\tsentence.append(data)\n\t\t\n\t\tif len(sentence) > 0:\n\t\t\tsentences.append(sentence)\n\n\treturn sentences",
"def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation",
"def test_conversion_highlevel(path):\n gff_file = gff.GFFFile.read(join(data_dir(\"sequence\"), path))\n ref_annot = gff.get_annotation(gff_file)\n ref_phases = []\n for _, _, type, _, _, _, _, phase, _ in gff_file:\n if type == \"CDS\":\n ref_phases.append(phase)\n\n gff_file = gff.GFFFile()\n gff.set_annotation(gff_file, ref_annot)\n temp = TemporaryFile(\"w+\")\n gff_file.write(temp)\n\n temp.seek(0)\n gff_file = gff.GFFFile.read(temp)\n temp.close()\n test_annot = gff.get_annotation(gff_file)\n test_phases = []\n for _, _, type, _, _, _, _, phase, _ in gff_file:\n if type == \"CDS\":\n test_phases.append(phase)\n \n assert ref_annot == test_annot\n assert test_phases == ref_phases",
"def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'",
"def _generateSequence(self, classifications, detections):\n det_len = len(detections)\n\n # Convert classifications and detections to input required for network\n seq_len = int(self.input_tensor.shape[1])\n fea_len = int(self.input_tensor.shape[2])\n input_data = np.zeros((seq_len,fea_len))\n\n # Add padding before and after sequence based on KEYFRAME_OFFSET\n input_data[:KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n input_data[det_len:det_len+KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n # Iterate through each frame of the data\n for idx, frame_detections in enumerate(detections):\n # We have already padded before and after\n seq_idx = idx + KEYFRAME_OFFSET\n\n # Skip through frames with no detections\n if len(frame_detections) == 0:\n input_data[seq_idx][0] = 1.0\n continue\n\n detection = frame_detections[0]\n classification = classifications[idx][0]\n\n # Do a size check on input\n # We expect either 1 or 2 models per sequence\n num_species = len(classification.species)\n num_cover = len(classification.cover)\n num_loc = len(detection.location)\n num_fea = num_species + num_cover + num_loc + 2\n num_of_models = int(fea_len / num_fea)\n\n if num_of_models != 2 and num_of_models != 1:\n raise Exception('Bad Feature Length')\n\n # Layout of the feature is:\n # Species, Cover, Normalized Location, Confidence, SSD Species\n # Optional duplicate\n\n for model_idx in range(num_of_models):\n # Calculate indices of vector based on model_idx\n fea_idx = model_idx * num_fea\n species_stop = fea_idx + num_species\n cover_stop = species_stop + num_cover\n loc_stop = cover_stop + num_loc\n ssd_conf = loc_stop\n ssd_species = ssd_conf + 1\n\n input_data[seq_idx,fea_idx:species_stop] = \\\n classification.species\n input_data[seq_idx,species_stop:cover_stop] = \\\n classification.cover\n input_data[seq_idx,cover_stop:loc_stop] = \\\n self._normalizeDetection(detection.location)\n input_data[seq_idx, ssd_conf] = detection.confidence\n input_data[seq_idx, ssd_species] = detection.species\n return input_data",
"def generate_gff( mapfile, funtax_orf_file ):\n annotation2assembly_map = pd.read_table(mapfile,\n names=['annotation','assembly','length'],\n index_col='annotation')\n funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)\n funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']\n funtax_gff['source'] = 'Prodigal_v2.00'\n funtax_gff['type'] = 'CDS'\n funtax_gff['score'] = 100.0\n funtax_gff['phase'] = 0\n funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\\1;')\n return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]",
"def test_sequence_annotate(self):\n self.t(\"1,2 annotate note\")\n code, out, err = self.t(\"_get 1.annotations.1.description 2.annotations.1.description\")\n self.assertEqual(\"note note\\n\", out)",
"def main():\n args = get_args()\n annot_fp = args.annotations\n out_fp = args.outfile\n blast_fp = args.positional\n\n #print('output_arg = \"{}\"'.format(out_fp))\n #print('annotation_arg = \"{}\"'.format(annot_fp))\n #print('blast_fp = \"{}\"'.format(blast_fp))\n\n if not os.path.isfile(annot_fp):\n print(\"\\\"{}\\\" is not a file\".format(annot_fp))\n exit(1)\n if not os.path.isfile(blast_fp):\n print(\"\\\"{}\\\" is not a file\".format(blast_fp))\n exit(1)\n\n #Load the annotations\n annots_dict = {}\n with open(annot_fp, 'r') as f:\n for l in f:\n larr = l[:-1].split(\",\")\n annots_dict[larr[0]] = larr[6:]\n\n header_str = \"seq_id\\tpident\\tgenus\\tspecies\"\n if out_fp != \"\":\n out = open(out_fp, 'w')\n out.write(\"{}\\n\".format(header_str))\n else:\n print(header_str)\n\n with open(blast_fp, 'r') as f:\n for l in f:\n larr = l.split(\"\\t\")\n seq_id = larr[1]\n tax_info = annots_dict.get(seq_id, [\"BAD\", \"BAD\"])\n if tax_info[0] == \"BAD\":\n warn(msg=\"Cannot find seq {} in lookup\".format(seq_id))\n continue\n genus = tax_info[0]\n species = tax_info[1]\n if genus == \"\":\n genus = \"NA\"\n if species == \"\":\n species = \"NA\"\n if out_fp == \"\":\n print(\"{}\\t{}\\t{}\\t{}\".format(seq_id, larr[2], genus, species))\n else:\n out.write(\"{}\\t{}\\t{}\\t{}\\n\".format(seq_id, larr[2], genus, species))\n\n if out_fp != \"\":\n out.close()",
"def TrinityAnnotation(Trinityfa,fileBlast6,GIfa,output):\n from Bio import SeqIO\n myTrinity = list(SeqIO.parse(Trinityfa,\"fasta\"))\n mygislist = list(SeqIO.parse(GIfa,\"fasta\"))\n mygis = {}\n for giseq in mygislist:\n mygis[giseq.id.split(\"|\")[1]] = giseq\n mylist = open(fileBlast6,\"r\").readlines()\n fout = open(output,\"w\")\n sqdic = {}\n queryset = set()\n for ele in mylist:\n subject = ele.split()[1] +\"|\" + ele.split()[3]+\" Identity: \" + ele.split()[2]\n query = ele.split()[0]\n if query not in queryset:\n sqdic[query]=subject\n queryset.add(query)\n for ele in myTrinity:\n if ele.id not in queryset:\n fout.write(ele.id +\"\\t\"+\"NA\\n\")\n else:\n fout.write(ele.id +\"\\t\"+mygis[sqdic[ele.id].split(\"|\")[1]].description+\" QL:\"+\\\n str(len(ele.seq)) +\" SL: \"+str(len(mygis[sqdic[ele.id].split(\"|\")[1]].seq)) +\\\n \" ML: \"+ sqdic[ele.id].split(\"|\")[-1] +\"\\n\")\n fout.close()",
"def decoding(file_path, id2spo, logits_all, seq_len_all,\n tok_to_orig_start_index_all, tok_to_orig_end_index_all):\n example_all = []\n with open(file_path, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n example_all.append(json.loads(line))\n\n formatted_outputs = []\n for (i, (example, logits, seq_len, tok_to_orig_start_index, tok_to_orig_end_index)) in \\\n enumerate(zip(example_all, logits_all, seq_len_all, tok_to_orig_start_index_all, tok_to_orig_end_index_all)):\n\n logits = logits[1:seq_len +\n 1] # slice between [CLS] and [SEP] to get valid logits\n logits[logits >= 0.5] = 1\n logits[logits < 0.5] = 0\n tok_to_orig_start_index = tok_to_orig_start_index[1:seq_len + 1]\n tok_to_orig_end_index = tok_to_orig_end_index[1:seq_len + 1]\n predictions = []\n for token in logits:\n predictions.append(np.argwhere(token == 1).tolist())\n\n # format predictions into example-style output\n formatted_instance = {}\n text_raw = example['text']\n complex_relation_label = [8, 10, 26, 32, 46]\n complex_relation_affi_label = [9, 11, 27, 28, 29, 33, 47]\n\n # flatten predictions then retrival all valid subject id\n flatten_predictions = []\n for layer_1 in predictions:\n for layer_2 in layer_1:\n flatten_predictions.append(layer_2[0])\n subject_id_list = []\n for cls_label in list(set(flatten_predictions)):\n if 1 < cls_label <= 56 and (cls_label + 55) in flatten_predictions:\n subject_id_list.append(cls_label)\n subject_id_list = list(set(subject_id_list))\n\n # fetch all valid spo by subject id\n spo_list = []\n for id_ in subject_id_list:\n if id_ in complex_relation_affi_label:\n continue # do this in the next \"else\" branch\n if id_ not in complex_relation_label:\n subjects = find_entity(text_raw, id_, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n objects = find_entity(text_raw, id_ + 55, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n for subject_ in subjects:\n for object_ in objects:\n spo_list.append({\n \"predicate\": id2spo['predicate'][id_],\n \"object_type\": {\n '@value': id2spo['object_type'][id_]\n },\n 'subject_type': id2spo['subject_type'][id_],\n \"object\": {\n '@value': object_\n },\n \"subject\": subject_\n })\n else:\n # traverse all complex relation and look through their corresponding affiliated objects\n subjects = find_entity(text_raw, id_, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n objects = find_entity(text_raw, id_ + 55, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n for subject_ in subjects:\n for object_ in objects:\n object_dict = {'@value': object_}\n object_type_dict = {\n '@value': id2spo['object_type'][id_].split('_')[0]\n }\n if id_ in [8, 10, 32, 46\n ] and id_ + 1 in subject_id_list:\n id_affi = id_ + 1\n object_dict[id2spo['object_type'][id_affi].split(\n '_')[1]] = find_entity(text_raw, id_affi + 55,\n predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)[0]\n object_type_dict[id2spo['object_type'][\n id_affi].split('_')[1]] = id2spo['object_type'][\n id_affi].split('_')[0]\n elif id_ == 26:\n for id_affi in [27, 28, 29]:\n if id_affi in subject_id_list:\n object_dict[id2spo['object_type'][id_affi].split('_')[1]] = \\\n find_entity(text_raw, id_affi + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index)[0]\n object_type_dict[id2spo['object_type'][id_affi].split('_')[1]] = \\\n id2spo['object_type'][id_affi].split('_')[0]\n spo_list.append({\n \"predicate\": id2spo['predicate'][id_],\n \"object_type\": object_type_dict,\n \"subject_type\": 
id2spo['subject_type'][id_],\n \"object\": object_dict,\n \"subject\": subject_\n })\n\n formatted_instance['text'] = example['text']\n formatted_instance['spo_list'] = spo_list\n formatted_outputs.append(formatted_instance)\n return formatted_outputs",
"def load_seq(\n filename: os.PathLike,\n annotation_path: Optional[os.PathLike] = None,\n format: Optional[str] = None,\n moltype: Optional[str] = None,\n label_to_name: Optional[Callable] = None,\n parser_kw: Optional[dict] = None,\n info: Optional[dict] = None,\n **kw,\n) -> Sequence:\n info = info or {}\n info[\"source\"] = str(filename)\n file_format, _ = get_format_suffixes(filename)\n if file_format == \"json\":\n seq = load_from_json(filename, (Sequence,))\n seq.name = label_to_name(seq.name) if label_to_name else seq.name\n return seq\n\n data = _load_seqs(file_format, filename, format, kw, parser_kw)\n name, seq = data[0]\n name = label_to_name(name) if label_to_name else name\n result = make_seq(seq, name, moltype=moltype)\n result.info.update(info)\n\n if getattr(seq, \"annotation_db\", None):\n result.annotation_db = seq.annotation_db\n\n if annotation_path is not None:\n result.annotation_db = load_annotations(path=annotation_path, seqids=[name])\n return result",
"def test_annot():\n annots = ['aparc', 'aparc.a2005s']\n for a in annots:\n annot_path = pjoin(data_path, \"label\", \"%s.%s.annot\" % (\"lh\", a))\n labels, ctab, names = read_annot(annot_path)\n assert_true(labels.shape == (163842, ))\n assert_true(ctab.shape == (len(names), 5))",
"def intron_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n intronpos = defline[1:].split(' ')[1]\n seqs[intronpos] = seq\n\n reported_introns = {}\n introns = []\n mrnaid = None\n start, stop = None, None\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n mrnaid = re.search(r'accession=([^;\\n]+)', entry).group(1)\n elif '\\tintron\\t' in entry:\n introns.append(entry)\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if mrnaid is None:\n continue\n assert start, 'No start codon for introns(s): %s' % introns[0]\n assert stop, 'No stop codon for introns(s): %s' % introns[0]\n if len(introns) > 0:\n for intron in introns:\n fields = intron.split('\\t')\n assert len(fields) == 9, \\\n 'entry does not have 9 fields: %s' % intron\n intronpos = '%s_%s-%s%s' % (fields[0], fields[3],\n fields[4], fields[6])\n if intronpos in reported_introns:\n continue\n intronlength = int(fields[4]) - int(fields[3]) + 1\n intronseq = seqs[intronpos]\n assert len(intronseq) == intronlength, \\\n 'intron \"%s\": length mismatch; gff=%d, fa=%d' % (\n intronpos, intronlength, len(intronseq))\n gccontent = gc_content(intronseq)\n gcskew = gc_skew(intronseq)\n ncontent = n_content(intronseq)\n context = intron_context(intron, start, stop)\n values = '%s %s %d %.3f %.3f %.3f %s' % (\n intronpos, mrnaid, intronlength, gccontent, gcskew,\n ncontent, context)\n reported_introns[intronpos] = 1\n yield values.split(' ')\n mrnaid = None\n introns = []\n start, stop = None, None",
"def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None"
] | [
"0.68458533",
"0.6741979",
"0.64248884",
"0.6258323",
"0.62455755",
"0.61349237",
"0.6114975",
"0.6035717",
"0.6020976",
"0.5951338",
"0.5917856",
"0.5893167",
"0.5850203",
"0.58161116",
"0.58051586",
"0.5799951",
"0.5758082",
"0.57514256",
"0.57467973",
"0.57466453",
"0.56797063",
"0.5663408",
"0.56561613",
"0.56511337",
"0.563022",
"0.5627649",
"0.5612986",
"0.56125677",
"0.56041354",
"0.5600636"
] | 0.6810655 | 1 |
Sequence strip_degenerate should remove any degenerate bases | def test_strip_degenerate(self):
self.assertEqual(self.RNA("UCAG-").strip_degenerate(), "UCAG-")
self.assertEqual(self.RNA("NRYSW").strip_degenerate(), "")
self.assertEqual(self.RNA("USNG").strip_degenerate(), "UG") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)",
"def degenerate2(s):\n from lasagna.utils import base_repr\n\n n = s.count('N')\n seed = hash(s) % (2**32 - 1)\n rng = random.Random(seed)\n random_base_ix = lambda: base_repr(rng.randint(0, 4**(n + 1) - 1), 4, n + 1)[::-1]\n while True:\n bases = ['ACTG'[int(j)] for j in random_base_ix()]\n s2 = s\n for b in bases:\n s2 = s2.replace('N', b, 1)\n yield s2",
"def test_is_degenerate(self):\n assert not self.RNA(\"\").is_degenerate()\n assert not self.RNA(\"UACGCUACAUGuacgucaguGCUAGCUA---ACGUCAG\").is_degenerate()\n assert self.RNA(\"N\").is_degenerate()\n assert self.RNA(\"R\").is_degenerate()\n assert self.RNA(\"y\").is_degenerate()\n assert self.RNA(\"GCAUguagcucgUCAGUCAGUACgUgcasCUAG\").is_degenerate()\n assert self.RNA(\"ACGYAUGCUGYWWNMNuwbycwuybcwbwub\").is_degenerate()",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def removeDegenerate(self):\n return self[~self.testDegenerate()]",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UCxxxAGwsnyrHBNzzzD-D\", check=False).strip_bad(),\n \"UCAGWSNYRHBND-D\",\n )\n self.assertEqual(self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad(), \"\")\n self.assertEqual(\n self.RNA(\"aaaxggg---!ccc\", check=False).strip_bad(), \"AAAGGG---CCC\"\n )",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def elimination_technique_2(C):\n rels = C._reidemeister_relators\n rels.sort(reverse=True)\n gens = C._schreier_generators\n for i in range(len(gens) - 1, -1, -1):\n rel = rels[i]\n for j in range(len(gens) - 1, -1, -1):\n gen = gens[j]\n if rel.generator_count(gen) == 1:\n k = rel.exponent_sum(gen)\n gen_index = rel.index(gen**k)\n bk = rel.subword(gen_index + 1, len(rel))\n fw = rel.subword(0, gen_index)\n rep_by = (bk*fw)**(-1*k)\n del rels[i]; del gens[j]\n for l in range(len(rels)):\n rels[l] = rels[l].eliminate_word(gen, rep_by)\n break\n C._reidemeister_relators = rels\n C._schreier_generators = gens\n return C._schreier_generators, C._reidemeister_relators",
"def reverse_complement(base):\n try:\n assert isinstance(base, str)\n assert len(base) is 1\n rc = str.maketrans('ACGT', 'TGCA') # Traslation table for reverse complentary sequences\n return base.translate(rc)\n except AssertionError:\n raise NotABaseError",
"def zzx_strip(f):\n if not f or f[0]:\n return f\n\n k = 0\n\n for coeff in f:\n if coeff:\n break\n else:\n k += 1\n\n return f[k:]",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"UCAGRYU\")\n r._data[0] = 31\n r._data[2] = 55\n self.assertEqual(r.strip_bad(), \"CGRYU\")",
"def complement_base(base):\n return complements[base]",
"def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)",
"def strip_barcodes(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_adapters_removed.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fasta\"):\n match = re.search(r'\\S*:', record.id)\n if match:\n correct = match.group().rstrip(\":\")\n else:\n correct = str(record.id)\n SEQ = str(record.seq)\n if correct in wanted_set:\n out.write(\">\" + correct + \"\\n\" + SEQ + \"\\n\")",
"def complement_base(base):\n\n if base == 'A' or base == 'a':\n return 'T'\n elif base == 'T' or base == 't':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'",
"def complement_base(base,material='DNA'):\n if base in 'Aa':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base in 'TtUu':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'",
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def complement_base(base, material='DNA'):\n\n if base == 'A' or base == 'a':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base == 'T' or base == 't' or base == 'U' or base == 'u':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'",
"def rstrip(self) -> String:\n pass",
"def reverseComplement(s):\n\tcomplement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}\n\tt = ''\n\tfor base in s:\n\t\tt = complement[base] + t\n\treturn t",
"def fours_removed(seq):\n length = len(seq) - 4\n new_seq = seq[4:length:2]\n return new_seq",
"def complement_this(seq):\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_seq = ''\n for nuc in seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_seq += compliment_dict[nuc]\n return rev_seq",
"def strip_other_charcter():\n pass",
"def complement(seq):\n if PY3:\n table = str.maketrans('ACTGNactg', 'TGACNtgac')\n elif PY2:\n table = string.maketrans('ACTGNactg', 'TGACNtgac')\n return str(seq).translate(table)",
"def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)",
"def revise():",
"def expand_degeneracies(raw_primers):\r\n\r\n expanded_primers = []\r\n\r\n for raw_primer in raw_primers:\r\n primer_seq = DNASequence(raw_primer.strip())\r\n\r\n for expanded_primer in primer_seq.nondegenerates():\r\n expanded_primers.append(str(expanded_primer))\r\n\r\n return expanded_primers",
"def reverse_complement(seq):\n return ''.join([BASE_TO_COMP[b] for b in seq][::-1])",
"def testBinizeUnbinize(self):\n console.terse(\"{0}\\n\".format(self.testBinizeUnbinize.__doc__))\n\n n = 5\n u = aiding.binize(n, 8)\n self.assertEqual(u, '00000101')\n n = aiding.unbinize(u)\n self.assertEqual(n, 5)"
] | [
"0.670461",
"0.6552507",
"0.5997409",
"0.5994004",
"0.59098065",
"0.58974713",
"0.5855396",
"0.58462846",
"0.5815244",
"0.5701709",
"0.56765515",
"0.5675322",
"0.5673388",
"0.5557388",
"0.55281866",
"0.55277646",
"0.55103743",
"0.5502395",
"0.548646",
"0.5485527",
"0.53843564",
"0.5381859",
"0.5380942",
"0.5373095",
"0.5351188",
"0.53494036",
"0.5348627",
"0.53439605",
"0.53235906",
"0.5318481"
] | 0.7007799 | 0 |
Sequence strip_bad should remove any nonbase, nongap chars | def test_strip_bad(self):
# have to turn off check to get bad data in; no longer preserves case
self.assertEqual(
self.RNA("UCxxxAGwsnyrHBNzzzD-D", check=False).strip_bad(),
"UCAGWSNYRHBND-D",
)
self.assertEqual(self.RNA("@#^*($@!#&()!@QZX", check=False).strip_bad(), "")
self.assertEqual(
self.RNA("aaaxggg---!ccc", check=False).strip_bad(), "AAAGGG---CCC"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_other_charcter():\n pass",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"UCAGRYU\")\n r._data[0] = 31\n r._data[2] = 55\n self.assertEqual(r.strip_bad(), \"CGRYU\")",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string",
"def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data",
"def remove_bad_chars(self, corpus: List[str]) -> List[str]:\n corpus_clean: List[str] = list()\n for doc in corpus:\n doc_tmp = \"\"\n doc_tmp = re.sub(self.bad_chars, \"\", doc)\n corpus_clean.append(doc_tmp)\n return corpus_clean",
"def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)",
"def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s",
"def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )",
"def cstrip(inString):\n zeroDex = inString.find('\\x00')\n if zeroDex == -1:\n return inString\n else:\n return inString[:zeroDex]",
"def strip_non_unicode(value):\n UNICODE_PATTERN = r'[^\\x00-\\x7F]+'\n try:\n value = re.sub(UNICODE_PATTERN, '', value)\n return value.strip()\n except Exception:\n return value",
"def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet",
"def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent",
"def clean_str_vn(string):\n string = re.sub(r\"[~`@#$%^&*-+]\", \" \", string)\n def sharp(str):\n b = re.sub('\\s[A-Za-z]\\s\\.', ' .', ' '+str)\n while (b.find('. . ')>=0): b = re.sub(r'\\.\\s\\.\\s', '. ', b)\n b = re.sub(r'\\s\\.\\s', ' # ', b)\n return b\n string = sharp(string)\n string = re.sub(r\" : \", \":\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \"\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()",
"def remove_special(s):\n return ansi_escape_chars.sub('', s)",
"def removeNonAscii(s):\r\n return \"\".join([i for i in s if ord(i)>31])",
"def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str",
"def _strip(obj):\n return obj.translate(None, STRIP_CHARS)",
"def get_strip_string(self, i_str):\n return ''.join(e for e in i_str if e.isalnum())",
"def sanitize_string(unclean_string: str) -> str:\n return unidecode(unclean_string)",
"def sanitize(buf,\n backspaces=['\\x08\\x1b[K', '\\x08 \\x08'],\n escape_regex=re.compile(r'\\x1b(\\[|\\]|\\(|\\))[;?0-9]*[0-9A-Za-z](.*\\x07)?')):\n # Filter out control characters\n\n # First, handle the backspaces.\n for backspace in backspaces:\n try:\n while True:\n ind = buf.index(backspace)\n buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))\n except:\n pass\n\n strip_escapes = escape_regex.sub('',buf)\n\n # strip non-printable ASCII characters\n\n clean = ''.join([x for x in strip_escapes if is_printable(x)])\n return clean",
"def strip_not_alnum_char(str):\n\n i = 0\n # While we don't find a character or a digit,\n # that means it's a special char (logical!)\n if str:\n while not str[i].isalnum() and i < len(str) - 1:\n i += 1\n if i != len(str) - 2:\n str = str[i:]\n return str",
"def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)",
"def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")"
] | [
"0.715122",
"0.70498705",
"0.69027036",
"0.69027036",
"0.68354553",
"0.67104006",
"0.66530055",
"0.65649366",
"0.65649366",
"0.65639234",
"0.65602666",
"0.653047",
"0.65097004",
"0.6500392",
"0.64983284",
"0.6485774",
"0.64769524",
"0.6473822",
"0.6450652",
"0.6406869",
"0.6405177",
"0.64000595",
"0.6389645",
"0.63759357",
"0.6360856",
"0.63529545",
"0.635289",
"0.63386315",
"0.63264376",
"0.6324431"
] | 0.7429765 | 0 |
Sequence strip_bad_and_gaps should remove gaps and bad chars | def test_strip_bad_and_gaps(self):
# have to turn off check to get bad data in; no longer preserves case
self.assertEqual(
self.RNA("UxxCAGwsnyrHBNz#!D-D", check=False).strip_bad_and_gaps(),
"UCAGWSNYRHBNDD",
)
self.assertEqual(
self.RNA("@#^*($@!#&()!@QZX", check=False).strip_bad_and_gaps(), ""
)
self.assertEqual(
self.RNA("aaa ggg ---!ccc", check=False).strip_bad_and_gaps(), "AAAGGGCCC"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UCxxxAGwsnyrHBNzzzD-D\", check=False).strip_bad(),\n \"UCAGWSNYRHBND-D\",\n )\n self.assertEqual(self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad(), \"\")\n self.assertEqual(\n self.RNA(\"aaaxggg---!ccc\", check=False).strip_bad(), \"AAAGGG---CCC\"\n )",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"UCAGRYU\")\n r._data[0] = 31\n r._data[2] = 55\n self.assertEqual(r.strip_bad(), \"CGRYU\")",
"def strip_other_charcter():\n pass",
"def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)",
"def test_strip_degenerate(self):\n self.assertEqual(self.RNA(\"UCAG-\").strip_degenerate(), \"UCAG-\")\n self.assertEqual(self.RNA(\"NRYSW\").strip_degenerate(), \"\")\n self.assertEqual(self.RNA(\"USNG\").strip_degenerate(), \"UG\")",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x",
"def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text",
"def _removeRepetitions(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n # Remove additional caracters \n s = re.sub(r'(\\w)\\1{2,100}', r'\\1', s) \n # Remove additional white spaces \n s = re.sub( '\\s+', ' ', s ).strip() \n \n return s",
"def lstrip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n\n # count continuous sequence of chars from left and right\n nlen = len(clean)\n nlstripped = nlen - len(clean.lstrip(chars))\n # within the stripped regions, only retain parts of the raw\n # string *not* matching the clean string (these are ansi/mxp tags)\n lstripped = \"\"\n ic, ir1 = 0, 0\n while nlstripped:\n if ic >= nlstripped:\n break\n elif raw[ir1] != clean[ic]:\n lstripped += raw[ir1]\n else:\n ic += 1\n ir1 += 1\n return ANSIString(lstripped + raw[ir1:])",
"def strip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n\n # count continuous sequence of chars from left and right\n nlen = len(clean)\n nlstripped = nlen - len(clean.lstrip(chars))\n nrstripped = nlen - len(clean.rstrip(chars))\n\n # within the stripped regions, only retain parts of the raw\n # string *not* matching the clean string (these are ansi/mxp tags)\n lstripped = \"\"\n ic, ir1 = 0, 0\n while nlstripped:\n if ic >= nlstripped:\n break\n elif raw[ir1] != clean[ic]:\n lstripped += raw[ir1]\n else:\n ic += 1\n ir1 += 1\n rstripped = \"\"\n ic, ir2 = nlen - 1, len(raw) - 1\n while nrstripped:\n if nlen - ic > nrstripped:\n break\n elif raw[ir2] != clean[ic]:\n rstripped += raw[ir2]\n else:\n ic -= 1\n ir2 -= 1\n rstripped = rstripped[::-1]\n return ANSIString(lstripped + raw[ir1 : ir2 + 1] + rstripped)",
"def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. )((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data",
"def _strip(s, chars):\n # leading characters\n while len(s) > 0 and s[0] in chars:\n s = s[1:]\n # trailing characters\n while len(s) > 0 and s[-1] in chars:\n s = s[:-1]\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def test_preprocess_bad_chars_in_mapping(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n # But should not halt due to bad characters in a data field\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_datafield_f\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, 
expected_histograms)\r\n\r\n '''# With invalid character in a SampleID, should raise ValueError\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_sampleid_f\r\n barcode_type=\"golay_12\"\r\n min_seq_len=200\r\n max_seq_len=1000\r\n min_qual_score=25\r\n starting_ix=1\r\n keep_primer=False\r\n max_ambig=0\r\n max_primer_mm=1\r\n trim_seq_len=True\r\n dir_prefix=self.output_dir\r\n max_bc_errors=2\r\n max_homopolymer=4\r\n retain_unassigned_reads=False\r\n keep_barcode=False\r\n attempt_bc_correction=True\r\n qual_score_window=0\r\n disable_primer_check=False\r\n reverse_primers='disable'\r\n record_qual_scores=False\r\n discard_bad_windows=False\r\n median_length_filtering=None\r\n added_demultiplex_field=None\r\n\r\n\r\n self.assertRaises(ValueError, preprocess, fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)'''",
"def clean_whitespaces(text):\n length = len(text)\n i = 0\n prev_char = None\n while i < length:\n curr_char = text[i]\n return_char = curr_char if curr_char not in string.whitespace else \" \"\n\n if not (prev_char == \" \" and return_char == \" \"):\n yield return_char\n\n prev_char = return_char\n i += 1",
"def custom_strip(string, char):\n #beginning\n difference = 0\n while len(string) > 0 and string[0] == char:\n string = string[1:]\n difference += 1 #count the number of character removed at the beginning\n #end\n while len(string) > 0 and string[-1] == char:\n string = string[:-1]\n return (string, difference)",
"def rstrip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n nlen = len(clean)\n nrstripped = nlen - len(clean.rstrip(chars))\n rstripped = \"\"\n ic, ir2 = nlen - 1, len(raw) - 1\n while nrstripped:\n if nlen - ic > nrstripped:\n break\n elif raw[ir2] != clean[ic]:\n rstripped += raw[ir2]\n else:\n ic -= 1\n ir2 -= 1\n rstripped = rstripped[::-1]\n return ANSIString(raw[: ir2 + 1] + rstripped)",
"def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()",
"def get_sequence_without_gaps_or_padding(sequence: str) -> str:\n return sequence.replace(dc_constants.GAP_OR_PAD,\n '').replace(dc_constants.GAP_OR_PAD, '')",
"def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text",
"def remove_bad_chars(val):\n if val == '-':\n return None\n return val",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)",
"def normalize_text(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-/:;<=>?@[\\\\]^_`{|}~',remove_number='[0-9]',chars=False):\n punc_spaces = re.compile('([%s])' % re.escape(pad_punc))\n punc = re.compile('[%s]' % re.escape(remove_punc))\n text = text.lower()\n if chars:\n text = re.sub(punc,'',text)\n else:\n text = re.sub('\\.{3,}',' dots',text)\n text = re.sub(punc_spaces, r' \\1 ', text)\n text = re.sub(remove_number,'',text)\n text = re.sub(punc,'',text)\n text = re.sub(r'\\b((?![ai])[a-z])\\b','',text)\n text = re.sub('\\s{2,}', ' ', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\t', ' ', text)\n text=text.strip()\n \n return text",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)"
] | [
"0.7566995",
"0.6872599",
"0.686638",
"0.6582646",
"0.6464975",
"0.6438769",
"0.63625026",
"0.6273571",
"0.6273571",
"0.6253653",
"0.6191777",
"0.6101316",
"0.60952926",
"0.6083035",
"0.602748",
"0.60102254",
"0.59808695",
"0.5978832",
"0.5978832",
"0.59764034",
"0.59651965",
"0.595399",
"0.594863",
"0.5915078",
"0.5907393",
"0.59051",
"0.5902576",
"0.5886974",
"0.5873392",
"0.58502686"
] | 0.76906425 | 0 |
Sequence shuffle should return new random sequence w/ same monomers | def test_shuffle(self):
r = self.RNA("UUUUCCCCAAAAGGGG")
s = r.shuffle()
self.assertNotEqual(r, s)
self.assertEqualItems(r, s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_shuffle(self):\n random.shuffle(self.seq)\n self.seq.sort()\n self.assertEqual(self.seq, range(10))",
"def shuffle(self):\n for i in xrange(self.n - 1):\n pos = random.randint(i, self.n - 1)\n self.to[i], self.to[pos] = self.to[pos], self.to[i]\n self.a[i], self.a[pos] = self.a[pos], self.a[i]\n return self.a",
"def shuffle(self) -> List[int]:",
"def shuffle_chromosomes(mutated_genome):\n random.shuffle(mutated_genome)",
"def shuffle_opacities(mutated_genome):\n mutated_genome",
"def shuffle(self):\n self.__c_elem().melange()",
"def shuffle(self):\n shuffle(self.cards)",
"def shuffle(self):\n shuffle(self.cards)",
"def _shuffle():\n\n random.shuffle(deck)",
"def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)",
"def shuffle(self):\n new_list = [] \n while True:\n if len(self.init_nums) == 0 :\n pass\n break\n else: \n while self.init_nums is not None: \n if len(self.init_nums) is 0: \n break\n else :\n ks = random.choice(self.init_nums) \n new_list.append(ks)\n self.init_nums.remove(ks)\n\n if self.orig == new_list:\n continue\n else:\n print(new_list)\n break \n self.init_nums = new_list\n return(new_list)",
"def shuffle(self):\n x = len(self.org)\n result = self.org[:]\n var = x\n for i in range(x):\n id = random.randrange(0, var)\n result[id], result[var - 1] = result[var - 1], result[id]\n var -= 1\n\n return result",
"def shuffle( self ):\n random.shuffle(self.__deck)",
"def shuffle(self) -> List[int]:\n runs = self.nums.copy()\n # Fisher-Yates Algorithm\n n = len(runs)\n for i in range(n):\n j = random.randint(i, n - 1)\n runs[i], runs[j] = runs[j], runs[i]\n return runs",
"def shuffle(self):\n self.shuffle_range(len(self.cards))",
"def shuffle(self):\n import random\n random.shuffle(self.cards)",
"def shuffle(self):\n random.shuffle(self.cards)",
"def shuffle(self):\n random.shuffle(self.cards)",
"def shuffle(self):\n random.shuffle(self.cards)",
"def shuffle(self):\n random.shuffle(self.cards)",
"def shuffle(self):\n random.shuffle(self.cards)",
"def main():\n input_1 = [7, 6, 5, 4, 3, 2, 1]\n print shuffle(input_1)\n print input_1",
"def shuffle(lol, seed):\n for l in lol:\n random.seed(seed)\n random.shuffle(l)",
"def shuffle(self):\r\n random.shuffle(self.deck)",
"def test_case_3(self):\n\n print(\"-------------------------shuffle-----------------------------------\")\n\n deck_size = 10\n deck = np.arange(deck_size)\n shuffle_deck = shuffle(deck, my_seed=2)\n self.assertSequenceEqual(list(shuffle_deck), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n\n deck_size = 100\n deck = np.arange(deck_size)\n shuffle_deck = shuffle(deck, my_seed=39)\n self.assertSequenceEqual(list(shuffle_deck),\n [8, 7, 6, 5, 4, 3, 2, 1, 0, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85,\n 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64,\n 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43,\n 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22,\n 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9])\n\n deck_size = 333\n deck = np.arange(deck_size)\n shuffle_deck = shuffle(deck, my_seed=139)\n self.assertSequenceEqual(list(shuffle_deck),\n [332, 331, 330, 329, 328, 327, 326, 325, 324, 323, 322, 321, 320, 319, 318, 317, 316,\n 315, 314, 313, 312, 311, 310, 309, 308, 307, 306, 305, 304, 303, 302, 301, 300, 299,\n 298, 297, 296, 295, 294, 293, 292, 291, 290, 289, 288, 287, 286, 285, 284, 283, 282,\n 281, 280, 279, 278, 277, 276, 275, 274, 273, 272, 271, 270, 269, 268, 267, 266, 265,\n 264, 263, 262, 261, 260, 259, 258, 257, 256, 255, 254, 253, 252, 251, 250, 249, 248,\n 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231,\n 230, 229, 228, 227, 226, 225, 224, 223, 222, 221, 220, 219, 218, 217, 216, 215, 214,\n 213, 212, 211, 210, 209, 208, 207, 206, 205, 204, 203, 202, 201, 200, 199, 198, 197,\n 196, 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185, 184, 183, 182, 181, 180,\n 179, 178, 177, 176, 175, 174, 173, 172, 171, 170, 169, 168, 167, 166, 165, 164, 163,\n 162, 161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151, 150, 149, 148, 147, 146,\n 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 130, 129,\n 128, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112,\n 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94,\n 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73,\n 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52,\n 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31,\n 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9,\n 8, 7, 6, 5, 4, 3, 2, 1, 0])\n\n print(\"random shuffle check for different seed ok: PASS\")",
"def shuffle(self):\n random.shuffle(self.get_cards())",
"def ScrambleMutation(item):\n item=copy.deepcopy(item)\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo+1),2))\n shuffle_slice(item,start,end)\n return item",
"def shuffle(list_, random_seed=123):\n random.Random(random_seed).shuffle(list_)",
"def shuffle(self) -> None:\n shuffle(self.cards)",
"def shuffle(self):\n random.SystemRandom().shuffle(self.deck)"
] | [
"0.74236465",
"0.70922947",
"0.70799804",
"0.7030679",
"0.6991424",
"0.6946981",
"0.68759125",
"0.68759125",
"0.681591",
"0.6815623",
"0.6789187",
"0.67795455",
"0.67775214",
"0.6772275",
"0.67656815",
"0.67480785",
"0.6738267",
"0.6738267",
"0.6738267",
"0.6738267",
"0.6738267",
"0.6737064",
"0.66967946",
"0.668795",
"0.6683359",
"0.6679404",
"0.6675339",
"0.6618038",
"0.6616095",
"0.6608917"
] | 0.7527803 | 0 |
Sequence is_gap should return True if char is a valid gap char | def test_is_gap(self):
r = self.RNA("ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN")
for char in "qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM":
assert not r.is_gap(char)
assert r.is_gap("-")
# only works on a single literal that's a gap, not on a sequence.
# possibly, this behavior should change?
assert not r.is_gap("---")
# check behaviour on self
assert not self.RNA("CGAUACGUACGACU").is_gap()
assert not self.RNA("---CGAUA----CGUACG---ACU---").is_gap()
assert self.RNA("").is_gap()
assert self.RNA("----------").is_gap() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_gaps(s, gapcode=45):\r\n return nonzero(fromstring(s, dtype=uint8) == gapcode)",
"def find_gaps(s, gapcode=45):\n return nonzero(fromstring(s,dtype=uint8) == gapcode)",
"def checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True",
"def checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True",
"def is_all_gap(self, pos):\n for seq in self.values():\n if seq[pos] != '-':\n return False\n return True",
"def test_not_gap(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertTrue(not_gap(m[0]))\n self.assertFalse(not_gap(m[5]))",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False",
"def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0",
"def is_gap_state(self, el):\n try:\n return el is self.gap\n except:\n return False",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def test_first_gap(self):\n self.assertEqual(self.RNA(\"\").first_gap(), None)\n self.assertEqual(self.RNA(\"a\").first_gap(), None)\n self.assertEqual(self.RNA(\"uhacucHuhacUUhacan\").first_gap(), None)\n self.assertEqual(self.RNA(\"-abc\").first_gap(), 0)\n self.assertEqual(self.RNA(\"b-ac\").first_gap(), 1)\n self.assertEqual(self.RNA(\"abcd-\").first_gap(), 4)",
"def jump_gap(self, last):\n a_gap = last.a - self.a_cursor\n b_gap = last.b - self.b_cursor\n return (a_gap <= self.gap_length and\n b_gap <= self.gap_length)",
"def _encode_gap(self):\n\t\tgap_length = self.config.get('repeat_gap',\n\t\t self.config.get('gap',\n\t\t 0))\n\t\treturn self._encode_bit('0', gap_length)",
"def test_gap_vector(self):\n\n def g(x):\n return self.RNA(x).gap_vector()\n\n self.assertEqual(g(\"\"), [])\n self.assertEqual(g(\"ACUGUCAGUACGHCSDKCCUCCDNCNS\"), [False] * 27)\n self.assertEqual(\n g(\"GUACGUAACAKADC-SDAHADSAK\"),\n list(map(bool, list(map(int, \"000000000000001000000000\")))),\n )\n self.assertEqual(g(\"-DSHSUHDSS\"), list(map(bool, list(map(int, \"1000000000\")))))\n self.assertEqual(\n g(\"UACHASCAGDS-\"), list(map(bool, list(map(int, \"000000000001\"))))\n )\n self.assertEqual(\n g(\"---CGAUgCAU---ACGHc---ACGUCAGU--?\"),\n list(map(bool, list(map(int, \"111000000001110000011100000000111\")))),\n )",
"def _substitute_opening_gap_char(seq):\n newseq=list(seq)\n iterator=rex.finditer(seq)\n for match in iterator:\n try:\n newseq[match.span()[1]-1]=\"|\"\n except:\n continue\n return \"\".join(newseq)",
"def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)",
"def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", \"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False",
"def seq_exceeds_homopolymers(curr_seq, max_len=6):\r\n for base in 'ATGC':\r\n curr = base * (max_len + 1)\r\n if curr in curr_seq:\r\n return True\r\n return False",
"def test_gap_array(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1, 0, 1, 1, 0, 0, 0, 1]))\n r = self.RNA(\"AC\")\n v = r.gap_array()\n self.assertEqual(v, array([0, 0]))\n r = self.RNA(\"-?\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1]))",
"def is_valid_sequence(dna):\n \n nucleotides = 'ATCG'\n error = 0\n \n for char in dna:\n if not char in nucleotides:\n error = error + 1\n return error == 0",
"def compareGap(self, s, f):\n g = self.gap\n if g is not None:\n return abs(g[1]-g[0]) < abs(f-s)\n else:\n return True",
"def starts_before(self, alignment_index):\n if alignment_index > self.length():\n raise IndexError(\"alignment index out of range\")\n alignment_index %= self.length()\n for s in self.sequence[:alignment_index + 1]:\n if s not in GAP_CHARACTERS:\n return True\n return False",
"def checkChar(self, char):\n return char not in self.guessedChars",
"def is_connective(char):\n return char in [u\"¬\", u\"∧\", u\"∨\", u\"→\", u\"↔\"]",
"def is_legit_peptide_sequence(record_seq: str) -> bool:\n aas = {\n \"A\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"Y\",\n \"*\",\n }\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(aas)",
"def nearest_gap(seq,pos):\n # Catch negative sequence positions\n if pos < 0:\n raise IndexError, \"Sequence positions cannot be negative: %d\" % pos\n \n # If pos contains a gap, that's the closest gap\n if seq[pos] == '-':\n return pos\n \n # create a list to store the nearest gap character in the 5' and\n # 3' directions\n choices = []\n # find the nearest gap 5' of pos\n try:\n gap_index = ''.join(seq[:pos]).rindex('-')\n distance = pos - gap_index\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # find the nearest gap 3' of pos\n try:\n gap_index = pos + ''.join(seq[pos:]).index('-')\n distance = gap_index - pos\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # error if there are no gaps in the sequence\n if not choices:\n raise UnalignableSequenceError,\\\n \"Can't adjust alignment because there are too few gaps to \"+\\\n \"remove in the aligned candidate to reduce to the length of \"+\\\n \"the template alignment (i.e., candidate adds too many insertions \"+\\\n \"during pairwise alignment).\"\n \n # return the gap_index of the choice with the smaller distance -- if there\n # is a tie, will delete the 5' gap (which is what original NAST does)\n return min(choices)[1]",
"def gap(num: int) -> int:\n\tbinary = \"{0:b}\".format(num)\n\tg_max = g_cur = 0\n\n\tfor i, char in enumerate(binary):\n\t\tif binary[i:].count('1') == 0:\n\t\t\tbreak\n\n\t\tg_cur = calc_g_cur(g_cur, char)\n\t\tg_max = calc_g_max(g_cur, g_max)\n\n\treturn g_max",
"def has_balanced_parens(string):",
"def is_space(self):\n return self.pos == self.SPACE_POS"
] | [
"0.72288585",
"0.7180023",
"0.7167158",
"0.7167158",
"0.68990767",
"0.68299943",
"0.65973693",
"0.6557601",
"0.64990675",
"0.64894366",
"0.63977575",
"0.6362843",
"0.6229104",
"0.61301273",
"0.60077465",
"0.5992771",
"0.5968189",
"0.59411037",
"0.593851",
"0.5772497",
"0.5726053",
"0.5694904",
"0.5662419",
"0.5641414",
"0.5625732",
"0.56155163",
"0.5612514",
"0.5544225",
"0.5527094",
"0.54995775"
] | 0.8379988 | 0 |
Sequence is_degenerate should return True if degen symbol in seq | def test_is_degenerate(self):
assert not self.RNA("").is_degenerate()
assert not self.RNA("UACGCUACAUGuacgucaguGCUAGCUA---ACGUCAG").is_degenerate()
assert self.RNA("N").is_degenerate()
assert self.RNA("R").is_degenerate()
assert self.RNA("y").is_degenerate()
assert self.RNA("GCAUguagcucgUCAGUCAGUACgUgcasCUAG").is_degenerate()
assert self.RNA("ACGYAUGCUGYWWNMNuwbycwuybcwbwub").is_degenerate() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def test_first_degenerate(self):\n self.assertEqual(self.RNA(\"\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"a\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"UCGACA--CU-gacucaguacgua\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"nCAGU\").first_degenerate(), 0)\n self.assertEqual(self.RNA(\"CUGguagvAUG\").first_degenerate(), 7)\n self.assertEqual(self.RNA(\"ACUGCUAacgud\").first_degenerate(), 11)",
"def is_degenerated(self):\n for interval in self.intervals:\n if not Interval.is_degenerated(interval):\n return False\n return True",
"def is_dna(sequentie):\r\n for nucleotide in sequentie:\r\n if nucleotide not in \"ACGTN\":\r\n return False\r\n return True",
"def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)",
"def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False",
"def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(r\"[.-@|\\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+\")\n\n if seq_val.search(sequence) is None:\n return True\n\n return False",
"def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0",
"def is_sd(sequence, stringency=\"medium\"):\n if \"T\" in sequence:\n sequence = sequence.replace(\"T\", \"U\")\n if stringency == \"broad\":\n variants = sd_variants_broad[4]\n else:\n variants = sd_variants_medium[4]\n for variant in variants:\n if variant in sequence:\n return True\n return False",
"def is_valid_sequence(dna):\n \n nucleotides = 'ATCG'\n error = 0\n \n for char in dna:\n if not char in nucleotides:\n error = error + 1\n return error == 0",
"def confirm_next(self, seq):\n for n, i in enumerate(seq):\n try:\n if self.items[self.pos + n] != i:\n return False\n except IndexError:\n return False\n return True",
"def testDegenerate(self):\n srt = asarray(self.copy())\n srt.sort(axis=1)\n return (srt[:,:-1] == srt[:,1:]).any(axis=1)",
"def seq_exceeds_homopolymers(curr_seq, max_len=6):\r\n for base in 'ATGC':\r\n curr = base * (max_len + 1)\r\n if curr in curr_seq:\r\n return True\r\n return False",
"def test_disambiguate(self):\n self.assertEqual(self.RNA(\"\").disambiguate(), \"\")\n self.assertEqual(\n self.RNA(\"AGCUGAUGUA--CAGU\").disambiguate(), \"AGCUGAUGUA--CAGU\"\n )\n self.assertEqual(\n self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\").disambiguate(\"strip\"), \"AU--CG\"\n )\n s = self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\")\n t = s.disambiguate(\"random\")\n u = s.disambiguate(\"random\")\n for i, j in zip(str(s), str(t)):\n if i in s.moltype.degenerates:\n assert j in s.moltype.degenerates[i]\n else:\n assert i == j\n self.assertNotEqual(t, u)\n self.assertEqual(len(s), len(t))",
"def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))",
"def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"",
"def CheckSeq(Seq):\n OkNucleo = (\"A\", \"C\", \"G\", \"T\")\n for i in Seq:\n if i not in OkNucleo:\n raise InputError(Seq,\"malformed input\")",
"def is_degenerated(interval):\n return interval.right == interval.left",
"def test_count_degenerate(self):\n self.assertEqual(self.RNA(\"\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"GACUGCAUGCAUCGUACGUCAGUACCGA\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"N\").count_degenerate(), 1)\n self.assertEqual(self.PROT(\"N\").count_degenerate(), 0)\n self.assertEqual(self.RNA(\"NRY\").count_degenerate(), 3)\n self.assertEqual(\n self.RNA(\"ACGUAVCUAGCAUNUCAGUCAGyUACGUCAGS\").count_degenerate(), 4\n )",
"def sequence_is_decreasing(sequence, strict=False):\n\n return sequence_is_increasing(list(reversed(sequence)), strict)",
"def test_isIndel(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n isIndel = model.get_predefined_predicate(\"indel\")\n assert isIndel(\"A\", \"-\")\n assert isIndel(\"-\", \"G\")\n # assert not self.submodel.isIndel('-', '-')\n assert not isIndel(\"a\", \"t\")",
"def removeDegenerate(self):\n return self[~self.testDegenerate()]",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def __ne__(self, seq):\n return not self.__eq__(seq) # Reverse of equality check",
"def is_separated(g):\n nonts = nonterminals(g)\n\n for nont in nonts:\n starts = set()\n for prod in g.productions(nont):\n start = prod.rhs()[0]\n\n if is_nonterminal(start):\n return False\n\n if start in starts:\n return False\n\n starts.add(start)\n\n return True",
"def isdistinct(seq):\n return len(seq) == len(set(seq))",
"def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)",
"def no(seq, pred=None):\n for elem in ifilter(pred, seq):\n return False\n return True",
"def test_seq_exceeds_homopolymers(self):\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 3), False)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 2), True)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 1), True)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGATTTT', 3), True)",
"def removeDuplicates(seq):\n\n pass"
] | [
"0.6201668",
"0.6091381",
"0.6069485",
"0.59473264",
"0.59035546",
"0.5826139",
"0.57964295",
"0.5788602",
"0.57822496",
"0.57488996",
"0.5700209",
"0.56820357",
"0.5671976",
"0.56627655",
"0.5661535",
"0.56607664",
"0.56530017",
"0.563402",
"0.5610719",
"0.5593537",
"0.5539772",
"0.55251664",
"0.5524775",
"0.5522615",
"0.551885",
"0.550337",
"0.54691595",
"0.54657906",
"0.5437754",
"0.5431225"
] | 0.7355169 | 0 |
Sequence is_strict should return True if all symbols in Monomers | def test_is_strict(self):
assert self.RNA("").is_strict()
assert self.PROT("A").is_strict()
assert self.RNA("UAGCACUgcaugcauGCAUGACuacguACAUG").is_strict()
assert not self.RNA("CAGUCGAUCA-cgaucagUCGAUGAC").is_strict() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if count!=1:\n return False\n else:\n continue \n temp=symbols.copy()\n for s in symbols:\n temp.remove(s)\n for e in temp:\n if s in e:\n return False\n else:\n continue\n temp=symbols.copy()\n\n return True",
"def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())",
"def all_simple (phrase):\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True",
"def true(symbol):\n return True",
"def hasConstantForm(self, sentence):",
"def isTrueConstant(self, sentence):",
"def check_symbols(self):\n # this method has a bug in that it never raises KeyError, it raises \n # ValueError instead.\n \n def is_valid(sym):\n # what symbols are valid? (, ), digits, atoms\n if sym in \"()\": return True\n #if sym.isdigit(): return True\n #if sym in _atomic_mass: return True\n if sym.isalnum(): return True\n return False\n\n for t in self._gettokens():\n if not is_valid(t): raise ValueError(\"bad symbol \" + t)\n if t.isalpha() and t not in _atomic_mass: raise KeyError(\"key error \" + t)\n return True",
"def uses_all(w, letters):\n\treturn set(letters).issubset(set(w))",
"def istrue(self):\n return has_pos_and_neg(self.literals)",
"def needs_recoding(strings):\n for string in strings:\n for char in string:\n if 127 < ord(char) < 256:\n return True\n return False",
"def is_strummable(self):\n strummable = True\n start_string = -1\n for i in range(0, len(self.notes)):\n if self.notes[i].name not in ['NULL', 'MUTE']:\n start_string = i\n break\n if start_string == -1:\n self.strummable = False\n return False\n for i in range(start_string + 1, len(self.notes)):\n if self.notes[i].name in ['NULL', 'MUTE']:\n strummable = False\n self.strummable = strummable\n return strummable",
"def is_special(s):\n for part in xrange(1, 3**len(s)):\n p = part\n sa = 0\n ca = 0\n sb = 0\n cb = 0\n for i, x in enumerate(s):\n if p%3 == 1:\n sa += x\n ca += 1\n elif p%3 == 2:\n sb += x\n cb += 1\n p = p//3\n if ca == 0 or cb == 0:\n continue\n if sa == sb:\n return False\n if ca > cb and sa <= sb:\n return False\n if cb > ca and sb <= sa:\n return False\n return True",
"def is_regular(self):\r\n # if they are the form aA\r\n are_productions_regular = all(map(is_production_regular, self.productions))\r\n if not are_productions_regular:\r\n return False\r\n\r\n # those that contain epsilon\r\n epsilon_prods = {prod.non_terminal for prod in self.productions if '@' in prod.symbols}\r\n if len(epsilon_prods) > 1 or (len(epsilon_prods) == 1 and list(epsilon_prods)[0] != 'S'):\r\n return False # epsilon not in S\r\n s_has_epsilon = len(epsilon_prods) == 1 and list(epsilon_prods)[0] == 'S'\r\n s_producing_prods = {prod.non_terminal for prod in self.productions if 'S' in prod.symbols}\r\n\r\n # if s contains epsilon, it doesn't appear in other productions\r\n if s_has_epsilon and (\r\n len(s_producing_prods) > 1 or (len(s_producing_prods) == 1 and list(s_producing_prods)[0] != 'S')):\r\n return False\r\n\r\n return True",
"def _is_well_formed(l):\n\tif _is_symbol(l):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 2\n\t\t\tand l[0] == neg and _is_well_formed(l[1])):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 3\n\t\t\tand _is_binary(l[1])\n\t\t\tand _is_well_formed(l[0]) and _is_well_formed(l[2])):\n\t\treturn 1\n\treturn 0",
"def uses_only(w, letters):\n\treturn set(w).issubset(set(letters))",
"def is_atom_convex(self):\n return True",
"def contains_strict(self, gi):\n if gi is None:\n return False\n for gi_obj in self.gradual_items:\n if (gi.attribute_col == gi_obj.attribute_col) and (gi.symbol == gi_obj.symbol):\n return True\n return False",
"def is_atom_convex(self) -> bool:\n return False",
"def find_pure_symbol(symbols, clauses):\n for s in symbols:\n found_pos, found_neg = False, False\n for c in clauses:\n if not found_pos and s in disjuncts(c): found_pos = True\n if not found_neg and ~s in disjuncts(c): found_neg = True\n if found_pos != found_neg: return s, found_pos\n return None, None",
"def isSetStrict(self):\n return _libsbml.FbcModelPlugin_isSetStrict(self)",
"def is_production_regular(prod):\r\n return len(prod.symbols) <= 2 and all(map(is_terminal, prod.symbols[:-1]))",
"def is_atom_convex(self):\n return False",
"def is_atom_convex(self):\n return False",
"def __call__(self, s):\n state = self._initial\n try:\n for sym in s:\n state = self._trans_matrix[state][self._syms_to_indices[sym]]\n except KeyError:\n raise NotInAlphabetError(sym) from None\n return state in self._accepting",
"def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)",
"def seq_exceeds_homopolymers(curr_seq, max_len=6):\r\n for base in 'ATGC':\r\n curr = base * (max_len + 1)\r\n if curr in curr_seq:\r\n return True\r\n return False",
"def accepts(self, word: Iterable[str]) -> bool:\n if self._enfa is None:\n self._enfa = self.to_epsilon_nfa()\n return self._enfa.accepts(word)",
"def is_lexical(word_i, word_j):\n if word_i.isalpha() and word_j.isalpha():\n return True\n return False",
"def check_for_symbols(word: str) -> bool:\n \n if MENTION_SYMBOL in word or HASH_SYMBOL in word or URL_START in word:\n return False\n return True",
"def ascii_numeric(s: str) -> bool:\n return frozenset(s).issubset(_ascii_n)"
] | [
"0.59880435",
"0.5868502",
"0.57117075",
"0.5693459",
"0.56859756",
"0.5625319",
"0.55478084",
"0.5500837",
"0.5423727",
"0.5399986",
"0.5368656",
"0.53295964",
"0.53133947",
"0.52879006",
"0.5250921",
"0.5220573",
"0.5218399",
"0.5208652",
"0.5200181",
"0.5194953",
"0.5173776",
"0.5167607",
"0.5167607",
"0.51661015",
"0.51566565",
"0.514712",
"0.51443344",
"0.51344174",
"0.5129347",
"0.5108245"
] | 0.7072339 | 0 |
Sequence first_gap should return index of first gap symbol, or None | def test_first_gap(self):
self.assertEqual(self.RNA("").first_gap(), None)
self.assertEqual(self.RNA("a").first_gap(), None)
self.assertEqual(self.RNA("uhacucHuhacUUhacan").first_gap(), None)
self.assertEqual(self.RNA("-abc").first_gap(), 0)
self.assertEqual(self.RNA("b-ac").first_gap(), 1)
self.assertEqual(self.RNA("abcd-").first_gap(), 4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first(seq):\n return next(iter(seq))",
"def _substitute_opening_gap_char(seq):\n newseq=list(seq)\n iterator=rex.finditer(seq)\n for match in iterator:\n try:\n newseq[match.span()[1]-1]=\"|\"\n except:\n continue\n return \"\".join(newseq)",
"def test_is_gap(self):\n r = self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\")\n for char in \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM\":\n assert not r.is_gap(char)\n assert r.is_gap(\"-\")\n # only works on a single literal that's a gap, not on a sequence.\n # possibly, this behavior should change?\n assert not r.is_gap(\"---\")\n # check behaviour on self\n assert not self.RNA(\"CGAUACGUACGACU\").is_gap()\n assert not self.RNA(\"---CGAUA----CGUACG---ACU---\").is_gap()\n assert self.RNA(\"\").is_gap()\n assert self.RNA(\"----------\").is_gap()",
"def first_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[0]\n\t\telse:\n\t\t\treturn None",
"def first(xs):\n if not xs:\n return None\n return xs[0]",
"def first(xs):\n if not xs:\n return None\n return xs[0]",
"def nearest_gap(seq,pos):\n # Catch negative sequence positions\n if pos < 0:\n raise IndexError, \"Sequence positions cannot be negative: %d\" % pos\n \n # If pos contains a gap, that's the closest gap\n if seq[pos] == '-':\n return pos\n \n # create a list to store the nearest gap character in the 5' and\n # 3' directions\n choices = []\n # find the nearest gap 5' of pos\n try:\n gap_index = ''.join(seq[:pos]).rindex('-')\n distance = pos - gap_index\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # find the nearest gap 3' of pos\n try:\n gap_index = pos + ''.join(seq[pos:]).index('-')\n distance = gap_index - pos\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # error if there are no gaps in the sequence\n if not choices:\n raise UnalignableSequenceError,\\\n \"Can't adjust alignment because there are too few gaps to \"+\\\n \"remove in the aligned candidate to reduce to the length of \"+\\\n \"the template alignment (i.e., candidate adds too many insertions \"+\\\n \"during pairwise alignment).\"\n \n # return the gap_index of the choice with the smaller distance -- if there\n # is a tie, will delete the 5' gap (which is what original NAST does)\n return min(choices)[1]",
"def start_with_the_beggining(rna: str):\n return 0",
"def find_gaps(s, gapcode=45):\n return nonzero(fromstring(s,dtype=uint8) == gapcode)",
"def gap(l):\n if l < 3:\n return 0\n\n # places one person in the middle of the gap,\n # and starts over on the new smaller gaps on either side.\n return gap(int(l / 2)) + 1 + gap(ceil(l / 2) - 1)",
"def find_gaps(s, gapcode=45):\r\n return nonzero(fromstring(s, dtype=uint8) == gapcode)",
"def first(pair):\n\treturn pair[0]",
"def first_unsorted(self, start):\n def get_from(start, stop):\n for i in range(start, stop):\n if self.nums[i] > self.nums[i+1]:\n return i+1\n return None\n if start == len(self.nums):\n start = 0\n new_n = get_from(start, len(self.nums)-1)\n if new_n is None:\n new_n = get_from(0, start)\n return new_n",
"def first(word):\n\treturn word[0]",
"def first_last_chop(seq):\n return seq[4:-4:2]",
"def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")",
"def get_first_space(x):\n\n return x.split()[0]",
"def first(word):\n return word[0]",
"def get_first_seg(*args):\n return _ida_segment.get_first_seg(*args)",
"def get_first(self):\n return self.A[1][0] if self.n > 0 else None",
"def delete_first(self):\n if self.n == 0:\n return None\n first = self.A[1]\n self.n -= 1\n last = self.A.pop()\n if self.n > 0:\n self.A[1] = last\n self.pos[last[0]] = 1\n self.combine(1)\n return first[0]",
"def test_gap_indices(self):\n self.assertEqual(self.RNA(\"\").gap_indices(), [])\n self.assertEqual(self.RNA(\"ACUGUCAGUACGHSDKCUCDNNS\").gap_indices(), [])\n self.assertEqual(self.RNA(\"GUACGUACAKDC-SDHDSK\").gap_indices(), [12])\n self.assertEqual(self.RNA(\"-DSHUHDS\").gap_indices(), [0])\n self.assertEqual(self.RNA(\"UACHASADS-\").gap_indices(), [9])\n self.assertEqual(\n self.RNA(\"---CGAUgCAU---ACGHc---ACGUCAGU---\").gap_indices(),\n [0, 1, 2, 11, 12, 13, 19, 20, 21, 30, 31, 32],\n )",
"def first(self):\n return self.begin and self.begin.value or None",
"def getSymbolBefore(self, symbol: ghidra.program.model.symbol.Symbol) -> ghidra.program.model.symbol.Symbol:\n ...",
"def _encode_gap(self):\n\t\tgap_length = self.config.get('repeat_gap',\n\t\t self.config.get('gap',\n\t\t 0))\n\t\treturn self._encode_bit('0', gap_length)",
"def get_gap(sparse_operator, initial_guess=None):\n if not is_hermitian(sparse_operator):\n raise ValueError('sparse_operator must be Hermitian.')\n\n values, _ = scipy.sparse.linalg.eigsh(sparse_operator,\n k=2,\n v0=initial_guess,\n which='SA',\n maxiter=1e7)\n\n gap = abs(values[1] - values[0])\n return gap",
"def return_first(x):\r\n if x == []:\r\n return ''\r\n else:\r\n return x[0]",
"def get_head_pos( head, ngram ):\n try:\n tokens = ngram.split( ' ' )\n return str([ i for i, t in enumerate( tokens ) if t.startswith( head + \"/\" )][0] + 1 )\n except ValueError:\n return None",
"def firstMissingPositive(nums):\n\n n = len(nums)\n\n # Base case.\n if 1 not in nums:\n return 1\n\n # nums = [1]\n if n == 1:\n return 2\n\n # Replace negative numbers, zeros,\n # and numbers larger than n by 1s.\n # After this conversion nums will contain \n # only positive numbers.\n for i in range(n):\n if nums[i] <= 0 or nums[i] > n:\n nums[i] = 1\n\n # Use index as a hash key and number sign as a presence detector.\n # For example, if nums[1] is negative that means that number `1`\n # is present in the array. \n # If nums[2] is positive - number 2 is missing.\n for i in range(n): \n a = abs(nums[i])\n # If you meet number a in the array - change the sign of a-th element.\n # Be careful with duplicates : do it only once.\n # [3,4,-1,1]\n if a == n:\n nums[0] = - abs(nums[0])\n else:\n nums[a] = - abs(nums[a])\n \n # Now the index of the first positive number \n # is equal to first missing positive.\n for i in range(1, n):\n if nums[i] > 0:\n return i\n\n if nums[0] > 0:\n return n\n\n return n + 1",
"def test_first_non_strict(self):\n self.assertEqual(self.RNA(\"\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"A\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"ACGUACGUcgaucagu\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"N\").first_non_strict(), 0)\n self.assertEqual(self.RNA(\"-\").first_non_strict(), 0)\n self.assertEqual(self.RNA(\"ACGUcgAUGUGCAUcagu-\").first_non_strict(), 18)"
] | [
"0.62938017",
"0.5901865",
"0.58401036",
"0.5820655",
"0.58202505",
"0.58202505",
"0.5801659",
"0.5738459",
"0.57372636",
"0.5688006",
"0.5682002",
"0.5681534",
"0.56456417",
"0.5599555",
"0.5591629",
"0.5551406",
"0.552724",
"0.5501178",
"0.5490658",
"0.545651",
"0.54537",
"0.5392011",
"0.5391302",
"0.5380197",
"0.53755176",
"0.5375197",
"0.53706294",
"0.5352211",
"0.5348582",
"0.53393865"
] | 0.75103754 | 0 |
Sequence first_degenerate should return index of first degen symbol | def test_first_degenerate(self):
self.assertEqual(self.RNA("").first_degenerate(), None)
self.assertEqual(self.RNA("a").first_degenerate(), None)
self.assertEqual(self.RNA("UCGACA--CU-gacucaguacgua").first_degenerate(), None)
self.assertEqual(self.RNA("nCAGU").first_degenerate(), 0)
self.assertEqual(self.RNA("CUGguagvAUG").first_degenerate(), 7)
self.assertEqual(self.RNA("ACUGCUAacgud").first_degenerate(), 11) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def firstNotRepeatingCharacter(s):\n\n # even positions = number of characters\n # odd positions = last occurrence of that character\n scounter = [0] * 52\n\n for i in range(len(s)):\n char_pos = (ord(s[i]) - 97) * 2\n scounter[char_pos] += 1\n scounter[char_pos + 1] = i\n\n last_occurrence = len(s)\n for i in range(0, 52, 2):\n if scounter[i] == 1 and scounter[i + 1] < last_occurrence:\n last_occurrence = scounter[i + 1]\n\n if last_occurrence < len(s):\n return s[last_occurrence]\n\n return '_'",
"def first_not_repeating_character(string):\n counter = Counter(string)\n for key, value in counter.items():\n if value <= 1:\n return key\n break\n return '_'",
"def first(seq):\n return next(iter(seq))",
"def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]",
"def find1symbols(symbol, reel):\n for i in range(len(reel)):\n if reel[i] == symbol:\n return i",
"def first(word):\n\treturn word[0]",
"def get_index(line):\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return dummy_i",
"def first(word):\n return word[0]",
"def simple_index_of_min(deck, firstIndex):\n min_val = deck[firstIndex] \n min_idx = firstIndex\n for i in range(firstIndex + 1, len(deck)):\n if min_val > deck[i]:\n min_val = deck[i]\n min_idx = i\n return min_idx",
"def _gen_find(subseq, generator):\n if isinstance(subseq, bytes):\n subseq = bytearray(subseq)\n subseq = list(subseq)\n pos = 0\n saved = []\n\n for c in generator:\n saved.append(c)\n if len(saved) > len(subseq):\n saved.pop(0)\n pos += 1\n if saved == subseq:\n return pos\n return -1",
"def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init",
"def first_recurring_char(s: str) -> str:\n h = {} # using dictionary as hash\n for ch in s:\n if ch in h:\n return ch\n\n h[ch] = 0\n return None",
"def first(self):# -> set:\r\n return self.alphabet",
"def index(sequence, i):\n try:\n return sequence[i]\n except IndexError:\n return u\"\"",
"def r2_index(species):\n result = []\n curr_char = species[0]\n curr_len = 1\n for i in range(1, len(species)):\n if curr_char != species[i]:\n result.append((curr_char, curr_len))\n curr_char = species[i]\n curr_len = 0\n curr_len += 1\n result.append((curr_char, curr_len))\n return result",
"def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)",
"def _get_first_index(self, degree):\n if degree < 1:\n raise ValueError('degree must be 1 or greater')\n lo = 0\n hi = len(self._degreesDesc) - 1\n\n while lo < hi:\n mid = (lo + hi + 1) // 2\n if degree < self._degreesDesc[mid]:\n lo = mid\n else:\n hi = mid - 1\n\n if degree == self._degreesDesc[hi] and hi != -1:\n return hi\n else:\n return hi + 1",
"def first_non_rep(string):\n if len(string) == 0:\n return None\n\n if len(string) == 1:\n return string\n\n letter_dict = {}\n # Letters in the string will get appended to the list, so order is maintained.\n one_char = []\n\n for letter in string:\n letter = letter.lower() \n if letter in letter_dict:\n letter_dict[letter] += 1\n if letter in one_char:\n one_char.remove(letter)\n else:\n letter_dict[letter] = 1\n one_char.append(letter)\n if not one_char:\n return None\n else:\n return one_char[0]",
"def test_first_non_strict(self):\n self.assertEqual(self.RNA(\"\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"A\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"ACGUACGUcgaucagu\").first_non_strict(), None)\n self.assertEqual(self.RNA(\"N\").first_non_strict(), 0)\n self.assertEqual(self.RNA(\"-\").first_non_strict(), 0)\n self.assertEqual(self.RNA(\"ACGUcgAUGUGCAUcagu-\").first_non_strict(), 18)",
"def trivial(seq):\n # Runtime: O(2^n)\n from itertools import combinations\n for i in range(len(seq), 0, -1):\n for subsequence in combinations(seq, i):\n if is_increasing(subsequence):\n return i\n return 0",
"def get_opposite_character(st, index):\n if st[index] is 'A':\n return 'C'\n elif st[index] is 'C':\n return 'A'\n elif st[index] is 'D':\n return 'B'\n elif st[index] is 'B':\n return 'D'",
"def first_not_repeating_character(string):\n letters = {}\n order = {}\n for s in xrange(string):\n letters.setdefault(ord(string[s]) - 97, 0)\n letters[ord(string[s]) - 97] += 1\n order[s] = ord(string[s]) - 97\n for i in xrange(len(order)):\n if letters[order[i]] == 1:\n return chr(order[i] + 97)\n return '_'",
"def first_in_first_out(table):\n min_order = table[0].fifo_order\n min_index = 0\n for index, table_line in enumerate(table):\n if table_line.fifo_order < min_order:\n min_order = table_line.fifo_order\n min_index = index\n\n return min_index",
"def first_unique_character_map(string: str) -> int:\n seen = {}\n for char in string:\n if seen.get(char):\n seen[char] = seen.get(char) + 1\n else:\n seen[char] = 1\n\n for i, _ in enumerate(string):\n char = string[i]\n if seen.get(char) == 1:\n return i\n\n return -1",
"def first_unique_character_list(string: str) -> int:\n counts = [0] * 26\n\n def get_idx(character: str) -> int:\n return ord(character) - 97\n\n for char in string:\n idx = get_idx(char)\n counts[idx] += 1\n\n for i, char in enumerate(string):\n idx = get_idx(char)\n if counts[idx] == 1:\n return i\n\n return -1",
"def get_start(i,v):\n return i-v[i]-1",
"def index_of_min(deck, firstIndex):\n return deck.index(np.min(deck[firstIndex:]))",
"def first_unsorted(self, start):\n def get_from(start, stop):\n for i in range(start, stop):\n if self.nums[i] > self.nums[i+1]:\n return i+1\n return None\n if start == len(self.nums):\n start = 0\n new_n = get_from(start, len(self.nums)-1)\n if new_n is None:\n new_n = get_from(0, start)\n return new_n",
"def first(self): #TODO\r\n result = []\r\n for x in self.first_lookup(self.initialsymbol):\r\n result += x.first()\r\n if len(result) == 1:\r\n return result[0]\r\n return Choice(result)",
"def getFisrtCharThatAppearsOnce(myString):\n myString = \"\".join(myString.lower().split())\n charDict = {key:[0, 0] for key in string.ascii_lowercase}\n for pos, char in enumerate(myString):\n charDict[char][0] += 1\n charDict[char][1] = pos\n charDict = {key:values for key, values in charDict.items() if values[0] == 1}\n sortedCharDict = sorted(charDict.items(), key=operator.itemgetter(1))\n strOut = sortedCharDict[0][0] if sortedCharDict else False\n return strOut"
] | [
"0.629569",
"0.61979645",
"0.61258584",
"0.5994529",
"0.59364647",
"0.59274447",
"0.5868857",
"0.5833778",
"0.57321924",
"0.56907326",
"0.567017",
"0.5665031",
"0.5656412",
"0.56558704",
"0.56542057",
"0.5627427",
"0.5622604",
"0.56069934",
"0.5604781",
"0.5595544",
"0.55856377",
"0.55837333",
"0.5547935",
"0.5543549",
"0.55386764",
"0.553383",
"0.55332774",
"0.5487401",
"0.54783195",
"0.5474913"
] | 0.67240775 | 0 |
Sequence first_non_strict should return index of first nonstrict symbol | def test_first_non_strict(self):
self.assertEqual(self.RNA("").first_non_strict(), None)
self.assertEqual(self.RNA("A").first_non_strict(), None)
self.assertEqual(self.RNA("ACGUACGUcgaucagu").first_non_strict(), None)
self.assertEqual(self.RNA("N").first_non_strict(), 0)
self.assertEqual(self.RNA("-").first_non_strict(), 0)
self.assertEqual(self.RNA("ACGUcgAUGUGCAUcagu-").first_non_strict(), 18) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first(word):\n\treturn word[0]",
"def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]",
"def first(seq):\n return next(iter(seq))",
"def test_strings_first_symbol():\n\n first_result = strings_ops.strings_first_symbol(\"Otus\", \"october\")\n assert first_result is True",
"def first(word):\n return word[0]",
"def find1symbols(symbol, reel):\n for i in range(len(reel)):\n if reel[i] == symbol:\n return i",
"def find_pure_symbol(symbols, clauses):\n for s in symbols:\n found_pos, found_neg = False, False\n for c in clauses:\n if not found_pos and s in disjuncts(c): found_pos = True\n if not found_neg and ~s in disjuncts(c): found_neg = True\n if found_pos != found_neg: return s, found_pos\n return None, None",
"def findFirstTrueValue( data ):\n # for long sound with a lot of silence and noise, it's faster to recode it having a return well placed. (8sec => 0.052sec)\n n = len(data);\n i = 0; \n while( i < n ):\n if( data[i] ):\n return i\n i += 1\n return -1",
"def min_search(arr: Sequence) -> int:\n\tif len(arr) == 0:\n\t\tprint(\"упс\")\n\t\treturn\n\n\ti = 0\n\tmin_index = 0\n\tmin_value = arr[0]\n\twhile i < len(arr) - 1:\n\t\ti += 1\n\t\tif arr[i] < min_value:\n\t\t\tmin_value = arr[i]\n\t\t\tmin_index = i\n\n\n\tprint(f\"arr:{arr},\\nmin:{min_value}; index:{min_index}\")\n\treturn min_index",
"def first(self): #TODO\r\n result = []\r\n for x in self.first_lookup(self.initialsymbol):\r\n result += x.first()\r\n if len(result) == 1:\r\n return result[0]\r\n return Choice(result)",
"def firstNotRepeatingCharacter(s):\n\n # even positions = number of characters\n # odd positions = last occurrence of that character\n scounter = [0] * 52\n\n for i in range(len(s)):\n char_pos = (ord(s[i]) - 97) * 2\n scounter[char_pos] += 1\n scounter[char_pos + 1] = i\n\n last_occurrence = len(s)\n for i in range(0, 52, 2):\n if scounter[i] == 1 and scounter[i + 1] < last_occurrence:\n last_occurrence = scounter[i + 1]\n\n if last_occurrence < len(s):\n return s[last_occurrence]\n\n return '_'",
"def symbolic_start(self):\n return self.symbolic_bounds[0]",
"def first_not_repeating_character(string):\n counter = Counter(string)\n for key, value in counter.items():\n if value <= 1:\n return key\n break\n return '_'",
"def check_symbol(s,next_index,symbol):\n try:\n next_index = jump_over_space(s,next_index)\n if s[next_index:next_index + len(symbol)] == symbol:\n return next_index + len(symbol) # We must ignore the symbol\n except IndexError:\n return False\n else:\n return False",
"def start_with_the_beggining(rna: str):\n return 0",
"def _start_key(expr):\n try:\n start = expr.start\n except (NotImplementedError,\n AttributeError, ValueError):\n start = S.Infinity\n return start",
"def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)",
"def get_opposite_character(st, index):\n if st[index] is 'A':\n return 'C'\n elif st[index] is 'C':\n return 'A'\n elif st[index] is 'D':\n return 'B'\n elif st[index] is 'B':\n return 'D'",
"def first_missing_positive_int_linear(nums):\n\t\n\t# Here's the trick: the first missing positive number must be \n\t# between 1 and len(array) + 1 \t\n\ts = set(nums)\n\t\n\tfor i in range(1, len(nums) + 1):\n\t\tif i not in s:\n\t\t\treturn i",
"def get_index(line):\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return dummy_i",
"def find_minima(s, wrapped=False):\n return _extrema(s, lambda x: x == +1, wrapped)",
"def start_chop_and_trans(s, strict=True):\n if strict:\n assert s[-3:] in stops, s\n assert len(s) % 3 == 0\n for match in re_starts.finditer(s):\n # Must check the start is in frame\n start = match.start()\n if start % 3 == 0:\n n = s[start:]\n assert len(n) % 3 == 0, \"%s is len %i\" % (n, len(n))\n if strict:\n t = translate(n, options.table, cds=True)\n else:\n # Use when missing stop codon,\n t = \"M\" + translate(n[3:], options.table, to_stop=True)\n return start, n, t\n return None, None, None",
"def first(self):\n\n for literal in self.literals:\n return literal",
"def first_recurring_char(s: str) -> str:\n h = {} # using dictionary as hash\n for ch in s:\n if ch in h:\n return ch\n\n h[ch] = 0\n return None",
"def simple_index_of_min(deck, firstIndex):\n min_val = deck[firstIndex] \n min_idx = firstIndex\n for i in range(firstIndex + 1, len(deck)):\n if min_val > deck[i]:\n min_val = deck[i]\n min_idx = i\n return min_idx",
"def index(sequence, i):\n try:\n return sequence[i]\n except IndexError:\n return u\"\"",
"def get_initial_symb(self):\n return self.symb_val[0]",
"def index_tag_seq(words, seq, strict=False):\n tags = get_tag_seq(words)\n nouns = 'NN' in seq or 'NNS' in seq\n alt_seq = None\n if strict is False:\n if nouns is True:\n alt_seq = [\n 'NNS' if x == 'NN' else \n 'NN' if x == 'NNS' else \n x for x in seq\n ] \n \n for i in range(len(tags)):\n check_seq = tags[i:i+len(seq)]\n if check_seq == seq:\n return i\n if nouns:\n if check_seq == alt_seq:\n return i\n\n return -1",
"def index_of_min(deck, firstIndex):\n return deck.index(np.min(deck[firstIndex:]))",
"def getFisrtCharThatAppearsOnce(myString):\n myString = \"\".join(myString.lower().split())\n charDict = {key:[0, 0] for key in string.ascii_lowercase}\n for pos, char in enumerate(myString):\n charDict[char][0] += 1\n charDict[char][1] = pos\n charDict = {key:values for key, values in charDict.items() if values[0] == 1}\n sortedCharDict = sorted(charDict.items(), key=operator.itemgetter(1))\n strOut = sortedCharDict[0][0] if sortedCharDict else False\n return strOut"
] | [
"0.6179377",
"0.61144847",
"0.60682476",
"0.6048123",
"0.60406363",
"0.5949297",
"0.5831545",
"0.58217716",
"0.5802844",
"0.57483494",
"0.57339954",
"0.57324356",
"0.5702078",
"0.56688476",
"0.5591062",
"0.5555485",
"0.55398554",
"0.5539652",
"0.55382264",
"0.5532169",
"0.5529822",
"0.55287296",
"0.5512742",
"0.5501488",
"0.548561",
"0.5461318",
"0.54583216",
"0.5382157",
"0.5370878",
"0.5368634"
] | 0.7133279 | 0 |
Sequence disambiguate should remove degenerate bases | def test_disambiguate(self):
self.assertEqual(self.RNA("").disambiguate(), "")
self.assertEqual(
self.RNA("AGCUGAUGUA--CAGU").disambiguate(), "AGCUGAUGUA--CAGU"
)
self.assertEqual(
self.RNA("AUn-yrs-wkmCGwmrNMWRKY").disambiguate("strip"), "AU--CG"
)
s = self.RNA("AUn-yrs-wkmCGwmrNMWRKY")
t = s.disambiguate("random")
u = s.disambiguate("random")
for i, j in zip(str(s), str(t)):
if i in s.moltype.degenerates:
assert j in s.moltype.degenerates[i]
else:
assert i == j
self.assertNotEqual(t, u)
self.assertEqual(len(s), len(t)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)",
"def back_translate(self):\n base = Bio.Alphabet._get_base_alphabet(self.alphabet)\n if not isinstance(base, Bio.Alphabet.ProteinAlphabet):\n raise ValueError(\"Nucleic acids cannot be back translated!\")\n\n # right now this just uses the most-prevalent codon for each AA\n # TODO: select codons with a weighted average using random.choice\n return Seq(\n \"\".join([CodonUsage.SynonymousCodons[seq3(AA).upper()][0] for AA in str(self)]),\n IUPAC.unambiguous_dna,\n )",
"def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)",
"def check_and_clean_sequence(sequence, alphabet):\n if set(sequence).issubset(alphabet):\n return sequence\n else:\n return cleaning_ambiguous_bases(sequence)",
"def reverse_complement(base):\n try:\n assert isinstance(base, str)\n assert len(base) is 1\n rc = str.maketrans('ACGT', 'TGCA') # Traslation table for reverse complentary sequences\n return base.translate(rc)\n except AssertionError:\n raise NotABaseError",
"def degenerate2(s):\n from lasagna.utils import base_repr\n\n n = s.count('N')\n seed = hash(s) % (2**32 - 1)\n rng = random.Random(seed)\n random_base_ix = lambda: base_repr(rng.randint(0, 4**(n + 1) - 1), 4, n + 1)[::-1]\n while True:\n bases = ['ACTG'[int(j)] for j in random_base_ix()]\n s2 = s\n for b in bases:\n s2 = s2.replace('N', b, 1)\n yield s2",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def complement_this(seq):\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_seq = ''\n for nuc in seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_seq += compliment_dict[nuc]\n return rev_seq",
"def test_is_degenerate(self):\n assert not self.RNA(\"\").is_degenerate()\n assert not self.RNA(\"UACGCUACAUGuacgucaguGCUAGCUA---ACGUCAG\").is_degenerate()\n assert self.RNA(\"N\").is_degenerate()\n assert self.RNA(\"R\").is_degenerate()\n assert self.RNA(\"y\").is_degenerate()\n assert self.RNA(\"GCAUguagcucgUCAGUCAGUACgUgcasCUAG\").is_degenerate()\n assert self.RNA(\"ACGYAUGCUGYWWNMNuwbycwuybcwbwub\").is_degenerate()",
"def ReverseComplement1(seq):\n seq_dict = {'A':'T','T':'A','G':'C','C':'G'}\n return \"\".join([seq_dict[base] for base in reversed(seq)])",
"def reverse_rna_complement(seq):\n\n seq_upper = seq.isupper()\n\n seq = seq[::-1]\n\n seq = seq.upper()\n\n #compute complement\n seq = seq.replace('A','u')\n seq = seq.replace('T','a')\n seq = seq.replace('G','c')\n seq = seq.replace('C','g')\n\n if seq_upper:\n return seq.upper()\n else:\n return seq",
"def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def removeDuplicates(seq):\n\n pass",
"def mask_sequence(seq, gaps):\n seq = [i.upper() for i in seq]\n for gap in gaps:\n for i in range(gap[0] - 1, gap[1]):\n try:\n seq[i] = seq[i].lower()\n except:\n continue\n return ''.join(seq)",
"def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())",
"def degap_fasta_aln(seqs):\r\n\r\n for (label, seq) in seqs:\r\n yield DNASequence(seq, id=label).degap()",
"def remove_guff(seqs):\n new_seqs = {}\n stop_codons = [\"TGA\", \"TAA\", \"TAG\"]\n for key, value in seqs.items():\n new_seq = \"\"\n for i in range(len(value)-2):\n if value[i:i+3] == \"ATG\":\n break\n\n for j in range(i, len(value)-2, 3):\n if value[j:j+3] in stop_codons:\n new_seqs[key] = value[i:j+3]\n break\n\n return new_seqs",
"def test_strip_degenerate(self):\n self.assertEqual(self.RNA(\"UCAG-\").strip_degenerate(), \"UCAG-\")\n self.assertEqual(self.RNA(\"NRYSW\").strip_degenerate(), \"\")\n self.assertEqual(self.RNA(\"USNG\").strip_degenerate(), \"UG\")",
"def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)",
"def test_first_degenerate(self):\n self.assertEqual(self.RNA(\"\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"a\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"UCGACA--CU-gacucaguacgua\").first_degenerate(), None)\n self.assertEqual(self.RNA(\"nCAGU\").first_degenerate(), 0)\n self.assertEqual(self.RNA(\"CUGguagvAUG\").first_degenerate(), 7)\n self.assertEqual(self.RNA(\"ACUGCUAacgud\").first_degenerate(), 11)",
"def complement(seq):\n complement_dict = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}\n seq_list = list(seq)\n seq_list = [complement_dict[base] for base in seq_list]\n return ''.join(seq_list)",
"def complement_base(base,material='DNA'):\n if base in 'Aa':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base in 'TtUu':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'",
"def convert_ambigs(strings, alph):\n ms = alph.translator(False)\n for i in range(len(strings)):\n strings[i] = strings[i].translate(ms)\n return(strings)",
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def complement_base(base, material='DNA'):\n\n if base == 'A' or base == 'a':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base == 'T' or base == 't' or base == 'U' or base == 'u':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'",
"def checkGuide(seq, plen, pam, rpam, is_upstream_pam):\n if is_upstream_pam:\n if pam.match(seq[:plen]):\n yield seq, \"+\"\n if rpam.match(seq[-plen:]):\n yield reverseComplement(seq), \"-\"\n else:\n if pam.match(seq[-plen:]):\n yield seq, \"+\"\n if rpam.match(seq[:plen]):\n yield reverseComplement(seq), \"-\"\n #yield \"\", \"\"",
"def forbid_sequence(*s,min_len=2):\n assert len(s) >= 1\n notes = [part for part in tools.iter_melodies(*s)]\n\n for start in range(len(notes)):\n for end in range(start,len(notes)):\n if end - start < min_len:\n continue\n\n # try a motif\n motif = []\n for i in range(start,end+1):\n motif.extend(notes[i])\n\n # try a following\n part_nb = end - start + 1\n try:\n following = []\n for i in range(end+1, part_nb + end + 1):\n following.extend(notes[i])\n except IndexError:\n break\n\n # is there a sequence?\n try:\n if tools.matchSequence(motif, following, s[0].scale):\n warn(f\"Sequence in {(s.title for s in s)}.\",motif,following)\n except ValueError:\n continue",
"def complement(seq):\n if PY3:\n table = str.maketrans('ACTGNactg', 'TGACNtgac')\n elif PY2:\n table = string.maketrans('ACTGNactg', 'TGACNtgac')\n return str(seq).translate(table)",
"def backtranslate(p_seq, n_seq):\r\n # Keep track of the new sequence. Also keep track of which codon we are\r\n # actually processing (gaps don't count)\r\n newseq = ''\r\n codon = 0\r\n for aa in p_seq:\r\n if aa == '-':\r\n newseq += '---'\r\n else:\r\n newseq += n_seq[codon*3:(codon*3) + 3]\r\n codon += 1\r\n return newseq"
] | [
"0.7825706",
"0.632825",
"0.62402546",
"0.5868822",
"0.58633727",
"0.5861185",
"0.58539385",
"0.5849037",
"0.57864356",
"0.5770885",
"0.569351",
"0.5685995",
"0.55985564",
"0.5540336",
"0.55358434",
"0.5530427",
"0.5522565",
"0.5515272",
"0.55127937",
"0.5512095",
"0.55023056",
"0.5495544",
"0.5488593",
"0.54727495",
"0.5454149",
"0.5427608",
"0.54129857",
"0.54122174",
"0.54112506",
"0.53799814"
] | 0.67777115 | 1 |
Sequence gap_indices should return correct gap positions | def test_gap_indices(self):
self.assertEqual(self.RNA("").gap_indices(), [])
self.assertEqual(self.RNA("ACUGUCAGUACGHSDKCUCDNNS").gap_indices(), [])
self.assertEqual(self.RNA("GUACGUACAKDC-SDHDSK").gap_indices(), [12])
self.assertEqual(self.RNA("-DSHUHDS").gap_indices(), [0])
self.assertEqual(self.RNA("UACHASADS-").gap_indices(), [9])
self.assertEqual(
self.RNA("---CGAUgCAU---ACGHc---ACGUCAGU---").gap_indices(),
[0, 1, 2, 11, 12, 13, 19, 20, 21, 30, 31, 32],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gap_indices(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_indices()\n self.assertEqual(v, array([0, 1, 3, 4, 8]))\n r = self.RNA(\"AC\")\n v = r.gap_indices()\n self.assertEqual(v, array([])) # note: always returns array\n r = self.RNA(\"-?\")\n v = r.gap_indices()\n self.assertEqual(v, array([0, 1]))",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def pos_gaps(df, gaps):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0)#.max(axis=0).ge(conservation * nb_rows)\n\n ge = []\n for i in value_counts.columns:\n try:\n if value_counts[i]['-'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n try:\n if value_counts[i]['.'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n return ge",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def _scan_forward( gaps, center, dist ):\n\n n = len(gaps)\n for i in range( center, n ):\n idx_gap = i\n gap = gaps[idx_gap]\n if gap >= dist: return idx_gap+1\n return n+1",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def get_positions(start_idx, end_idx, length):\n return list(range(-start_idx, 0)) + [0] * (end_idx - start_idx + 1) + \\\n list(range(1, length - end_idx))",
"def get_positions(start_idx, end_idx, length):\n return list(range(-start_idx, 0)) + [0]*(end_idx - start_idx + 1) + \\\n list(range(1, length-end_idx))",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def gaps(df):\n return [(round(df[i][\"o\"] - df[i - 1][\"c\"], 2)) for i in range(1, len(df))]",
"def step_indices(group_idx):\n ilen = step_count(group_idx) + 1\n indices = np.empty(ilen, np.int64)\n indices[0] = 0\n indices[-1] = group_idx.size\n cmp_pos = 0\n ri = 1\n for i in range(len(group_idx)):\n if group_idx[cmp_pos] != group_idx[i]:\n cmp_pos = i\n indices[ri] = i\n ri += 1\n return indices",
"def throw_random_gap_list(lengths, mask, save_interval_func, allow_overlap=False):\n # Use mask to find the gaps; gaps is a list of (length,start,end)\n lengths = [length for length in lengths if length > 0]\n min_length = min(lengths)\n gaps = []\n start = end = 0\n while True:\n start = mask.next_clear(end)\n if start == mask.size:\n break\n end = mask.next_set(start)\n if end - start >= min_length:\n gaps.append((end - start, start, None))\n # Sort (long regions first)\n gaps.sort()\n gaps.reverse()\n # Throw\n throw_random_private(lengths, gaps, save_interval_func, allow_overlap, three_args=False)",
"def prime_gap_plots(maxp, gap_sizes):\n P = prime_range(maxp + 1)\n v = [[(0, 0)] for i in gap_sizes]\n k = dict([(g, i) for i, g in enumerate(gap_sizes)])\n for i in range(len(P) - 1):\n g = P[i + 1] - P[i]\n if g in k:\n w = v[k[g]]\n w.append((P[i + 1], w[-1][1]))\n w.append((P[i + 1], w[-1][1] + 1))\n return v",
"def detect_time_gaps(st, min_samples=10, epsilon=1e-20, thresh_disc=100):\n # Read data\n tdata = st[0].data\n indz = np.where(abs(tdata) < epsilon)[0] # indices where we have 0\n diff_indz = indz[min_samples:] - indz[0:-min_samples] # Need min_samples consecutive samples with 0's to identify as time gap\n ind_des = np.where(diff_indz == min_samples)[0] # desired indices: value is equal to min_samples in the time gap\n ind_gap = indz[ind_des] # indices of the time gaps\n gap_start_ind = []\n gap_end_ind = []\n if (0 == len(ind_gap)): \n num_gaps = 0\n else:\n print \"Warning: %s time gap(s) with zeros found\"%len(ind_gap)\n # May have more than 1 time gap\n ind_diff = np.diff(ind_gap) # discontinuities in indices of the time gaps, if there is more than 1 time gap\n ind_disc = np.where(ind_diff > thresh_disc)[0]\n # N-1 time gaps\n curr_ind_start = ind_gap[0]\n for igap in range(len(ind_disc)): # do not enter this loop if ind_disc is empty\n gap_start_ind.append(curr_ind_start)\n last_index = ind_gap[ind_disc[igap]] + min_samples\n gap_end_ind.append(last_index)\n curr_ind_start = ind_gap[ind_disc[igap]+1] # update for next iteration\n # Last time gap\n gap_start_ind.append(curr_ind_start)\n gap_end_ind.append(ind_gap[-1] + min_samples)\n num_gaps = len(gap_start_ind)\n\n return [num_gaps, gap_start_ind, gap_end_ind]",
"def create_position_ids_from_input_ids(self, x):\r\n mask = x.ne(self.padding_idx).long()\r\n incremental_indicies = torch.cumsum(mask, dim=1) * mask\r\n return incremental_indicies + self.padding_idx",
"def get_insert_indices(my_timestamps, existing_timestamps):\n existing_timestep = existing_timestamps[1] - existing_timestamps[0]\n my_timestep = my_timestamps[1] - my_timestamps[0]\n\n # make sure the time delta is ok\n if existing_timestep != my_timestep:\n raise Exception(\"Existing dataset has different timestep (mine=%d, existing=%d)\"\n % (my_timestep, existing_timestep))\n\n my_offset = (my_timestamps[0] - existing_timestamps[0]) // existing_timestep\n my_end = my_offset + len(my_timestamps)\n\n return my_offset, my_end",
"def _scan_reverse(gaps, center, dist):\n\n for i in range( 0, center ):\n idx_gap = center - 1 - i\n gap = gaps[idx_gap]\n if gap >= dist: return idx_gap+1\n return 0",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def get_split_positions(read, min_gap):\n cigar = read.cigar\n # Cigar string is a list of tuples:\n if len(read.cigar) <= 1:\n return [] # no break points = empty list of break point positions\n\n ##\n # read has break points if cigar string is longer than 1\n\n # This is a list with the breakpoint tuples\n list_of_break_point_positions = []\n\n # set the current position on the genome\n if cigar[0][0] == 0:\n current_pos = int(read.positions[0])\n else:\n current_pos = int(read.positions[0]) - cigar[0][1]\n\n # Search for breakpoints in cigar and get the corresponding position on the genome\n\n i = 0\n for info_tuple in cigar:\n # If current segment in cigar string is aligned.\n if info_tuple[0] == 0:\n # Special case when at first segment:\n if i == 0 and cigar[1][1] >= min_gap: # first end-split\n list_of_break_point_positions.append((current_pos + info_tuple[1] , True))\n\n # Special case when at last segment:\n elif i == len(cigar) - 1 and cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n\n # Internal segments:\n elif cigar[i - 1][1] >= min_gap and cigar[i + 1][1] >= min_gap:\n if cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n if cigar[i + 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos + info_tuple[1] - 1, True))\n i += 1\n\n current_pos += info_tuple[1]\n\n return(list_of_break_point_positions)",
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def gaps(args):\n from jcvi.formats.base import DictFile\n from jcvi.apps.base import popen\n from jcvi.utils.cbook import percentage\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\"--bdist\", default=0, type=\"int\", help=\"Base pair distance\")\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n idsfile, frfile, gapsbed = args\n bdist = opts.bdist\n d = DictFile(frfile, keypos=1, valuepos=2)\n bedfile = idsfile + \".bed\"\n fw = open(bedfile, \"w\")\n fp = open(idsfile)\n total = 0\n for row in fp:\n id = row.strip()\n hit = d[id]\n tag, pos = get_tag(hit, None)\n seqid, start, end = pos\n start, end = max(start - bdist, 1), end + bdist\n print(\"\\t\".join(str(x) for x in (seqid, start - 1, end, id)), file=fw)\n total += 1\n fw.close()\n\n cmd = \"intersectBed -a {0} -b {1} -v | wc -l\".format(bedfile, gapsbed)\n not_in_gaps = popen(cmd).read()\n not_in_gaps = int(not_in_gaps)\n in_gaps = total - not_in_gaps\n print(\"Ids in gaps: {1}\".format(total, percentage(in_gaps, total)), file=sys.stderr)",
"def gap(l):\n if l < 3:\n return 0\n\n # places one person in the middle of the gap,\n # and starts over on the new smaller gaps on either side.\n return gap(int(l / 2)) + 1 + gap(ceil(l / 2) - 1)",
"def _seq2Indices(self, sequence, vocab, start, end, unknown):\n if start:\n sequence.insert(0, start)\n if end:\n sequence.append(end)\n return [vocab[token] if token in vocab else vocab[unknown] for token in sequence]",
"def get_move_indexes(i, j):\n return (i, j), (j, n - 1 - i), (n - 1 - i, n - 1 - j), (n - 1 - j, i)",
"def seq_along(along_with: Iterable[Any], base0_: bool = None) -> ArrayLikeType:\n base0_ = get_option(\"index.base.0\", base0_)\n return Array(range(len(along_with))) + int(not base0_)",
"def nearest_gap(seq,pos):\n # Catch negative sequence positions\n if pos < 0:\n raise IndexError, \"Sequence positions cannot be negative: %d\" % pos\n \n # If pos contains a gap, that's the closest gap\n if seq[pos] == '-':\n return pos\n \n # create a list to store the nearest gap character in the 5' and\n # 3' directions\n choices = []\n # find the nearest gap 5' of pos\n try:\n gap_index = ''.join(seq[:pos]).rindex('-')\n distance = pos - gap_index\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # find the nearest gap 3' of pos\n try:\n gap_index = pos + ''.join(seq[pos:]).index('-')\n distance = gap_index - pos\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # error if there are no gaps in the sequence\n if not choices:\n raise UnalignableSequenceError,\\\n \"Can't adjust alignment because there are too few gaps to \"+\\\n \"remove in the aligned candidate to reduce to the length of \"+\\\n \"the template alignment (i.e., candidate adds too many insertions \"+\\\n \"during pairwise alignment).\"\n \n # return the gap_index of the choice with the smaller distance -- if there\n # is a tie, will delete the 5' gap (which is what original NAST does)\n return min(choices)[1]",
"def _get_batches_starting_indexes(self):\n\n indexes = numpy.arange(0, self.num_frames, self.recurrence)\n indexes = numpy.random.permutation(indexes)\n\n # Shift starting indexes by self.recurrence//2 half the time\n if self.batch_num % 2 == 1:\n indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]\n indexes += self.recurrence // 2\n self.batch_num += 1\n\n num_indexes = self.batch_size // self.recurrence\n batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]\n\n return batches_starting_indexes",
"def _gap_account(self, spanrels):\n\t\t#Add extra spans in the keys\n\t\tgap_spanrels = copy.deepcopy(spanrels)\n\t\tfor head in spanrels:\n\t\t\tfor relation in spanrels[head]:\n\t\t\t\tl,r = relation[0], relation[1]\n\t\t\t\tif l != 0 and l not in self.wordspans:\n\t\t\t\t\tgap_spanrels[head].add((l-1,r))\n\t\t\t\tif r+1 not in self.wordspans and r+2 in self.wordspans:\n\t\t\t\t\tgap_spanrels[head].add((l,r+1))\n\t\tfor head in spanrels:\n\t\t\tnheads = []\n\t\t\tl,r = head[0], head[1]\n\t\t\tif l!=0 and l not in self.wordspans:\n\t\t\t\tnheads.append((l-1,r))\n\t\t\tif r+1 not in self.wordspans and r+2 in self.wordspans:\n\t\t\t\tnheads.append((l,r+1))\n\t\t\tfor nhead in nheads:\n\t\t\t\tgap_spanrels[nhead] = set([])\n\t\t\t\tfor key in gap_spanrels[head]:\n\t\t\t\t\tif key[1] <= nhead[0] or key[0] >= nhead[1]:\n\t\t\t\t\t\tgap_spanrels[nhead].add(key)\n\t\treturn gap_spanrels"
] | [
"0.6974337",
"0.6601554",
"0.64816505",
"0.63461",
"0.6027567",
"0.5972312",
"0.5915429",
"0.586718",
"0.58583003",
"0.58261764",
"0.5799375",
"0.5784183",
"0.57565933",
"0.57559055",
"0.57435954",
"0.57185775",
"0.5682374",
"0.56467414",
"0.56163996",
"0.5611873",
"0.561138",
"0.5592568",
"0.5590252",
"0.55862945",
"0.55622303",
"0.5561683",
"0.553156",
"0.5528514",
"0.5516186",
"0.55161685"
] | 0.74248666 | 0 |
Sequence gap_vector should return correct gap positions | def test_gap_vector(self):
def g(x):
return self.RNA(x).gap_vector()
self.assertEqual(g(""), [])
self.assertEqual(g("ACUGUCAGUACGHCSDKCCUCCDNCNS"), [False] * 27)
self.assertEqual(
g("GUACGUAACAKADC-SDAHADSAK"),
list(map(bool, list(map(int, "000000000000001000000000")))),
)
self.assertEqual(g("-DSHSUHDSS"), list(map(bool, list(map(int, "1000000000")))))
self.assertEqual(
g("UACHASCAGDS-"), list(map(bool, list(map(int, "000000000001"))))
)
self.assertEqual(
g("---CGAUgCAU---ACGHc---ACGUCAGU--?"),
list(map(bool, list(map(int, "111000000001110000011100000000111")))),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def pos_gaps(df, gaps):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0)#.max(axis=0).ge(conservation * nb_rows)\n\n ge = []\n for i in value_counts.columns:\n try:\n if value_counts[i]['-'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n try:\n if value_counts[i]['.'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n return ge",
"def test_gap_indices(self):\n self.assertEqual(self.RNA(\"\").gap_indices(), [])\n self.assertEqual(self.RNA(\"ACUGUCAGUACGHSDKCUCDNNS\").gap_indices(), [])\n self.assertEqual(self.RNA(\"GUACGUACAKDC-SDHDSK\").gap_indices(), [12])\n self.assertEqual(self.RNA(\"-DSHUHDS\").gap_indices(), [0])\n self.assertEqual(self.RNA(\"UACHASADS-\").gap_indices(), [9])\n self.assertEqual(\n self.RNA(\"---CGAUgCAU---ACGHc---ACGUCAGU---\").gap_indices(),\n [0, 1, 2, 11, 12, 13, 19, 20, 21, 30, 31, 32],\n )",
"def gap(l):\n if l < 3:\n return 0\n\n # places one person in the middle of the gap,\n # and starts over on the new smaller gaps on either side.\n return gap(int(l / 2)) + 1 + gap(ceil(l / 2) - 1)",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def gaps(df):\n return [(round(df[i][\"o\"] - df[i - 1][\"c\"], 2)) for i in range(1, len(df))]",
"def test_gap_indices(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_indices()\n self.assertEqual(v, array([0, 1, 3, 4, 8]))\n r = self.RNA(\"AC\")\n v = r.gap_indices()\n self.assertEqual(v, array([])) # note: always returns array\n r = self.RNA(\"-?\")\n v = r.gap_indices()\n self.assertEqual(v, array([0, 1]))",
"def test_gap_array(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1, 0, 1, 1, 0, 0, 0, 1]))\n r = self.RNA(\"AC\")\n v = r.gap_array()\n self.assertEqual(v, array([0, 0]))\n r = self.RNA(\"-?\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1]))",
"def throw_random_gap_list(lengths, mask, save_interval_func, allow_overlap=False):\n # Use mask to find the gaps; gaps is a list of (length,start,end)\n lengths = [length for length in lengths if length > 0]\n min_length = min(lengths)\n gaps = []\n start = end = 0\n while True:\n start = mask.next_clear(end)\n if start == mask.size:\n break\n end = mask.next_set(start)\n if end - start >= min_length:\n gaps.append((end - start, start, None))\n # Sort (long regions first)\n gaps.sort()\n gaps.reverse()\n # Throw\n throw_random_private(lengths, gaps, save_interval_func, allow_overlap, three_args=False)",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def gap_insertion_sort(a_list, start, gap):\n\n for i in range(start + gap, len(a_list), gap):\n current_value = a_list[i]\n position = i\n while position >= gap and a_list[position - gap] > current_value:\n a_list[position] = a_list[position - gap]\n position = position - gap\n\n a_list[position] = current_value",
"def gap_insertion_sort(alist, start, gap):\n for i in range(start+gap, len(alist), gap):\n current_val = alist[i]\n position = i\n\n while position >= gap and alist[position-gap] > current_val:\n alist[position] = alist[position-gap]\n position = position - gap\n\n alist[position] = current_val",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def introduce_terminal_gaps(template,aligned_template,aligned_candidate):\n \n # count the 5' gaps in the original aligned template\n original_five_prime_gaps = 0\n for c in template:\n if c == '-':\n original_five_prime_gaps +=1\n else:\n break\n \n # count the 5' gaps already existing in the pairwise aligned template\n # (because we don't need to add these)\n aligned_template_five_prime_gaps = 0\n for c in aligned_template:\n if c == '-':\n aligned_template_five_prime_gaps += 1\n else:\n break\n \n # compute the number of 5' gaps that need to be added to get to the\n # original alignment length\n five_prime_gaps_to_add = \\\n original_five_prime_gaps - aligned_template_five_prime_gaps\n \n # count the 3' gaps in the original aligned template\n original_three_prime_gaps = 0\n for c in reversed(template):\n if c == '-':\n original_three_prime_gaps +=1\n else:\n break\n \n # count the 3' gaps already existing in the pairwise aligned template\n # (because we don't need to add these)\n aligned_template_three_prime_gaps = 0\n for c in reversed(aligned_template):\n if c == '-':\n aligned_template_three_prime_gaps += 1\n else:\n break\n \n # compute the number of 3' gaps that need to be added to get to the\n # original alignment length\n three_prime_gaps_to_add = \\\n original_three_prime_gaps - aligned_template_three_prime_gaps\n\n # return the sequence with the 5' and 3' gaps added\n return DNA.makeSequence(''.join([\\\n '-'*five_prime_gaps_to_add,\\\n str(aligned_candidate),\\\n '-'*three_prime_gaps_to_add]),\\\n Name=aligned_candidate.Name)",
"def nearest_gap(seq,pos):\n # Catch negative sequence positions\n if pos < 0:\n raise IndexError, \"Sequence positions cannot be negative: %d\" % pos\n \n # If pos contains a gap, that's the closest gap\n if seq[pos] == '-':\n return pos\n \n # create a list to store the nearest gap character in the 5' and\n # 3' directions\n choices = []\n # find the nearest gap 5' of pos\n try:\n gap_index = ''.join(seq[:pos]).rindex('-')\n distance = pos - gap_index\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # find the nearest gap 3' of pos\n try:\n gap_index = pos + ''.join(seq[pos:]).index('-')\n distance = gap_index - pos\n choices.append((distance,gap_index))\n except ValueError:\n pass\n \n # error if there are no gaps in the sequence\n if not choices:\n raise UnalignableSequenceError,\\\n \"Can't adjust alignment because there are too few gaps to \"+\\\n \"remove in the aligned candidate to reduce to the length of \"+\\\n \"the template alignment (i.e., candidate adds too many insertions \"+\\\n \"during pairwise alignment).\"\n \n # return the gap_index of the choice with the smaller distance -- if there\n # is a tie, will delete the 5' gap (which is what original NAST does)\n return min(choices)[1]",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def get_interval_list_predefined_gap(traces_list, gap_interval):\n\n intv = 0\n interval_list = []\n pre_traces = []\n\n for timst in traces_list:\n timst = timst.replace(microsecond=0)\n pre_traces.append(timst)\n\n for i in range(0, len(pre_traces)-1):\n iat = (pre_traces[i+1]-pre_traces[i]).total_seconds()\n if iat <= gap_interval:\n current_trace = pre_traces[i]\n while current_trace < pre_traces[i+1]:\n interval_list.append(current_trace)\n current_trace = current_trace + datetime.timedelta(0,1)\n else:\n interval_list.append(pre_traces[i])\n\n if i == len(pre_traces)-2:\n interval_list.append(pre_traces[i+1])\n\n return interval_list",
"def prime_gap_plots(maxp, gap_sizes):\n P = prime_range(maxp + 1)\n v = [[(0, 0)] for i in gap_sizes]\n k = dict([(g, i) for i, g in enumerate(gap_sizes)])\n for i in range(len(P) - 1):\n g = P[i + 1] - P[i]\n if g in k:\n w = v[k[g]]\n w.append((P[i + 1], w[-1][1]))\n w.append((P[i + 1], w[-1][1] + 1))\n return v",
"def detect_time_gaps(st, min_samples=10, epsilon=1e-20, thresh_disc=100):\n # Read data\n tdata = st[0].data\n indz = np.where(abs(tdata) < epsilon)[0] # indices where we have 0\n diff_indz = indz[min_samples:] - indz[0:-min_samples] # Need min_samples consecutive samples with 0's to identify as time gap\n ind_des = np.where(diff_indz == min_samples)[0] # desired indices: value is equal to min_samples in the time gap\n ind_gap = indz[ind_des] # indices of the time gaps\n gap_start_ind = []\n gap_end_ind = []\n if (0 == len(ind_gap)): \n num_gaps = 0\n else:\n print \"Warning: %s time gap(s) with zeros found\"%len(ind_gap)\n # May have more than 1 time gap\n ind_diff = np.diff(ind_gap) # discontinuities in indices of the time gaps, if there is more than 1 time gap\n ind_disc = np.where(ind_diff > thresh_disc)[0]\n # N-1 time gaps\n curr_ind_start = ind_gap[0]\n for igap in range(len(ind_disc)): # do not enter this loop if ind_disc is empty\n gap_start_ind.append(curr_ind_start)\n last_index = ind_gap[ind_disc[igap]] + min_samples\n gap_end_ind.append(last_index)\n curr_ind_start = ind_gap[ind_disc[igap]+1] # update for next iteration\n # Last time gap\n gap_start_ind.append(curr_ind_start)\n gap_end_ind.append(ind_gap[-1] + min_samples)\n num_gaps = len(gap_start_ind)\n\n return [num_gaps, gap_start_ind, gap_end_ind]",
"def gap_insertion_sort(num_list, start, gap):\n\n # Creates sublists for the sublist gap\n for i in range(start + gap, len(num_list), gap):\n\n # New item to be inserted into the sublist gap\n current_value = num_list[i]\n position = i\n\n while position >= gap and num_list[position - gap] > current_value:\n # Shift item to current position\n num_list[position] = num_list[position - gap]\n position -= gap\n\n # Sets new position to current value\n num_list[position] = current_value",
"def test_not_gap(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertTrue(not_gap(m[0]))\n self.assertFalse(not_gap(m[5]))",
"def gap_to_next_car(self):\n c = self.next_car()\n if c.x > self.x:\n return c.x - c.length_in_cells - self.x\n elif c.x < self.x:\n return (self.road.N - self.x) + (c.x - c.length_in_cells)\n elif c.x == self.x:\n return self.road.N",
"def _scan_forward( gaps, center, dist ):\n\n n = len(gaps)\n for i in range( center, n ):\n idx_gap = i\n gap = gaps[idx_gap]\n if gap >= dist: return idx_gap+1\n return n+1",
"def get_split_positions(read, min_gap):\n cigar = read.cigar\n # Cigar string is a list of tuples:\n if len(read.cigar) <= 1:\n return [] # no break points = empty list of break point positions\n\n ##\n # read has break points if cigar string is longer than 1\n\n # This is a list with the breakpoint tuples\n list_of_break_point_positions = []\n\n # set the current position on the genome\n if cigar[0][0] == 0:\n current_pos = int(read.positions[0])\n else:\n current_pos = int(read.positions[0]) - cigar[0][1]\n\n # Search for breakpoints in cigar and get the corresponding position on the genome\n\n i = 0\n for info_tuple in cigar:\n # If current segment in cigar string is aligned.\n if info_tuple[0] == 0:\n # Special case when at first segment:\n if i == 0 and cigar[1][1] >= min_gap: # first end-split\n list_of_break_point_positions.append((current_pos + info_tuple[1] , True))\n\n # Special case when at last segment:\n elif i == len(cigar) - 1 and cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n\n # Internal segments:\n elif cigar[i - 1][1] >= min_gap and cigar[i + 1][1] >= min_gap:\n if cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n if cigar[i + 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos + info_tuple[1] - 1, True))\n i += 1\n\n current_pos += info_tuple[1]\n\n return(list_of_break_point_positions)",
"def gaps(args):\n from jcvi.formats.base import DictFile\n from jcvi.apps.base import popen\n from jcvi.utils.cbook import percentage\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\"--bdist\", default=0, type=\"int\", help=\"Base pair distance\")\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n idsfile, frfile, gapsbed = args\n bdist = opts.bdist\n d = DictFile(frfile, keypos=1, valuepos=2)\n bedfile = idsfile + \".bed\"\n fw = open(bedfile, \"w\")\n fp = open(idsfile)\n total = 0\n for row in fp:\n id = row.strip()\n hit = d[id]\n tag, pos = get_tag(hit, None)\n seqid, start, end = pos\n start, end = max(start - bdist, 1), end + bdist\n print(\"\\t\".join(str(x) for x in (seqid, start - 1, end, id)), file=fw)\n total += 1\n fw.close()\n\n cmd = \"intersectBed -a {0} -b {1} -v | wc -l\".format(bedfile, gapsbed)\n not_in_gaps = popen(cmd).read()\n not_in_gaps = int(not_in_gaps)\n in_gaps = total - not_in_gaps\n print(\"Ids in gaps: {1}\".format(total, percentage(in_gaps, total)), file=sys.stderr)",
"def gap_split(chain: [int], gap: int):\n chain_blocks = []\n chain_block = [0] # aircraft's charging outlet joltage\n item = 0\n for i in range(len(chain)):\n if not chain_block or chain[i] < item + gap:\n item = chain[i]\n chain_block.append(item)\n continue\n item = chain[i]\n chain_block.append(item)\n if len(chain_block) > 2: # blocks with 1 or 2 items can only have 1 distinct arrangement\n chain_blocks.append(chain_block)\n chain_block = [item]\n if len(chain_block) > 2: # blocks with 1 or 2 items can only have 1 distinct arrangement\n chain_blocks.append(chain_block)\n return chain_blocks",
"def reconstruct_sequence(s1, s2, S, backtrack_matrix, gap_penalty, gap_opening_penalty, edit_function, matrix):\n coordinate_list = []\n [i, j] = backtrack_matrix.shape\n i-=1\n j-=1\n \n while i > 0 or j > 0:\n val = S[i, j]\n # Consider 0 to handle the first row/column\n s1_gap = 0 if i == 0 else max([S[i - k, j] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, i+1)])\n s2_gap = 0 if j == 0 else max([S[i, j - k] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, j+1)])\n mut = S[i - 1, j - 1] + edit_function(s1[i - 1], s2[j - 1], matrix=matrix)\n\n # Append the current location to the coordinate list.\n coordinate_list.append([i, j])\n # If a 0 is found, interrupt the traceback\n if val == 0:\n break\n # Match s1 to a gap, move vertically\n elif i > 0 and val == s1_gap:\n i -= 1\n # Match s2 to a gap, move horizontally\n elif j > 0 and val == s2_gap:\n j -= 1\n # Substitution, diagonal movement\n elif i > 0 and j > 0 and val == mut:\n i -= 1\n j -= 1\n else:\n raise ValueError(\"val={0}, but we have s1_gap={1}, s2_gap={2}, mut={3}\".format(val, s1_gap, s2_gap, mut))\n \n coordinate_list.reverse() \n return coordinate_list"
] | [
"0.69004416",
"0.6682347",
"0.6234445",
"0.61394644",
"0.6135273",
"0.6029227",
"0.5996346",
"0.596874",
"0.59524286",
"0.58817637",
"0.5849354",
"0.58274347",
"0.5820121",
"0.5768506",
"0.57642204",
"0.5745565",
"0.5728751",
"0.5723446",
"0.5646391",
"0.56260604",
"0.5612469",
"0.5576031",
"0.5527938",
"0.54981875",
"0.5460408",
"0.54411846",
"0.53991276",
"0.53894764",
"0.53617567",
"0.5360284"
] | 0.71960086 | 0 |
Sequence gap_maps should return dicts mapping gapped/ungapped pos | def test_gap_maps(self):
empty = ""
no_gaps = "aaa"
all_gaps = "---"
start_gaps = "--abc"
end_gaps = "ab---"
mid_gaps = "--a--b-cd---"
def gm(x):
return self.RNA(x).gap_maps()
self.assertEqual(gm(empty), ({}, {}))
self.assertEqual(gm(no_gaps), ({0: 0, 1: 1, 2: 2}, {0: 0, 1: 1, 2: 2}))
self.assertEqual(gm(all_gaps), ({}, {}))
self.assertEqual(gm(start_gaps), ({0: 2, 1: 3, 2: 4}, {2: 0, 3: 1, 4: 2}))
self.assertEqual(gm(end_gaps), ({0: 0, 1: 1}, {0: 0, 1: 1}))
self.assertEqual(
gm(mid_gaps), ({0: 2, 1: 5, 2: 7, 3: 8}, {2: 0, 5: 1, 7: 2, 8: 3})
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def __get_map_offsets(self):\n map = self.map.copy()\n map_up = np.zeros((self.h + 1, self.w), np.uint8) # create 4-neighbor connectivity comparision\n map_down = np.zeros((self.h + 1, self.w), np.uint8)\n map_right = np.zeros((self.h, self.w + 1), np.uint8)\n map_left = np.zeros((self.h, self.w + 1), np.uint8)\n map_up[1:, :] = map # paste mask onto it, 1 shifted\n map_down[:-1, :] = map\n map_right[:, :-1] = map\n map_left[:, 1:] = map\n map_up = np.delete(map_up, -1, 0) # delete the extra row/column\n map_down = np.delete(map_down, 0, 0)\n map_right = np.delete(map_right, 0, 1)\n map_left = np.delete(map_left, -1, 1)\n map_up[0, :] = 1 # set new cells (after the shift) to 1(walls) to eliminate false-positives\n map_down[-1, :] = 1\n map_right[:, -1] = 1\n map_left[:, 0] = 1\n return map_up, map_right, map_down, map_left",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def generate_map():\n o = []\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n pos = [np.random.randint(100, 412), np.random.randint(80, 304)]\n models = make_models()\n\n print(\"# of groups: {}\".format(timestamps.shape[0] // note_group_size))\n for i in range(timestamps.shape[0] // note_group_size):\n z = generate_set(models, begin=i * note_group_size, start_pos=pos, length_multiplier=dist_multiplier,\n group_id=i, plot_map=False)[:, :6] * np.array([512, 384, 1, 1, 512, 384])\n pos = z[-1, 0:2]\n o.append(z)\n a = np.concatenate(o, axis=0)\n return a",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def cigar_to_map(cigar_text):\n assert 'I' not in cigar_text\n spans, posn = [], 0\n for n, c in pattern.findall(cigar_text):\n if n:\n n = int(n)\n else:\n n = 1\n \n if c == 'M':\n spans.append(Span(posn, posn+n))\n posn += n\n else:\n spans.append(LostSpan(n))\n map = Map(spans = spans, parent_length = posn)\n return map",
"def pos_gaps(df, gaps):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0)#.max(axis=0).ge(conservation * nb_rows)\n\n ge = []\n for i in value_counts.columns:\n try:\n if value_counts[i]['-'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n try:\n if value_counts[i]['.'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n return ge",
"def _with_gaps_removed(group_map, paired):\n gapped_groups = set()\n for group, elems in group_map.items():\n # Verify we're getting 1, 2, 3, ...\n expected_sequence = list(range(1, len(elems) + 1))\n if paired:\n fwd_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [fwd for fwd, _ in elems]\n ]\n rev_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [rev for _, rev in elems]\n ]\n if fwd_nums != expected_sequence or rev_nums != expected_sequence:\n gapped_groups.add(group)\n else:\n nums = [int(pattern_multilane.search(se).group(1)) for se in elems]\n if nums != expected_sequence:\n gapped_groups.add(group)\n\n return {group: elems for group, elems in group_map.items() if group not in gapped_groups}",
"def get_gap_info(in_file):\n # Initialize values to be computed.\n total_N = 0\n total_nucleotides = 0\n total_gaps = 0\n total_gaps_over_100 = 0\n all_gap_lengths = []\n\n # Use a dictionary to store bed coordinates.\n # key = fasta header\n # Value = list of tuples corresponding to genomic coordinates.\n bed_gaps = collections.OrderedDict()\n\n # Iterate through each sequence in the fasta,\n # and get gap info from each.\n sequences = SeqReader(in_file)\n for header, sequence in sequences.parse_fasta():\n gap_sequence = GapSequence(sequence)\n\n # Get total number of 'N' characters for this sequence.\n total_N += gap_sequence.count_Ns()\n # Get total number of nucleotides for this sequence.\n total_nucleotides += len(sequence)\n for gap in gap_sequence.get_gaps():\n # Increment total number of gaps\n total_gaps += 1\n if len(gap) > 100:\n total_gaps_over_100 += 1\n # Save this gap length to master list.\n all_gap_lengths.append(len(gap))\n\n # Now fill in bed file data structure.\n all_coordinates = [(m.start(0), m.end(0)) for m in gap_sequence.get_gap_coords()]\n if all_coordinates:\n bed_gaps[header] = all_coordinates\n\n return {\n 'total_N': total_N,\n 'total_nucleotides': total_nucleotides,\n 'total_gaps': total_gaps,\n 'total_gaps_over_100': total_gaps_over_100,\n 'all_gap_lengths': all_gap_lengths,\n 'bed_gaps': bed_gaps\n }",
"def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)",
"def prime_gap_plots(maxp, gap_sizes):\n P = prime_range(maxp + 1)\n v = [[(0, 0)] for i in gap_sizes]\n k = dict([(g, i) for i, g in enumerate(gap_sizes)])\n for i in range(len(P) - 1):\n g = P[i + 1] - P[i]\n if g in k:\n w = v[k[g]]\n w.append((P[i + 1], w[-1][1]))\n w.append((P[i + 1], w[-1][1] + 1))\n return v",
"def swappable_positions(self):\n swappable = []\n empty_position = self.get_position(0)\n for i in range(-1, 2, 2):\n adjacent_position1 = empty_position[0] + i, empty_position[1]\n adjacent_position2 = empty_position[0], empty_position[1] + i\n if 0 <= adjacent_position1[0] < 4:\n swappable.append(adjacent_position1)\n if 0 <= adjacent_position2[1] < 4:\n swappable.append(adjacent_position2)\n\n return swappable",
"def reportCopyMap(self):\n copy_map = defaultdict(list)\n for para in self.block_map:\n offset = self.offset_map[para]\n for i in xrange(len(self.block_map[para]) - 1):\n start, var, block = self.block_map[para][i]\n span = self.block_map[para][i + 1][0] - start\n if var is not None:\n copy_map[para].append([start + offset, span, pulp.value(var)])\n prevVar = pulp.value(var)\n else:\n copy_map[para].append([start + offset, span, prevVar])\n finalStart, finalVar, finalBlock = self.block_map[para][-1]\n finalSpan = self.G.sizes[para] - finalStart\n if finalVar is not None:\n copy_map[para].append([finalStart + offset, finalSpan, pulp.value(var)])\n else:\n copy_map[para].append([finalStart + offset, finalSpan, prevVar])\n return copy_map",
"def complete_mapping(self):\r\n\r\n self._reset_map()\r\n #position_prey = self.prey.position\r\n #self.complete_map[position_prey[1], position_prey[0]] = 1.0\r\n position_body = [part.position for part in self.body]\r\n\r\n for position in position_body:\r\n self.complete_map[position[1], position[0]] = 1\r\n\r\n return self.complete_map",
"def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)",
"def get_gaps_curve(raw_data):\n peaks = []\n valleys = []\n gaps = []\n # process the first window; i.e., the first PAGESIZE rows of data\n for j in range(1, Parser.PAGESIZE):\n if raw_data[j] > raw_data[j - 1] and raw_data[j] > raw_data[j + 1]:\n bisect.insort_left(peaks, raw_data[j], bisect.bisect_left(peaks, raw_data[j]))\n elif raw_data[j] < raw_data[j - 1] and raw_data[j] < raw_data[j + 1]:\n bisect.insort_left(valleys, raw_data[j], bisect.bisect_left(valleys, raw_data[j]))\n\n gaps.append(Parser.__find_gaps(peaks, valleys))\n\n # slide from start to end\n for j in range(Parser.PAGESIZE, len(raw_data)):\n s = j - Parser.PAGESIZE + 1\n if raw_data[s] > raw_data[s - 1] and raw_data[s] > raw_data[s + 1]:\n del peaks[bisect.bisect_left(peaks, raw_data[s])]\n elif raw_data[s] < raw_data[s - 1] and raw_data[s] < raw_data[s + 1]:\n del valleys[bisect.bisect_left(valleys, raw_data[s])]\n\n e = j - 1\n if raw_data[e] > raw_data[e - 1] and raw_data[e] > raw_data[e + 1]:\n bisect.insort_left(peaks, raw_data[e], bisect.bisect_left(peaks, raw_data[e]))\n elif raw_data[e] < raw_data[e - 1] and raw_data[e] < raw_data[e + 1]:\n bisect.insort_left(valleys, raw_data[e], bisect.bisect_left(valleys, raw_data[e]))\n gaps.append(Parser.__find_gaps(peaks, valleys))\n\n return gaps",
"def gaps(args):\n from jcvi.formats.sizes import agp\n from jcvi.formats.agp import mask, build\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\n \"--split\", default=False, action=\"store_true\", help=\"Generate .split.fasta\"\n )\n p.set_mingap(default=100)\n p.set_cpus()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (inputfasta,) = args\n mingap = opts.mingap\n split = opts.split\n prefix = inputfasta.rsplit(\".\", 1)[0]\n bedfile = prefix + \".gaps.bed\"\n\n if need_update(inputfasta, bedfile):\n write_gaps_bed(inputfasta, prefix, mingap, opts.cpus)\n\n if split:\n splitfile = prefix + \".split.fasta\"\n oagpfile = prefix + \".splitobject.agp\"\n cagpfile = prefix + \".splitcomponent.agp\"\n\n if need_update((inputfasta, bedfile), splitfile):\n\n sizesagpfile = agp([inputfasta])\n\n maskedagpfile = mask([sizesagpfile, bedfile, \"--splitobject\"])\n shutil.move(maskedagpfile, oagpfile)\n logging.debug(\"AGP file written to `{0}`.\".format(oagpfile))\n\n maskedagpfile = mask([sizesagpfile, bedfile, \"--splitcomponent\"])\n shutil.move(maskedagpfile, cagpfile)\n logging.debug(\"AGP file written to `{0}`.\".format(cagpfile))\n\n build([oagpfile, inputfasta, splitfile])\n cleanup(sizesagpfile)\n\n return splitfile, oagpfile, cagpfile",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def get_paps(\n self,\n ref = 'cogid',\n entry = 'concept',\n missing = 0\n ):\n \n try:\n return self._cache['#paps#'+str(missing)+'#',ref]\n except:\n pass\n \n etym_dict = self.get_etymdict(ref=ref,entry=entry) \n\n # create dictionary for paps\n paps = {}\n\n # create dictionary that stores missing data\n missed = {}\n\n # retrieve the values\n for key,values in etym_dict.items(): #self._etym_dict[ref,'concept'].items():\n paps[key] = []\n\n # check for missing data\n meanings = set()\n for value in values:\n if value:\n for v in value:\n meanings.add(v)\n if len(meanings) == 1:\n meaning = meanings.pop()\n\n if meaning not in missed:\n \n # get the list in the wordlist of self\n tmp = np.array(self.get_list(row=meaning))\n \n # get the sum of the list\n tmp = sum(tmp)\n \n # get all languages which are zero\n gaps = [i for i in range(self.width) if not tmp[i]]\n\n # append gaps to missing\n missed[meaning] = gaps\n else:\n meaning = False\n\n for i,value in enumerate(values):\n if value:\n paps[key].append(1)\n else:\n if meaning:\n if i in missed[meaning]:\n paps[key].append(missing)\n else:\n paps[key].append(0)\n else:\n paps[key].append(1)\n \n self._cache['#paps#'+str(missing)+'#',ref] = paps\n \n return paps",
"def base_to_signal_mapping(grp):\n\n position_in_signal = [0 for _ in range(5)]\n for i in range(1, len(grp)):\n position_in_signal += [i for _ in range(grp[i][5])]\n # position_in_signal += [grp[i][0] for _ in range(grp[i][5])]\n\n # print(position_in_signal)\n return position_in_signal",
"def _map_lines(self, delta: Delta) -> Dict[Tuple, Tuple]:\n\n # this is harder than I thought; I'll start with a super naive\n # approach and improve it later (or never)\n\n if delta.old_length == 0:\n return {(): tuple(range(delta.new_length))}\n if delta.new_length == 0:\n return {tuple(range(delta.old_length)): ()}\n\n result: Dict[Tuple[int, ...], Tuple[int, ...]] = {}\n\n for i in range(min(delta.old_length, delta.new_length) - 1):\n result[(i,)] = (i,)\n\n if delta.old_length >= delta.new_length:\n result[tuple(range(delta.new_length - 1, delta.old_length))] = (\n delta.new_length - 1,\n )\n else:\n result[(delta.old_length - 1,)] = tuple(\n range(delta.old_length - 1, delta.new_length)\n )\n\n return result",
"def _gap_account(self, spanrels):\n\t\t#Add extra spans in the keys\n\t\tgap_spanrels = copy.deepcopy(spanrels)\n\t\tfor head in spanrels:\n\t\t\tfor relation in spanrels[head]:\n\t\t\t\tl,r = relation[0], relation[1]\n\t\t\t\tif l != 0 and l not in self.wordspans:\n\t\t\t\t\tgap_spanrels[head].add((l-1,r))\n\t\t\t\tif r+1 not in self.wordspans and r+2 in self.wordspans:\n\t\t\t\t\tgap_spanrels[head].add((l,r+1))\n\t\tfor head in spanrels:\n\t\t\tnheads = []\n\t\t\tl,r = head[0], head[1]\n\t\t\tif l!=0 and l not in self.wordspans:\n\t\t\t\tnheads.append((l-1,r))\n\t\t\tif r+1 not in self.wordspans and r+2 in self.wordspans:\n\t\t\t\tnheads.append((l,r+1))\n\t\t\tfor nhead in nheads:\n\t\t\t\tgap_spanrels[nhead] = set([])\n\t\t\t\tfor key in gap_spanrels[head]:\n\t\t\t\t\tif key[1] <= nhead[0] or key[0] >= nhead[1]:\n\t\t\t\t\t\tgap_spanrels[nhead].add(key)\n\t\treturn gap_spanrels",
"def map_position(pos):\n\n posiction_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))\n return posiction_dict[pos]",
"def gaps(args):\n from jcvi.formats.base import DictFile\n from jcvi.apps.base import popen\n from jcvi.utils.cbook import percentage\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\"--bdist\", default=0, type=\"int\", help=\"Base pair distance\")\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n idsfile, frfile, gapsbed = args\n bdist = opts.bdist\n d = DictFile(frfile, keypos=1, valuepos=2)\n bedfile = idsfile + \".bed\"\n fw = open(bedfile, \"w\")\n fp = open(idsfile)\n total = 0\n for row in fp:\n id = row.strip()\n hit = d[id]\n tag, pos = get_tag(hit, None)\n seqid, start, end = pos\n start, end = max(start - bdist, 1), end + bdist\n print(\"\\t\".join(str(x) for x in (seqid, start - 1, end, id)), file=fw)\n total += 1\n fw.close()\n\n cmd = \"intersectBed -a {0} -b {1} -v | wc -l\".format(bedfile, gapsbed)\n not_in_gaps = popen(cmd).read()\n not_in_gaps = int(not_in_gaps)\n in_gaps = total - not_in_gaps\n print(\"Ids in gaps: {1}\".format(total, percentage(in_gaps, total)), file=sys.stderr)",
"def fill_gaps(self):\n\n for source in self.sources.keys():\n if source in self.staticsources:\n continue\n src = self.sources[source]\n print '[INFO] Scanning ' + source + ' for gaps'\n src.fill_gaps()",
"def clean_recording_gaps(self, pos_xy: np.ndarray, pos_times: np.ndarray):\n (\n position_gap_inds_above_threshold\n ) = self.check_for_position_gaps_above_threshold(pos_times)\n cleaned_pos_xy = pos_xy[:]\n for ind in position_gap_inds_above_threshold:\n cleaned_pos_xy[ind - 5 : ind + 5] = np.nan\n return (cleaned_pos_xy, position_gap_inds_above_threshold)"
] | [
"0.69372344",
"0.6512925",
"0.6227912",
"0.61309123",
"0.6124416",
"0.5984851",
"0.5867605",
"0.58508515",
"0.58421373",
"0.5823585",
"0.5815825",
"0.5779069",
"0.5761891",
"0.57431996",
"0.5677936",
"0.56523556",
"0.565037",
"0.5649766",
"0.5629222",
"0.5621352",
"0.56200445",
"0.5619618",
"0.5594119",
"0.55333376",
"0.55221665",
"0.550345",
"0.5497683",
"0.5479024",
"0.5420916",
"0.5398752"
] | 0.7045853 | 0 |
Sequence count_degenerate should return correct degen base count | def test_count_degenerate(self):
self.assertEqual(self.RNA("").count_degenerate(), 0)
self.assertEqual(self.RNA("GACUGCAUGCAUCGUACGUCAGUACCGA").count_degenerate(), 0)
self.assertEqual(self.RNA("N").count_degenerate(), 1)
self.assertEqual(self.PROT("N").count_degenerate(), 0)
self.assertEqual(self.RNA("NRY").count_degenerate(), 3)
self.assertEqual(
self.RNA("ACGUAVCUAGCAUNUCAGUCAGyUACGUCAGS").count_degenerate(), 4
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(seq):\n\treturn sum(1 for x in seq)",
"def count():",
"def n_neg(seq):\n\n # Convert to all upper case\n seq = seq.upper()\n\n # Check the valiality of sequence\n for aa in seq:\n if aa not in bioinfo_dicts.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n # Count E and D and return Count\n return seq.count('D') + seq.count('E')",
"def Count():\n return CheckForError(lib.Generators_Get_Count())",
"def testCounting(self):\n \n clk = Signal(0)\n clock_gen = ClkDriver(clk, period=4)\n \n for i in range(1, 6):\n #print \"Testing\", i, \"bits\"\n out = Signal(intbv(0)[i:])\n prev_out = Signal(intbv(2**i - 1)[i:])\n counter = Counter(out, clk, Signal(1))\n \n # make sure it increments and wraps at modulo 2^n\n @always(clk.posedge)\n def test():\n #print out, prev_out\n self.assertEqual(int(out), int((prev_out + 1) % 2**(len(prev_out))))\n prev_out.next = out\n \n sim = Simulation(counter, clock_gen, test)\n sim.run(12 * 2**i, quiet=1)",
"def n_doubled(intervals):\n i = 0\n for interval in intervals.intervals:\n if not Interval.is_degenerated(interval):\n i += 1\n return i",
"def n_neg(seq):\n\n # Convert sequence to upper case\n seq = seq.upper()\n\n # Check for a valid sequence\n for aa in seq:\n if aa not in bioinfo_dicts.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n\n # Count E's and D's, since these are the negative residues\n return seq.count('E') + seq.count('D')",
"def generator_count(self, gen):\n if len(gen) != 1 or gen.array_form[0][1] < 0:\n raise ValueError(\"gen must be a generator\")\n s = gen.array_form[0]\n return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])",
"def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0",
"def test_count_ab(self):\n AB = get_moltype(\"ab\")\n seq = AB.make_array_seq(\"aaba-\", alphabet=AB.alphabet.with_gap_motif())\n c = seq.counts()\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1})\n c = seq.counts(allow_gap=True)\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1, \"-\": 1})",
"def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3",
"def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def gapRunCount(letters):\n uniqLetters = map(operator.itemgetter(0), groupby(letters))\n return uniqLetters.count(\"-\")",
"def specht(mu):\n return StandardTableaux(mu).cardinality().n()",
"def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count",
"def freq(self) -> int:",
"def count(self):\n # TODO not implemented yet\n return 0",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def test_counts(self):\n # test DNA seq\n orig = \"AACCGGTTAN-T\"\n seq = self.DNA(orig)\n # no gaps, no ambiguities\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n # gaps allowed\n got = seq.counts(allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n # ambig allowed\n got = seq.counts(include_ambiguity=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(include_ambiguity=True, allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n\n # test DNA seq motif length of 2\n got = seq.counts(motif_length=2)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n self.assertEqual(dict(got), expect)\n # gap allowed\n got = seq.counts(motif_length=2, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n expect.update({\"-T\": 1})\n # ambig allowed\n got = seq.counts(motif_length=2, include_ambiguity=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(motif_length=2, include_ambiguity=True, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n expect.update({\"-T\": 1})\n self.assertEqual(dict(got), expect)\n\n # test base -- no concept of ambiguity, but understands gap\n orig = \"AACCGGTTAN-T\"\n seq = self.SEQ(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n\n # handle '?'\n orig = \"AACCGGTTAN-T?\"\n seq = self.DNA(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n got = seq.counts(allow_gap=True, include_ambiguity=True)\n expect.update({\"-\": 1, \"N\": 1, \"?\": 1})\n self.assertEqual(dict(got), expect)",
"def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())",
"def counter(self) -> int:",
"def counter(self) -> int:",
"def number_negatives(seq):\n # Convert sequence to upper case\n seq = seq.upper()\n\n for aa in seq:\n if aa not in bootcamp_utils.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n\n # Count E's and D's, since these are the negative residues\n return seq.count('E') + seq.count('D')",
"def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1",
"def _count_discordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_discordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)",
"def count(self, base):\n return self._dna.count(base)"
] | [
"0.6848711",
"0.6295464",
"0.6287779",
"0.6244194",
"0.61932415",
"0.6169714",
"0.61309725",
"0.60558563",
"0.6046519",
"0.60183054",
"0.5953899",
"0.5946832",
"0.59274083",
"0.59274083",
"0.59274083",
"0.59274083",
"0.59089196",
"0.58695394",
"0.58592194",
"0.5851811",
"0.5827188",
"0.5824548",
"0.5810887",
"0.5800668",
"0.57960445",
"0.57960445",
"0.57879144",
"0.57826084",
"0.5763182",
"0.5754043"
] | 0.7551653 | 0 |
Sequence can_mispair should return True on any possible mispair | def test_can_mispair(self):
assert not self.RNA("").can_mispair("")
assert self.RNA("N").can_mispair("N")
assert self.RNA("R").can_mispair("Y")
assert self.RNA("N").can_mispair("r")
assert self.RNA("CGUACGCAN").can_mispair("NUHCHUACH")
assert self.RNA("U").can_mispair("C")
assert self.RNA("U").can_mispair("R")
assert self.RNA("UUU").can_mispair("AAR")
assert self.RNA("UUU").can_mispair("GAG")
assert not self.RNA("UUU").can_mispair("AAA")
assert not self.RNA("UCAG").can_mispair("CUGA")
assert self.RNA("U--").can_mispair("--U")
assert self.DNA("TCCAAAGRYY").can_mispair("RRYCTTTGGA") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_mi():\n pass",
"def ok_mm_primer(primer_seq, all_primer_seqs, primer_mm):\r\n for curr_pat in all_primer_seqs:\r\n if count_mismatches(primer_seq, curr_pat, primer_mm) <= primer_mm:\r\n return True\r\n return False",
"def check_sat(m):\n conflict = False\n matrix = np.array(m)\n\n ## If only augmented column remains\n if len(matrix[0]) == 1:\n for i in range(len(matrix)):\n if matrix[i,0] == 1:\n conflict = True\n break\n else:\n ## Check if exist empty odd which means UNSAT i.e. a conflict\n for row in matrix[::-1]:\n if row[-1] == 1 and np.sum(row[:-1]) == 0:\n ## UNSAT\n conflict = True \n break \n return conflict",
"def testMedicationsImmunosupp(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"immunosupp\")\n\n self.util.boolPropertyTest(self, attr, \"immunosupp\")",
"def is_carrying_minerals(self) -> bool:\n return self.has_buff(BuffId.CARRYMINERALFIELDMINERALS) or self.has_buff(\n BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS\n )",
"def is_monoid_action(self) :\n return True",
"def test_mixed_prisoners_dilemma(_):\n game = gamegen.sym_2p2s_game(2, 0, 3, 1) # prisoners dilemma\n eqm = [0, 1]\n\n assert (\n regret.mixture_regret(game, eqm) == 0\n ), \"Known symmetric mixed was not zero regret\"",
"def primer_exceeds_mismatches(primer_seq, all_primer_seqs, max_primer_mm):\r\n if primer_seq not in all_primer_seqs:\r\n if not ok_mm_primer(primer_seq, all_primer_seqs, max_primer_mm):\r\n return True\r\n return False",
"def check_with_premises(self, premises):\n cnt = 0\n for self_prem in self.needed_premises:\n self_prem_code = self_prem.visit_make_coding()\n for given_prem in premises:\n if self_prem_code == given_prem.coding:\n cnt += 1\n if cnt == len(self.needed_premises):\n return True\n return False",
"def isPossible(self):\n \n return bool(len(self._possibilities))",
"def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq",
"def is_mc(test_cases, foo):\r\n # conditions that independently affected the outcome\r\n \r\n n=len(test_cases[0]) \r\n c_aff=[]\r\n for subset in combinations(test_cases,2):\r\n (changed,which)=onlyonechanged(subset)\r\n if changed:\r\n if foo(*subset[0])!=foo(*subset[1]):\r\n c_aff.append(which) \r\n return n==len(c_aff)",
"def __bool__(self):\n return any(self.smask)",
"def check_completeness(ISM):\n for item in ISM:\n if item not in ['A', 'T', 'C', 'G', '-']:\n return False\n return True",
"def mr_pairs_have_less_mi_exp(filename=None):\n trials = 500\n matrix = [[0,0,0,0] for i in range(L)]\n motif = [random_site(L) for i in range(n)]\n scale = 0.01 #use this to prevent overflows in anneal\n scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale\n annealed_system = lambda :anneal(scaled_sse,\n lambda(matrix,motif):propose(matrix,motif),\n (matrix,motif),\n verbose=True,\n iterations=100000,\n stopping_crit = 0.1*scale)\n systems = [annealed_system() for i in xrange(500)]\n motifs = map(second,systems)\n ics = map(motif_ic,motifs)\n control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]\n mis = map(total_motif_mi,motifs)\n control_mis = map(total_motif_mi,control_motifs)\n plt.scatter(mis,control_mis)\n plt.xlabel(\"M-R System Mutual Information (bits)\")\n plt.ylabel(\"Annealed Motif Mutual Information (bits)\")\n plt.plot([0,5],[0,5])\n maybesave(filename)\n #mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)\n return mis,control_mis",
"def isLegal(self):\n counter = 0\n for t in self.types:\n if t > 0:\n counter = counter + 1\n if counter < 4:\n return True\n else:\n return False",
"def feasible(self, c):\n\t\tfor played_combination in self.combinations:\n\t\t\tif not self.consistent(c, played_combination):\n\t\t\t\treturn False\n\t\treturn True",
"def check(m) :\n #find Connected-component\n lst = find_c(m)\n for e in lst :\n # verify len , 3 is the len of large boat\n if len(e) > 3 :\n return False\n if not is_vert(e) and not is_hori(e):\n return False\n return True",
"def check_other(seq_iter):\n\treturn any(filter(has_abba, seq_iter))",
"def check_sat_(m, lits):\n conflict = False\n clause = []\n\n ## If only augmented column remains\n if len(m[0]) == 1:\n if np.sum(m[:,0]) > 0:\n conflict = True\n else:\n ## Check if exist empty odd which means UNSAT i.e. a conflict\n for row in m[::-1]:\n if row[-1] == 1 and np.sum(row[:-1]) == 0:\n ## UNSAT\n conflict = True \n break\n elif np.sum(row[:-1]) == 1:\n ## Unit XOR\n i, = np.where(row[:-1] == 1)[0]\n if row[-1] == 1:\n if lits[i] not in clause:\n clause.append( lits[i])\n else:\n if -lits[i] not in clause:\n clause.append(-lits[i])\n return conflict, clause",
"def test_viable(self,outs):\n \n viable = True\n for i,temp_i in enumerate(outs):\n if (temp_i <= self.mins[i+4]):\n viable = False\n elif (temp_i >= self.maxes[i+4]): \n viable = False\n return viable",
"def metropolis_accept_move(self):\n return self.mc.metropolis(self)",
"def is_heavily_armed(self):\n # type: () -> bool\n return False",
"def __call__(self, possibility: object) -> bool:\n if {truth(possibility) for truth in self.truths} == {True}:\n return True\n else:\n return False",
"def McNuggets(n):\n\n if n == 0:\n return True\n if n < 0:\n return False\n\n return McNuggets(n-6) or McNuggets(n-9) or McNuggets(n-20)\n return False",
"def is_sequential(self):\n counter = 1\n for r in range(0, 4):\n for c in range(0, 4):\n if counter == 16:\n return True\n elif self.get((r, c)) != counter:\n return False\n counter += 1",
"def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS",
"def controlseq(s): #was the_controlseq\n return any_controlseq().if_value(s)",
"def check_for_combat():\n if random.randint(1, 4) == 1:\n return True\n else:\n return False",
"def constraint_not_adjacent(m, n) :\n if abs(m-n)==1:\n return False\n return True"
] | [
"0.5894247",
"0.55321866",
"0.5423738",
"0.532274",
"0.52513564",
"0.5211209",
"0.5125752",
"0.50944513",
"0.5094062",
"0.50700766",
"0.50617254",
"0.5061186",
"0.5042807",
"0.5036132",
"0.5035601",
"0.50216484",
"0.50123334",
"0.50002867",
"0.49906853",
"0.4979314",
"0.49767098",
"0.4962538",
"0.49600053",
"0.49590138",
"0.4951467",
"0.49251196",
"0.48751268",
"0.48656738",
"0.48625833",
"0.4849188"
] | 0.7014911 | 0 |
Sequence must_pair should return True when no possible mispairs | def test_must_pair(self):
assert self.RNA("").must_pair("")
assert not self.RNA("N").must_pair("N")
assert not self.RNA("R").must_pair("Y")
assert not self.RNA("A").must_pair("A")
assert not self.RNA("CGUACGCAN").must_pair("NUGCGUACG")
assert not self.RNA("U").must_pair("C")
assert not self.RNA("UUU").must_pair("AAR")
assert not self.RNA("UUU").must_pair("RAA")
assert not self.RNA("UU-").must_pair("-AA")
assert self.RNA("UCAG").must_pair("CUGA")
assert self.DNA("TCCAGGG").must_pair("CCCTGGA")
assert self.DNA("tccaggg").must_pair(self.DNA("ccctgga"))
assert not self.DNA("TCCAGGG").must_pair("NCCTGGA") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def requires_pairing(cls) -> bool:\n return False",
"def test_can_pair(self):\n assert self.RNA(\"\").can_pair(\"\")\n assert not self.RNA(\"UCAG\").can_pair(\"UCAG\")\n assert self.RNA(\"UCAG\").can_pair(\"CUGA\")\n assert not self.RNA(\"UCAG\").can_pair(\"cuga\")\n assert self.RNA(\"UCAG\").can_pair(\"NNNN\")\n assert self.RNA(\"NNNN\").can_pair(\"UCAG\")\n assert self.RNA(\"NNNN\").can_pair(\"NNNN\")\n assert not self.RNA(\"N\").can_pair(\"x\")\n assert not self.RNA(\"N\").can_pair(\"-\")\n assert self.RNA(\"-\").can_pair(\"-\")\n assert self.RNA(\"UCAGU\").can_pair(\"KYYRR\")\n assert self.RNA(\"UCAG\").can_pair(\"KKRS\")\n assert self.RNA(\"U\").can_pair(\"G\")\n\n assert not self.DNA(\"T\").can_pair(\"G\")",
"def __pair_maximizer(alpha_pairs, pair):\n for alt in alpha_pairs:\n if pair != alt and pair[0].issubset(alt[0]) and pair[1].issubset(alt[1]):\n return False\n return True",
"def check_pairs(self, all_pr, curr):\n flag = True\n for pair_ox in all_pr:\n if (curr[0] == pair_ox or curr[1] == pair_ox):\n flag = False\n return flag",
"def is_paired_list(self, key):\n if key in ('pattern','points'):\n return True\n else:\n return False",
"def validate_pairs(pairs, historical_pairs):\n if pairs is None:\n return False\n for p in pairs:\n if p in historical_pairs:\n return False\n return True",
"def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False",
"def hasConflicts(self):\n partners = {}\n for first, second in self:\n #print >>sys.stderr, \"first:\", first, \"second:\", second\n if first is None:\n if second is None:\n continue #no pairing info\n else:\n first, second = second, first #swap order so None is 2nd\n if second is None: #check first isn't paired\n if partners.get(first, None) is not None:\n print >>sys.stderr, \"here1\"\n print >>sys.stderr, \"first:\", first, \"second:\", second\n return True\n else:\n partners[first] = None\n else: #first and second were both non-empty: check partners\n if first in partners:\n if partners[first] != second:\n print >>sys.stderr, \"here2\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[first]\", partners[first]\n print \"partners:\", partners\n return True\n if second in partners:\n if partners[second] != first:\n print >>sys.stderr, \"here3\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[second]:\", partners[second]\n return True\n #add current pair to the list of constraints\n partners[first] = second\n partners[second] = first\n #can only get here if there weren't conflicts\n return False",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def estPair(nbre):\n if(nbre % 2 == 0):\n return True\n else:\n return False",
"def is_valid_pair(self, pair, exchange):\n pairs = self.ccxt.get_pairs(exchange)\n print(pairs)\n return pair in pairs",
"async def pair(self, *args, **kwargs) -> bool:\n return await self._backend.pair(*args, **kwargs)",
"def is_minpair(first, second, corpus_context, segment_pairs, environment_filter):\n first = getattr(first, corpus_context.sequence_type)\n second = getattr(second, corpus_context.sequence_type)\n\n if len(first) != len(second):\n return False\n\n has_difference = False\n for i in range(len(first)):\n if first[i] == second[i]:\n continue\n elif (conflateable(first[i], second[i], segment_pairs)\n and fits_environment(first, second, i, environment_filter)):\n has_difference = True\n continue\n else:\n return False\n\n if has_difference:\n return True",
"def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False",
"def is_pair(pair):\n return isa(pair, Pair) or is_list(pair)",
"def find_pair(line):\n if len(line) >1:\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return True\n return False",
"def test_horizontal_sequence_match(self):\n dna = self._create_dna()\n\n # Existing codon pair\n correct_codon_pair = dna.data[2]\n\n # Another codon pair\n other_pair = self._create_codon_pair()\n\n self.assertFalse(dna.has_sequence(other_pair))\n self.assertTrue(dna.has_sequence(correct_codon_pair))",
"def onlyonechanged(pair):\r\n\r\n l1 = pair[0]\r\n l2 = pair[1]\r\n res = [ x != y for (x,y) in zip(l1, l2)]\r\n\r\n if sum(res)==1:\r\n ret_res=(sum(res)==1)\r\n ret_pos=[i for i, e in enumerate(res) if e != 0]\r\n else :\r\n ret_res=False\r\n ret_pos=[0]\r\n \r\n return ret_res, ret_pos[0]",
"def check_restraint_pairs_for_doubles(list): # Also consider that a1 and a2 can be switches\r\n for i in range(len(list) - 1):\r\n for j in range(i + 1, len(list)):\r\n if (list[i].r1 == list[j].r1 and list[i].r2 == list[j].r2) or (\r\n list[i].r1 == list[j].r2 and list[i].r2 == list[j].r1) or list[i].distance == list[j].distance:\r\n return True\r\n return False",
"def _test_pairs(self, idx0, idx1):\n pass",
"def is_sequential(self):\n counter = 1\n for r in range(0, 4):\n for c in range(0, 4):\n if counter == 16:\n return True\n elif self.get((r, c)) != counter:\n return False\n counter += 1",
"def hasPseudoknots(self):\n pairs = self.directed()\n seen = [] # list of pairs against which you compare each time\n pairs.sort()\n for pair in pairs:\n if not seen:\n seen.append(pair)\n else:\n lastseen_up, lastseen_down = seen[-1]\n while pair[0] > lastseen_down:\n seen.pop()\n if not seen:\n break\n else:\n lastseen_up,lastseen_down = seen[-1]\n if not seen:\n seen.append(pair)\n continue\n if pair[1]>lastseen_down:\n #pseudoknot found\n return True\n else:\n #good pair\n seen.append(pair)\n return False",
"def verify_cutoff_pair(cutoff, pair, voltages):\n flag = -1\n for i in range(len(cutoff)):\n pairs = cutoff[i]\n if pair in pairs or list(reversed(pair)) in pairs:\n if voltages[i] is 'S':\n flag = i\n return flag\n return flag",
"def test_hand_has_one_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_one_pair() == expected",
"def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == cp2.constraint) and (cp2.special == cp1.constraint)",
"def _is_mapping_correct(self):\n for i in range(self.mapping_size):\n target = self.mapping[i]\n if target < 0:\n continue\n if target == i // 2:\n continue\n return False\n return True",
"def test_hand_has_two_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_two_pair() == expected",
"def is_proper(i0, i1, i2, i3, bond_set):\n if (i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set and len(set([i0, i1, i2, i3])) == 4:\n return True\n return False",
"def is_onepair(holecards, flop, exclude_board=True):\n hand = tuple(chain(holecards, flop))\n\n if exclude_board:\n return hand_is_onepair(hand) and not flop_has_pair(flop)\n else:\n return hand_is_onepair(hand)",
"def valid_colset_pair((cset1, cset2), pairedcols):\n c1a, cia, cna = cset1\n c1b, cib, cnb = cset2\n seta = set([c1a, cna]) | cia\n setb = set([c1b, cnb]) | cib\n if not seta.isdisjoint(setb):\n for a, b in pairedcols:\n if cna == a and c1b == b:\n return False\n if cnb == a and c1a == b:\n return False\n if c1a in setb and cna in setb and not seta <= setb:\n return False\n if c1b in seta and cnb in seta and not setb <= seta:\n return False\n if cna not in set([cnb]) | cib and cnb not in set([cna]) | cia:\n return False\n if c1a not in set([c1b]) | cib and c1b not in set([c1a]) | cia:\n return False\n return True"
] | [
"0.7317017",
"0.6751099",
"0.66120136",
"0.64646906",
"0.6463766",
"0.6461888",
"0.63059074",
"0.63053954",
"0.6272074",
"0.6253735",
"0.6220704",
"0.61994374",
"0.6165336",
"0.60740834",
"0.6050706",
"0.60397875",
"0.59866846",
"0.5950847",
"0.5943008",
"0.59143764",
"0.58840144",
"0.57655394",
"0.57578814",
"0.57548964",
"0.57544345",
"0.5729742",
"0.57227683",
"0.57136846",
"0.57113546",
"0.5677373"
] | 0.6926781 | 1 |