query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30-30) | negative_scores (listlengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values)
---|---|---|---|---|---|---|
Sleeps until we're ready to make a call again | def _sleep(self):
while 1:
diff = (time.time()-self.lastcall) - self.mindelay
if diff >= 0: return
time.sleep(max(-diff/2.0, 0.01)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _wait_before_call(self):\n while (dt.datetime.now() - self._last_call_ts) <= dt.timedelta(\n seconds=self.api_timeout\n ):\n time.sleep(0.5)\n self._last_call_ts = dt.datetime.now()",
"def _wait_before_call(self) -> None:\n while (datetime.now() - self._last_call_ts) <= timedelta(\n seconds=self._config.get_yfinance_polling_period()\n ):\n time.sleep(0.1)\n self._last_call_ts = datetime.now()",
"def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)",
"def answer_waiting_call(self) -> None:",
"def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"async def sleep(cls, delay: float) -> None:",
"def deepsleep(time_ms: int = None) -> None:",
"def wait():\n time.sleep(1)",
"def wait(self):\n self.drain_call_queue()",
"def sleep_until(self, time):\n raise NotImplementedError()",
"def waitUntilSuccess():",
"def wait(self):\n time.sleep(self.next())",
"def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass",
"def wait(delay=2):\n time.sleep(delay)",
"def wait(self, sleep_time):\n time.sleep(sleep_time)",
"def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()",
"def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)",
"def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)",
"def wait():\n pass",
"def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return",
"def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)",
"def handle_sleep(_):\n loop.sleep()",
"def work(self):\n time.sleep(random.randint(0, 200) / 100)\n pass",
"def sleep_for_cloudinit():\n run(\"sleep 15\")",
"def wait(self):\n time.sleep(0.010)",
"def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'"
]
| [
"0.72283506",
"0.70473284",
"0.7040449",
"0.6982203",
"0.6924638",
"0.67761457",
"0.67761457",
"0.67761457",
"0.6768626",
"0.6690188",
"0.6678289",
"0.66534007",
"0.66428673",
"0.6601506",
"0.6587237",
"0.65673304",
"0.6564877",
"0.6560657",
"0.6520853",
"0.6513019",
"0.6494254",
"0.64841425",
"0.64761233",
"0.64760363",
"0.64579815",
"0.64401525",
"0.6439308",
"0.643716",
"0.64365417",
"0.6428655"
]
| 0.71504414 | 1 |
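The positive document above is a polling rate limiter: it sleeps in small slices until at least `mindelay` seconds have elapsed since `lastcall`. A minimal standalone sketch of the same pattern (the class name and the `call` wrapper are illustrative assumptions, not part of the dataset row):

```python
import time

class RateLimiter:
    """Enforces a minimum delay between successive calls (illustrative sketch)."""

    def __init__(self, mindelay=1.0):
        self.mindelay = mindelay   # seconds required between calls
        self.lastcall = 0.0        # timestamp of the previous call

    def _sleep(self):
        # Sleep in small increments until enough time has passed since lastcall.
        while True:
            diff = (time.time() - self.lastcall) - self.mindelay
            if diff >= 0:
                return
            time.sleep(max(-diff / 2.0, 0.01))

    def call(self, func, *args, **kwargs):
        # Hypothetical convenience wrapper: wait, invoke, record the call time.
        self._sleep()
        try:
            return func(*args, **kwargs)
        finally:
            self.lastcall = time.time()
```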
Returns the archival path for a given cache filename. Appends the last modified timestamp for it. | def archivepath(self, cachefname):
modtime = os.stat(cachefname).st_mtime
ext = '.'+self.serializer
base = cachefname.rsplit(ext, 1)[0]
ret = '%s-%f%s' % (base, modtime, ext)
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cachefile(filename):\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n return os.path.join(cachedir, filename)",
"def get_cache_file(self, dependencies):\n filename = '%s.tar' % self.get_cache_key(dependencies)\n return os.path.join(self.cache_directory, filename)",
"def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret",
"def _get_cache_file_path(self, identifier):\n cache_file_name = get_cache_file_name(identifier)\n cache_file_path = os.path.join(self.cache_path, cache_file_name)\n\n return cache_file_path",
"def _get_cached_filepath(prefix, url):\n filename = '{prefix}_{hash_string}.cache'.format(\n prefix=prefix,\n hash_string=_hash_value(url),\n )\n logger.debug('Cached filepath: ' + os.path.join(CACHE_DIRECTORY, filename))\n return os.path.join(CACHE_DIRECTORY, filename)",
"def cache_key(self):\r\n statinfo = os.stat(self.pathname)\r\n return (self.filename + str(statinfo.st_mtime)).encode('ascii', 'ignore')",
"def get_cache_file_path(self, URL):\n\n filename = hashlib.md5(URL.encode('utf-8')).hexdigest() + '.wbc'\n path = pathlib.Path(config.WEATHER_PROVIDERS['App']['Cache_path'])\n cache_file_path = path.joinpath(filename)\n\n return cache_file_path",
"def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"",
"def get_cache_path(self):",
"def get_cache_path(self):",
"def cache_path(self):",
"def cache_path(self):",
"def _get_cache_filename(name, filename):\n filename = os.path.abspath(filename)[1:]\n home_folder = os.path.expanduser('~')\n base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')\n\n return os.path.join(base_cache_dir, name, filename)",
"def get_cache_file_path(self):\n home_path = os.path.expanduser(\"~\")\n # path to the programs cache directory\n full_cache_dir = os.path.join(home_path, \".cache\", CACHE_DIR)\n\n if not os.path.exists( full_cache_dir ):\n os.makedirs( full_cache_dir )\n \n return os.path.join( full_cache_dir, FILE_NAME )",
"def _cachefilename(self, cachedir):\n\n return cachedir / \"filename\"",
"def _2to3_cache_path(self, path):\n head, tail = os.path.split(path)\n base_filename, sep, tail = tail.partition('.')\n filename = ''.join([base_filename, sep, self.tag, sep, tail])\n return os.path.join(head, '__pycache__', filename)",
"def get_cache_file_path(self) -> str:\n return self.cache_file_path",
"def getCacheFile(ns, digest):\n return os.path.join(getDir(cacheDir, ns), digest)",
"def get_archive_file_path(self,results):\n path = os.path.join(self.archive_path,results.version)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path,self.get_archive_filename(results))",
"def cache_file(self, repo):\n token = blake2b(repo.location.encode()).hexdigest()[:10]\n dirname = f\"{repo.repo_id.lstrip(os.sep)}-{token}\"\n return pjoin(self.options.cache_dir, \"repos\", dirname, self.cache.file)",
"def __cache_path(song_name, artist):\n song_name = REGEX_FILEPATH_GUARD.sub(\"-\", song_name)\n artist = REGEX_FILEPATH_GUARD.sub(\"_\", artist)\n cache_name = \"-\".join([artist, song_name]) + \".json\"\n cache_path = op.join(op.dirname(__file__), \"data\", \"cache\", cache_name)\n\n return cache_path",
"def _get_instrument_cache_file_path(instrument_name, start_date, end_date, cache_dir):\n\n identifier = f'{instrument_name}_{start_date}_{end_date}'\n return os.path.join(cache_dir, f'{identifier}.data')",
"def cache_path(self, vpath):\n return os.path.join(self.cache_root, \n *vpath.split('/') )",
"def _join_path(\n year: int, day: int, session: str, file_type: Optional[str] = None\n) -> str:\n cache_location = user_cache_dir(appname=\"advent-of-code\")\n cache_file = os.path.join(cache_location, str(session), str(year), str(day))\n if file_type == \"input_file\":\n cache_file = os.path.join(cache_file, \"input.txt\")\n if file_type == \"submission_file\":\n cache_file = os.path.join(cache_file, \"submission.txt\")\n if file_type == \"last_time_file\":\n cache_file = os.path.join(cache_file, \"time.txt\")\n return cache_file",
"def get_cached_file_name(file_name):\n (directory, base_name) = os.path.split(file_name)\n directory = os.path.relpath(directory, start=\"/\")\n new_directory = os.path.join(CACHE, directory)\n (simple_name, ext) = os.path.splitext(base_name)\n new_base_name = simple_name + cached_ext(ext)\n result = os.path.join(new_directory, new_base_name)\n return result",
"def _abs_path(self, path):\n\n debug(\"current cache: %s\", self._cache)\n\n #save path in argument\n arg_path = path\n try:\n #try to return whats in cache:\n debug(\"trying to access %s path in cache\", arg_path)\n return self._cache[arg_path]\n except KeyError:\n debug(\"%s not found in cache\", arg_path)\n #normalize path:\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.normpath(path)\n #save the result in the cache:\n self._cache[arg_path] = path\n debug(\"stored %s in cache\", self._cache[arg_path])\n return path",
"def _get_cached_file_name(bucket_name, saltenv, path):\n\n file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)\n\n # make sure bucket and saltenv directories exist\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n return file_path",
"def _get_buckets_cache_filename():\n\n cache_dir = _get_cache_dir()\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n return os.path.join(cache_dir, \"buckets_files.cache\")",
"def get_cache_filename(typ, dumpname, address=None):\n fname = typ\n if address is not None:\n fname = '%x.%s' % (address, typ)\n return os.path.sep.join([get_cache_folder_name(dumpname), fname])",
"def _get_cache_filename(self):\n home_dir = os.path.expanduser(\"~\")\n filename = 'dbcollection.json'\n return os.path.join(home_dir, filename)"
]
| [
"0.69801885",
"0.68424743",
"0.6686933",
"0.6672214",
"0.6626115",
"0.65499896",
"0.65381086",
"0.6537387",
"0.6528383",
"0.6528383",
"0.649913",
"0.649913",
"0.6485715",
"0.64468354",
"0.6443828",
"0.6428211",
"0.6415024",
"0.64056045",
"0.6385045",
"0.6383729",
"0.6363894",
"0.6341871",
"0.63107973",
"0.62682986",
"0.6246833",
"0.61185867",
"0.60519254",
"0.604636",
"0.60070384",
"0.593716"
]
| 0.7945767 | 0 |
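The positive document derives an archive filename by splitting off the serializer extension and embedding the cache file's modification time. A self-contained sketch of that naming scheme (function and parameter names are assumed for illustration):

```python
import os

def archive_path(cache_fname, serializer='json'):
    """Return an archival filename that embeds the cache file's mtime,
    e.g. 'results.json' -> 'results-1700000000.123456.json'."""
    modtime = os.stat(cache_fname).st_mtime
    ext = '.' + serializer
    base = cache_fname.rsplit(ext, 1)[0]
    return '%s-%f%s' % (base, modtime, ext)
```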
Loads the cache from the given cachepath. If not found, raises IOError. | def loadcache(self, cachepath):
loadfunc = json.load if self.serializer == 'json' else pickle.load
try:
# check for recency
if self.expiration > 0:
elapsed = time.time() - os.stat(cachepath).st_mtime
#print >>sys.stderr, '%s exp, %s elapsed' % (self.expiration, elapsed)
if elapsed > self.expiration:
if self.expirepolicy == 'archive':
os.rename(cachepath, self.archivepath(cachepath))
raise IOError
return loadfunc(open(cachepath))
except Exception, e:
#print >>sys.stderr, 'Could not load cache file %s: %s' % (cachepath, e)
raise IOError('Could not load cache file %s: %s' % (cachepath, e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def __read_cache_file_if_exists(self) -> None:\n if os.path.exists(self.__cache_file):\n self.__config.open_file(self.__cache_file, \"r\", self.__process_cache)",
"def read_cached_file(self, path):\n if self.config.get('do_caching', False):\n ext = path.split('.')[-1]\n\n if ext == 'cache':\n with open(path, 'r') as fd:\n try:\n return fd.read()\n except UnicodeDecodeError as e:\n self.logger.warning(str(e))\n else:\n raise Exception('\"{}\" is a invalid cache file.'.format(path))",
"def load_cache(path, encoding=\"latin-1\", fix_imports=True):\n with open(path, \"rb\") as f:\n return pickle.load(f)",
"def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)",
"def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return",
"def load_cache(self,path, encoding=\"latin-1\", fix_imports=True):\n with open(path, \"rb\") as f:\n return pickle.load(f, encoding=encoding, fix_imports=True)",
"def load_cache(self, filename=None):\n try:\n if not os.path.getsize(self._cache_filename(filename)):\n print(\"On-disk cache empty\")\n return\n\n with open(self._cache_filename(filename), \"rb\") as fh:\n cached = pickle.load(fh)\n self.name_cache = cached.name_cache\n self.run_cache = cached.run_cache\n self.row_cache = cached.row_cache\n self.extend(cached)\n print(\"On-disk cache loaded\")\n except OSError: # (FileNotFoundError is Python 3 only)\n print(\"On-disk cache not found\")",
"def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None",
"def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None",
"def get(self, path):\n\t\treturn self.cache.get(path)",
"def load_cache(cache_key, version):\n\n # Try to get the results of the last run, but fall back to an empty dict if that's not\n # available. That's most likely to happen on the first run.\n try:\n cache = json.loads(cache_file(cache_key).read_text())\n except FileNotFoundError:\n return {}\n\n if cache[\"version\"] != version:\n raise ValueError(\n f\"Unknown {cache_key} version number: expected {version}, got {cache['version']}\"\n )\n\n return cache[cache_key]",
"def load_cache(base_url, path=\"logs/\"):\n\n # Convert URL to filename and read contents\n url_filename = url_to_filename(base_url)\n\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"r\")\n data_cache = f.read()\n\n data_cache = \" \".join(data_cache.split()) # Remove all whitespaces\n\n return data_cache",
"def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss",
"def load_cache(name, typ=\"pkl\"):\n filename = cache_name(name, typ)\n if typ == \"str\":\n with open(filename, 'r') as fin:\n return fin.read()\n elif typ == \"pkl\":\n with open(filename, 'rb') as fin:\n return pickle.load(fin)\n elif typ == \"h5\":\n import keras\n return keras.models.load_model(filename)\n else:\n raise ValueError(\"Invalid type '{}'.\".format(typ))",
"def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))",
"def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))",
"def cached_load(filepath: str) -> io.BytesIO:\n with open(filepath, 'rb') as f:\n return io.BytesIO(f.read())",
"def _load_for_cache(self, parsed_uri, session):\n remote_uri = \"{}://{}/{}\".format(parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print(\"Loading URI {}\".format(remote_uri), file=sys.stderr)\n response = session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n \"Could not load file {}\".format(parsed_uri.geturl())\n ) from e\n remote_json = self._load_json(response)\n return remote_json",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']",
"def cache(*filepath):\n expected_dir = os.path.join(config.cache_dir, \"/\".join(filepath))\n if not os.path.exists(expected_dir):\n raise FileNotFoundError(\"Couldn't find {}\".format(expected_dir))\n return expected_dir",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j",
"def _read_buckets_cache_file(cache_file):\n\n log.debug(\"Reading buckets cache file\")\n\n with salt.utils.files.fopen(cache_file, \"rb\") as fp_:\n try:\n data = pickle.load(fp_)\n except (\n pickle.UnpicklingError,\n AttributeError,\n EOFError,\n ImportError,\n IndexError,\n KeyError,\n ValueError,\n ) as exc:\n log.debug(\"Exception reading buckets cache file: '%s'\", exc)\n data = None\n\n return data",
"def read_cache():\n try:\n cache_file = open(CACHE_FILENAME, 'r', encoding=\"utf-8\")\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n return cache_dict\n except:\n cache_dict = {}\n return cache_dict",
"def get_image_from_cache(cache, file_path):\n if file_path in cache:\n return cache[file_path]\n image = read_image(file_path, GRAY_NUMBER)\n cache[file_path] = image\n return image",
"def load(cache_file: Path, *, mode: str = None, unsafe: bool = False):\n if mode == 'binary':\n return cache_file.read_bytes()\n\n content = cache_file.read_text()\n if mode == 'json':\n content = json.loads(content)\n\n return content",
"def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)"
]
| [
"0.69977164",
"0.6749261",
"0.6732635",
"0.67183495",
"0.65936494",
"0.65107465",
"0.65056586",
"0.63451797",
"0.6232331",
"0.60423887",
"0.6003267",
"0.5985406",
"0.59646606",
"0.5915458",
"0.5858328",
"0.5852859",
"0.57651997",
"0.57651997",
"0.57532793",
"0.5717989",
"0.56828153",
"0.56436884",
"0.56368405",
"0.55992454",
"0.5596445",
"0.55960125",
"0.5587883",
"0.55862504",
"0.55508083",
"0.55482996"
]
| 0.76469654 | 0 |
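The positive document is Python 2-era code that treats the cache as expired when its mtime is older than `expiration` seconds and wraps any failure in `IOError`. A hedged Python 3 sketch of the same load-or-expire logic (the archive-on-expire branch is omitted and the names are assumptions):

```python
import json
import os
import time

def load_cache(cache_path, expiration=0):
    """Load a JSON cache file, raising IOError if it is missing or expired."""
    try:
        if expiration > 0:
            elapsed = time.time() - os.stat(cache_path).st_mtime
            if elapsed > expiration:
                raise IOError('cache expired')
        with open(cache_path) as f:
            return json.load(f)
    except Exception as e:
        # Collapse every failure mode into IOError, mirroring the row's contract.
        raise IOError('Could not load cache file %s: %s' % (cache_path, e))
```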
Saves the given obj to the given cachepath. Returns the file size of the cache file. | def savecache(self, obj, cachepath):
try:
os.makedirs(os.path.dirname(cachepath))
except OSError:
pass
tmpfname = cachepath+'.tmp-%d' % (int(time.time()*1000))
f = open(tmpfname, 'wb')
if self.serializer == 'json':
json.dump(obj, f, indent=2, sort_keys=1)
elif self.serializer == 'pickle':
pickle.dump(obj, f, -1)
try:
os.rename(tmpfname, cachepath)
size = os.stat(cachepath).st_size
except Exception:
print 'Savecache rename failed from %s to %s' % (tmpfname, cachepath)
raise
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cache_save(item: str, obj: object) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tcache_create()\n\n\tpickle.dump(obj, open(cache, \"wb\"))\n\tuux.show_debug(\"Cached object to \" + cache)",
"def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)",
"def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)",
"def get_size(obj: object, seen=None):\n size = getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n\n def request(self, key: K) -> V:\n pass\n\n def _get(self, key: K) -> V:\n value = self.request(key)\n store = Retrievable(value)\n self.CACHE.update({key: store})\n return value",
"def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)",
"def save_cached_object(object, filename, directory='Cache/'):\r\n if filename[-4:] != '.pkl':\r\n filename = filename+'.pkl'\r\n try:\r\n modified_date = time.strptime(time.ctime(os.path.getmtime(directory+filename)), '%a %b %d %H:%M:%S %Y')\r\n modified_date = datetime.fromtimestamp(time.mktime(modified_date)).strftime('%m-%d-%y_%H%M')\r\n archived_file = directory+'Archive/'+filename[:len(filename)-4]+'--'+modified_date+'.pkl'\r\n os.rename(directory+filename,archived_file)\r\n log.info('Cached object already exists with given filename, archiving old object to: '+ archived_file)\r\n except WindowsError:\r\n pass\r\n joblib.dump(object, directory+filename, compress=9)\r\n log.info('New object cached to: '+directory+filename)",
"def __get_cache_size(self):\n total = 0\n for entry in os.scandir(self.cacheDir):\n total += entry.stat(follow_symlinks=False).st_size\n if self.__log:\n self.__logger.info(f\"Cache size: {total} bytes\")\n return total",
"def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)",
"def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)",
"def filesize(self, path):\n arinfo = self._handle.getmember(path)\n return arinfo.size",
"def filesize(self, path):\n arinfo = self._handle.getmember(path)\n return arinfo.size",
"def getsize(path):\n return get_instance(path).getsize(path)",
"def get_size(obj, seen=None):\r\n size = sys.getsizeof(obj)\r\n if seen is None:\r\n seen = set()\r\n obj_id = id(obj)\r\n if obj_id in seen:\r\n return 0\r\n # Important mark as seen *before* entering recursion to gracefully handle\r\n # self-referential objects\r\n seen.add(obj_id)\r\n if isinstance(obj, dict):\r\n size += sum([get_size(v, seen) for v in obj.values()])\r\n size += sum([get_size(k, seen) for k in obj.keys()])\r\n elif hasattr(obj, '__dict__'):\r\n size += get_size(obj.__dict__, seen)\r\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\r\n size += sum([get_size(i, seen) for i in obj])\r\n return size",
"def get_size(obj, seen=None):\r\n size = sys.getsizeof(obj)\r\n if seen is None:\r\n seen = set()\r\n obj_id = id(obj)\r\n if obj_id in seen:\r\n return 0\r\n \r\n # Important mark as seen *before* entering recursion to gracefully handle\r\n # self-referential objects\r\n \r\n seen.add(obj_id)\r\n if isinstance(obj, dict):\r\n size += sum([get_size(v, seen) for v in obj.values()])\r\n size += sum([get_size(k, seen) for k in obj.keys()])\r\n elif hasattr(obj, '__dict__'):\r\n size += get_size(obj.__dict__, seen)\r\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\r\n size += sum([get_size(i, seen) for i in obj])\r\n return size",
"def get_size(obj, seen=None):\r\n size = sys.getsizeof(obj)\r\n if seen is None:\r\n seen = set()\r\n obj_id = id(obj)\r\n if obj_id in seen:\r\n return 0\r\n\r\n # Important mark as seen *before* entering recursion to gracefully handle\r\n # self-referential objects\r\n\r\n seen.add(obj_id)\r\n if isinstance(obj, dict):\r\n size += sum([get_size(v, seen) for v in obj.values()])\r\n size += sum([get_size(k, seen) for k in obj.keys()])\r\n elif hasattr(obj, '__dict__'):\r\n size += get_size(obj.__dict__, seen)\r\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\r\n size += sum([get_size(i, seen) for i in obj])\r\n return size",
"def get_size(fileobj):\n\n old_pos = fileobj.tell()\n try:\n fileobj.seek(0, 2)\n return fileobj.tell()\n finally:\n fileobj.seek(old_pos, 0)",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, \"__dict__\"):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, \"__iter__\") and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size",
"def cache_save_hashed(item: str, obj: object) -> None:\n\tcache_save(md5(item), obj)",
"def analyze_file_obj(obj):\n pos = 0\n if hasattr(obj, 'tell'):\n pos = obj.tell()\n\n # Handle cStringIO and StringIO\n if hasattr(obj, 'getvalue'):\n # Why using getvalue() makes sense:\n # For StringIO, this string is pre-computed anyway by read().\n # For cStringIO, getvalue() is the only way\n # to determine the length without read()'ing the whole thing.\n raw_data = obj.getvalue()\n if pos == 0:\n return (len(raw_data), raw_data)\n else:\n # We could return raw_data[pos:], but that could drastically\n # increase memory usage. Better to read it block at a time.\n size = max(0, len(raw_data) - pos)\n return (size, None)\n\n # Handle real files\n if hasattr(obj, 'fileno'):\n size = max(0, os.fstat(obj.fileno()).st_size - pos)\n return (size, None)\n\n # User-defined object with len()\n if hasattr(obj, '__len__'):\n size = max(0, len(obj) - pos)\n return (size, None)\n\n # We don't know what kind of stream this is.\n # To determine the size, we must read the whole thing.\n raw_data = obj.read()\n return (len(raw_data), raw_data)",
"def get_size(self, bucket, object):\n self.log.info('Checking the file size of object: %s in bucket: %s', object, bucket)\n service = self.get_conn()\n try:\n response = service.objects().get(\n bucket=bucket,\n object=object\n ).execute()\n\n if 'name' in response and response['name'][-1] != '/':\n # Remove Directories & Just check size of files\n size = response['size']\n self.log.info('The file size of %s is %s', object, size)\n return size\n else:\n raise ValueError('Object is not a file')\n except errors.HttpError as ex:\n if ex.resp['status'] == '404':\n raise ValueError('Object Not Found')",
"def save(self, obj):\r\n if self.np is not None and type(obj) in (self.np.ndarray,\r\n self.np.matrix, self.np.memmap):\r\n size = obj.size * obj.itemsize\r\n if self.compress and size < self.cache_size * _MEGA:\r\n # When compressing, as we are not writing directly to the\r\n # disk, it is more efficient to use standard pickling\r\n if type(obj) is self.np.memmap:\r\n # Pickling doesn't work with memmaped arrays\r\n obj = self.np.asarray(obj)\r\n return Pickler.save(self, obj)\r\n self._npy_counter += 1\r\n try:\r\n filename = '%s_%02i.npy' % (self._filename,\r\n self._npy_counter)\r\n # This converts the array in a container\r\n obj, filename = self._write_array(obj, filename)\r\n self._filenames.append(filename)\r\n except:\r\n self._npy_counter -= 1\r\n # XXX: We should have a logging mechanism\r\n print('Failed to save %s to .npy file:\\n%s' % (\r\n type(obj),\r\n traceback.format_exc()))\r\n return Pickler.save(self, obj)",
"def cache_size(self):\n return self.cachesize",
"def saveCacheFile(self):\n with open(self.cachePath, 'w', encoding='utf-8') as outfile:\n json.dump(self.cacheData, outfile)",
"def file_size(self):\n return self.context.getObjSize(self.context)",
"def get_size(obj, seen=None):\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if hasattr(obj, '__dict__'):\n for cls in obj.__class__.__mro__:\n if '__dict__' in cls.__dict__:\n d = cls.__dict__['__dict__']\n if inspect.isgetsetdescriptor(d) or inspect.ismemberdescriptor(d):\n size += get_size(obj.__dict__, seen)\n break\n if isinstance(obj, dict):\n size += sum((get_size(v, seen) for v in obj.values()))\n size += sum((get_size(k, seen) for k in obj.keys()))\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum((get_size(i, seen) for i in obj))\n\n if hasattr(obj, '__slots__'): # can have __slots__ with __dict__\n size += sum(get_size(getattr(obj, s), seen) for s in obj.__slots__ if hasattr(obj, s))\n\n\n return size"
]
| [
"0.639193",
"0.60019094",
"0.59937465",
"0.5935359",
"0.5934574",
"0.58999723",
"0.5722097",
"0.56716526",
"0.5670551",
"0.5631007",
"0.5631007",
"0.5607497",
"0.55910337",
"0.55860263",
"0.55671954",
"0.5562444",
"0.556237",
"0.55445635",
"0.55445635",
"0.55445635",
"0.55445635",
"0.55445635",
"0.5516412",
"0.5514592",
"0.55062723",
"0.55051243",
"0.54416025",
"0.54274255",
"0.5408448",
"0.5406127"
]
| 0.86657774 | 0 |
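The positive document writes the cache atomically: serialize to a temporary file, `os.rename` it over the target, then report the resulting file size. A Python 3 sketch of that pattern, assuming JSON serialization only (the pickle branch is left out):

```python
import json
import os
import time

def save_cache(obj, cache_path):
    """Atomically write obj as JSON to cache_path and return the file size."""
    os.makedirs(os.path.dirname(cache_path) or '.', exist_ok=True)
    tmp = '%s.tmp-%d' % (cache_path, int(time.time() * 1000))
    with open(tmp, 'w') as f:
        json.dump(obj, f, indent=2, sort_keys=True)
    os.rename(tmp, cache_path)  # atomic on POSIX within a single filesystem
    return os.stat(cache_path).st_size
```

The temp-file-then-rename step is what keeps readers from ever observing a half-written cache file.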
Calls the api function many times. Put tuples of (args, kw) into allargs. Yields results in the same order as inputs, as we compute them. | def callmany(self, allargs):
seqs = []
# add all inputs to queue
for args, kw in allargs:
t = time.time()
seqs.append(t)
self.inq.put((t, args, kw))
# read all outputs
outs = {}
while len(seqs) > 0:
t, ret = self.outq.get()
# if we don't know this seq number, put it back on the queue
if t not in seqs:
self.outq.put((t, ret))
time.sleep(0.01)
continue
# if this is the first item, yield it
if not seqs: break
if t == seqs[0]:
seqs.pop(0)
yield ret
# also check if we have the next item(s) done
while seqs and seqs[0] in outs:
t = seqs.pop(0)
ret = outs.pop(t)
yield ret
continue
# else, save it for future use
outs[t] = ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iter_call(self, name, *args, **kwargs):\r\n return self.client.iter_call(self.name, name, *args, **kwargs)",
"def all_(*args, **kwargs):\n ...",
"def call_each(self, methname, *args, **kwargs):\n return MultiCall(self.listattr(methname), *args, **kwargs).execute()",
"def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper",
"def input(self, *args, **kwargs):\n if args:\n for arg in args:\n yield None, arg\n\n else:\n # if we don't have any arguments we want .output() to do one iteration\n yield None, None",
"def multicall(self, req, signatures):\n for signature in signatures:\n try:\n yield self.get_method(signature['methodName'])(req, signature['params'])\n except Exception, e:\n yield e",
"def __call__(self, *args, **kwargs):\n for key, obj in self._dict.items():\n key[0](obj, *args, **kwargs)",
"def for_loop(num_iters, body, initial_args):\n for i in range(num_iters):\n if i == 0:\n outputs = body(*initial_args)\n else:\n outputs = body(*outputs)\n return outputs",
"def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n args = self._process_args_dict(cf, uri, args_dict)\n func(*args)",
"def iterate(func, x):\n while True:\n x = func(x)\n yield x",
"def _threaded(self, *args, **kwargs):\n\n for target in self.targets:\n result = target(*args, **kwargs)\n self.queue.put(result)",
"def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch",
"def test_decorated(*args):\n for i in args:\n yield i",
"def __iter__(self):\n yield from self.calls",
"def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]",
"def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)",
"def with_outer(*args):\n def generator():\n for i in args:\n yield i\n return generator",
"def index(*args, **kwargs):\n yield from _generate(*args, **kwargs)",
"def _for_each_generator(self,\n func: Callable[..., Any],\n *args: Iterable[Any]) -> List[Any]:\n return [func(gen, *args_for_func) for gen, args_for_func in zip(\n self._generators, zip(*args))]",
"def __call__(self, words, offset):\n for b in self._backends:\n for alt, l in b(words, offset): # yield from\n yield alt, l",
"def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None and not args:\n out = _iterate(func, x)\n elif index is None:\n out = _iterate_n(func, (x, *args))\n else:\n if not args:\n out = _iterate_indexed(func, index, x)\n else:\n out = _iterate_indexed_n(func, index, (x, *args))\n\n return Iter(out)",
"def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out",
"def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)",
"def in_parallel(*args):\n \n # Execute each in a thread and return them all.\n return ThreadPool(len(args)).map(lambda x: x(), args)",
"def evaluate(*args, **kwargs):\n yield from _generate(*args, **kwargs)",
"def iterate_layers(self, *args):\n for layer in range(self.num_layers):\n yield layer, (\n (\n direction,\n tuple(arg[self.num_directions * layer + direction] for arg in args),\n )\n for direction in range(self.num_directions)\n )",
"def run(self):\n self.fn(*self.args, **self.kwargs)",
"def multiple_eval_for_loops_v1():",
"def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()",
"def iterable_hook(self, name, iterable):\n for record in iterable:\n self(name, record)\n yield record"
]
| [
"0.6339846",
"0.6080391",
"0.5980732",
"0.5929507",
"0.5679536",
"0.56711113",
"0.5670682",
"0.5644141",
"0.5629318",
"0.5613347",
"0.5596558",
"0.55757564",
"0.5565455",
"0.55366033",
"0.5532328",
"0.5532285",
"0.55107445",
"0.54587275",
"0.5406769",
"0.5400489",
"0.5386842",
"0.53644013",
"0.5355985",
"0.5320115",
"0.5303901",
"0.5249461",
"0.5248241",
"0.5245148",
"0.5241542",
"0.5226729"
]
| 0.6574676 | 0 |
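The positive document pushes `(args, kw)` pairs onto an input queue tagged with timestamp sequence numbers and re-orders completed results before yielding them. The same contract (concurrent execution, results yielded in input order) can be sketched with `concurrent.futures`; note this is a substitute technique, not the row's queue-and-reorder-buffer implementation:

```python
from concurrent.futures import ThreadPoolExecutor

def call_many(func, allargs, workers=4):
    """Yield func(*args, **kw) for each (args, kw) in allargs, in input order."""
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # Submission order is preserved by iterating the futures list directly.
        futures = [pool.submit(func, *args, **kw) for args, kw in allargs]
        for fut in futures:
            yield fut.result()
```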
upload music url and music preview | def upload_url(self, url, preview):
return super(PicovicoMusic, self).upload_url(url, preview_url=preview) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_music(handler):\n user_id = handler.get_argument('user_id')\n music_path = handler.get_argument('path') #Having problems parsing this out\n sound_cloud_client = Petitions.instantiate_user(user_id)\n track = sound_cloud_client.post('/tracks', track={\n 'title': 'Testing Uploads',\n 'asset_data': open(music_path, 'rb')\n })\n\n return track.permalink_url # Improve messages. Change to Json",
"async def download_audio(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"`What I am Supposed to find? Give link`\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, audio_opts, url)\n if ytdl_data is None:\n return\n await codevent.edit(\n f\"`Preparing to upload song:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n f = pathlib.Path(f\"{ytdl_data['title']}.mp3\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n c_time = time.time()\n ul = io.open(f, \"rb\")\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n supports_streaming=True,\n force_document=False,\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await codevent.delete()",
"def demonstrate():\n\n api = authenticate()\n\n # Demonstrate upload feature.\n # Create a list of one or more file paths of the mp3s you would like \n # to upload\n filepaths = []\n filepaths.append('./song1.mp3')\n\n # Upload an mp3 to your library. upload() returns a tuple of information\n # about the success or failure of uploads\n print(\"Beginning upload...\\n\")\n uploaded = api.upload(filepaths) \n\n # Print all successfully uploaded songs\n if len(uploaded[0]) > 0:\n print(\"Successfully uploaded:\")\n i = 1\n for key in uploaded[0]:\n print(\"%d. %s\" % (i, key))\n i += 1\n\n # Print all unsuccessfully uploaded songs and a description of why\n # songs weren't uploaded\n if len(uploaded[2]) == 0:\n print(\"\\nAll songs successfully uploaded.\")\n else:\n print(\"Not all songs were successfully uploaded:\")\n i = 1\n for key in uploaded[2]:\n print(\"%d. %s not uploaded: %s\" % (i, key, uploaded[2][key]))\n i += 1\n\n\n # Demonstrate download feature\n # Get information about songs previously uploaded that are available\n # to be downloaded\n uploaded_songs = api.get_uploaded_songs()\n\n if len(uploaded_songs) == 0:\n print(\"There are no songs currently available for download\")\n else:\n # Print songs that are available for download and store their ids\n # so we can download them\n song_ids = []\n print(\"\\nThe following songs are available for download\")\n for i in range(len(uploaded_songs)):\n song_ids.append(uploaded_songs[i]['id'])\n print(\"%d. %s\" % (i+1, uploaded_songs[i]['title']))\n\n # Download uploaded songs from your library\n print(\"\\nBeginning download...\")\n for i in range(len(song_ids)):\n filename, audio = api.download_song(song_ids[i])\n\n # Write song to disk\n with open(filename, 'wb') as f:\n f.write(audio)\n\n print(\"%d. Written to ./%s\" % (i + 1, filename))\n print(\"\\nDownload complete.\")\n\n # It's good practice to logout when finished\n api.logout()",
"def put(self, request):\n ProcessingService.save_file(uploaded_file=request.data['file'],\n artist=request.data['artist'], title=request.data['title'])\n tasks.process_audio.delay(uploaded_file_path=settings.FILE_UPLOAD_DIR + request.data['file'].name,\n artist=request.data['artist'], title=request.data['title'])\n return Response(status=status.HTTP_200_OK)",
"def audio_file_player(self):\n if self.audio_file:\n file_url = settings.MEDIA_URL + str(self.content)\n player_string = '<audio src=\"%s\" controls>Your browser does not support the audio element.</audio>' % (file_url)\n return player_string",
"def testMediaUpload(self):\n self._testUpload(DefaultStorage(), 'media')\n self._testUpload(StaticStorage(), 'static')",
"def play_local(self, music, device):\n # Look at all the files in the specified directory and add their URIs.\n mp3s = []\n try:\n files = os.listdir(music)\n except OSError, ex:\n logging.warning(\"OS Error: %s\", ex)\n return\n for filename in files:\n if filename.endswith(\".mp3\"):\n mp3s.append(os.path.join(self.webserver, music,\n urllib.pathname2url(filename)))\n\n device.play(sorted(mp3s))",
"def post(self, request) -> redirect:\n form = UploadMusicForm(request.POST, request.FILES)\n if form.is_valid():\n music = form.save()\n playpos = PlayPosition(position=music,\n plist=request.user.profile)\n playpos.add_order()\n playpos.save()\n\n return redirect('/accounts/profile/{}/music/'.format(request.user.profile.custom_url))",
"def media_content_type(self):\n return MEDIA_TYPE_MUSIC",
"def media_content_type(self):\n return MEDIA_TYPE_MUSIC",
"def media_content_type(self):\n return MEDIA_TYPE_MUSIC",
"def media_content_type(self):\n return MEDIA_TYPE_MUSIC",
"def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):\n\n try:\n audio = EasyMP3(filename)\n audio.tags = None\n audio[\"artist\"] = artist\n audio[\"title\"] = title\n if year:\n audio[\"date\"] = str(year)\n if album:\n audio[\"album\"] = album\n if track_number:\n audio[\"tracknumber\"] = track_number\n if genre:\n audio[\"genre\"] = genre\n if url: # saves the tag as WOAR\n audio[\"website\"] = url\n audio.save()\n\n if artwork_url:\n\n artwork_url = artwork_url.replace('https', 'http')\n\n mime = 'image/jpeg'\n if '.jpg' in artwork_url:\n mime = 'image/jpeg'\n if '.png' in artwork_url:\n mime = 'image/png'\n\n if '-large' in artwork_url:\n new_artwork_url = artwork_url.replace('-large', '-t500x500')\n try:\n image_data = requests.get(new_artwork_url).content\n except Exception as e:\n # No very large image available.\n image_data = requests.get(artwork_url).content\n else:\n image_data = requests.get(artwork_url).content\n\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(\n APIC(\n encoding=3, # 3 is for utf-8\n mime=mime,\n type=3, # 3 is for the cover image\n desc='Cover',\n data=image_data\n )\n )\n audio.save()\n\n # because there is software that doesn't seem to use WOAR we save url tag again as WXXX\n if url:\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(WXXX(encoding=3, url=url))\n audio.save()\n\n return True\n\n except Exception as e:\n puts(colored.red(\"Problem tagging file: \") + colored.white(\"Is this file a WAV?\"))\n return False",
"def media_content_type(self):\n return MEDIA_TYPE_MUSIC\n # return MEDIA_TYPE_PLAYLIST",
"def videoclipupload(request, hash_key):\n # need access to temporary_file_path of uploaded clip files so \n # this view always reads files to disk during uploads\n request.upload_handlers = [TemporaryFileUploadHandler()]\n return _videoclipupload(request, hash_key)",
"def get(self, request) -> render:\n context = self.get_menu_context('music', 'Загрузка музыки')\n context['music_pages'] = 'upload'\n\n context['form'] = UploadMusicForm()\n\n return render(request, self.template_name, context)",
"def track_01():\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)\n return \"Ok\"",
"def OnPlay(self):\r\n # check if there is a file to play, otherwise open a\r\n # Tk.FileDialog to select a file\r\n print(\"1-1\")\r\n\r\n\r\n self.Media = self.Instance.media_new(self.youtube_url)\r\n self.player.set_media(self.Media)\r\n\r\n # set the window id where to render VLC's video output\r\n if platform.system() == 'Windows':\r\n print(\"1-3\")\r\n self.player.set_hwnd(self.GetHandle())\r\n else:\r\n print(\"1-4\")\r\n self.player.set_xwindow(self.GetHandle()) # this line messes up windows\r\n # FIXME: this should be made cross-platform\r\n\r\n # Try to launch the media, if this fails display an error message\r\n if self.player.play() == -1:\r\n print(\"1-6\")\r\n self.errorDialog(\"Unable to play.\")",
"def on_add_file(self, event):\n wildcard = \"Media Files (*.*)|*.*\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentFolder, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.currentFolder = os.path.dirname(path[0])\n trackPath = '\"%s\"' % path.replace(\"\\\\\", \"/\")\n self.mplayer.Loadfile(trackPath)\n \n t_len = self.mplayer.GetTimeLength()\n self.playbackSlider.SetRange(0, t_len)\n self.playbackTimer.Start(100)",
"def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)",
"def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"",
"def example_music(self, **kwargs):\n kwargs.setdefault(\"paused\", True)\n kwargs.setdefault(\"title\", \"music\")\n kwargs.setdefault(\"artist\", \"artist\")\n kwargs.setdefault(\"album\", \"album\")\n kwargs.setdefault(\"total_time\", 49)\n kwargs.setdefault(\"position\", 22)\n kwargs.setdefault(\"genre\", \"genre\")\n self.music_playing(**kwargs)",
"def soundcloud_submit(request):\n try:\n response = request.GET\n # TODO: Clean Up and Get rid of unnecessary code\n variable_dictionary = {\n \"url\": response.get('url'),\n \"output_path\": response.get('output_path'),\n \"is_playlist_or_song\": response.get('is_playlist_or_song') in ['true'],\n \"only_mp3\": response.get('only_mp3') in ['true'],\n \"add_artist_to_files\": response.get('add_artist_to_files') in ['true'],\n \"continue_if_exists\": response.get('download_artist') in ['true'],\n \"download_all_tracks_and_reposts\": response.get('download_all_tracks_and_reposts') in ['true'],\n \"download_user_uploads\": response.get('download_user_uploads') in ['true'],\n \"download_favorites\": response.get('download_favorites' in ['true']),\n \"download_playlist\": response.get('download_playlist') in ['true'],\n \"download_like_and_owned_playlists\": response.get('url') in ['true'],\n \"downloaded_commented_tracks\": response.get('downloaded_commented_tracks') in ['true']\n }\n soundcloud_dl = SoundcloudDLWorker(url=variable_dictionary[\"url\"],\n output_path=variable_dictionary[\"output_path\"],\n is_playlist_or_song=variable_dictionary[\"is_playlist_or_song\"],\n continue_if_exists=variable_dictionary[\"continue_if_exists\"],\n only_mp3=variable_dictionary[\"only_mp3\"],\n add_artist_to_files=variable_dictionary[\"add_artist_to_files\"],\n download_all_tracks_and_reposts=variable_dictionary[\n \"download_all_tracks_and_reposts\"],\n download_user_uploads=variable_dictionary[\"download_user_uploads\"],\n download_favorites=variable_dictionary[\"download_favorites\"],\n download_playlist=variable_dictionary[\"download_playlist\"],\n download_like_and_owned_playlists=variable_dictionary[\n \"download_like_and_owned_playlists\"],\n downloaded_commented_tracks=variable_dictionary[\n \"downloaded_commented_tracks\"]\n )\n soundcloud_dl.make_command()\n except Exception as e:\n print (e)\n return JsonResponse({\"success\": \"Downloaded Files successfully\"})",
"def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'",
"def _UploadFile(self, media_source, title, category):\n media_entry = gdata.GDataEntry()\n media_entry.title = atom.Title(text=title)\n media_entry.category.append(category)\n media_entry = self.Post(media_entry, '/feeds/documents/private/full',\n media_source = media_source,\n extra_headers = {'Slug' : media_source.file_name })\n\n return media_entry",
"def set_tags(temp, filename, kwargs):\n song = kwargs[\"track_db\"][int(temp[-1])]\n try:\n song_file = MP3(filename, ID3=EasyID3)\n except mutagen.MutagenError as e:\n log.debug(e)\n print(\n f\"Failed to download: {filename}, please ensure YouTubeDL is up-to-date. \"\n )\n\n return\n song_file[\"date\"] = song.get(\"year\")\n if kwargs[\"keep_playlist_order\"]:\n song_file[\"tracknumber\"] = str(song.get(\"playlist_num\"))\n else:\n song_file[\"tracknumber\"] = (\n str(song.get(\"num\")) + \"/\" + str(song.get(\"num_tracks\"))\n )\n\n song_file[\"genre\"] = song.get(\"genre\")\n song_file.save()\n song_file = MP3(filename, ID3=ID3)\n cover = song.get(\"cover\")\n if cover is not None:\n if cover.lower().startswith(\"http\"):\n req = urllib.request.Request(cover)\n else:\n raise ValueError from None\n with urllib.request.urlopen(req) as resp: # nosec\n song_file.tags[\"APIC\"] = APIC(\n encoding=3,\n mime=\"image/jpeg\",\n type=3,\n desc=\"Cover\",\n data=resp.read(),\n )\n song_file.save()",
"def signal_metadata(self, url, artist=None, title=None):\n\n # default values to return\n path = None\n full_title = None\n lyrics = 'No lyrics'\n\n # if url is null, send an empty message with the default values\n # (happens when the player has just started and is not playing)\n # if not, extract lyrics\n if url != None:\n # decode path from url\n path = urllib.parse.urlparse(url).path\n path = urllib.parse.unquote(path)\n\n # extract the artist name and title\n # then create a window title from them\n full_title = artist + ' - ' + title\n\n try:\n # extract the lyrics from the file using mutagen\n tags = mutagen.id3.ID3(path)\n lyrics_tag = tags.getall('USLT')\n\n if len(lyrics_tag) > 0:\n lyrics = lyrics_tag[0].text\n except mutagen.id3.ID3NoHeaderError:\n # no lyrics in the file\n pass\n\n # do not return /home/username if we can replace it with '~'\n home = GLib.get_home_dir()\n if path.startswith(home):\n path = path.replace(home, '~', 1)\n\n self.callback_func(path, full_title, lyrics)",
"def play_song(self):\r\n path = input('Give path to wanted song: ') # Request path to song\r\n path = path.replace('\\\\', '/')\r\n if not self.path_storage_re.match(path): # Check if the wanted song is from the storage directory\r\n print(\"Give a valid path\")\r\n else:\r\n p = vlc.MediaPlayer(path) # Create VLC instance and play the song\r\n p.play()\r\n self.playSong.append(p)\r\n self.isPlaying = True",
"def studio_submit(self, data, suffix=''):\n self.oppiaid = data.get('oppiaid')\n self.src = data.get('src')\n self.width = data.get('width')\n self.height = data.get('height')\n\n return {'result': 'success'}",
"def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')"
]
| [
"0.73513263",
"0.61659884",
"0.6155535",
"0.598074",
"0.5892458",
"0.58604306",
"0.58441514",
"0.5799063",
"0.5745149",
"0.5745149",
"0.5745149",
"0.5745149",
"0.56834704",
"0.56557757",
"0.56421643",
"0.5635405",
"0.55895376",
"0.55888253",
"0.55803883",
"0.55577713",
"0.55388135",
"0.5535722",
"0.5524565",
"0.5516591",
"0.55051816",
"0.54955554",
"0.54954416",
"0.548629",
"0.5482444",
"0.54543334"
]
| 0.76914215 | 0 |
update maxcounts and subjcounts | def update_max_counts(self, label, nvals):
    if label not in self.maxcounts:
        if self.verb > 1:
            print('** found new label key: %s' % label)
        self.maxcounts[label] = nvals
    else: # rcr - safe as one line? will it be parsed?
        if nvals > self.maxcounts[label]: self.maxcounts[label] = nvals
    self.subjcounts[label] += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"counts\", counts)",
"def update_frequencies():\n pass",
"def _update_max_value(k, mi, by_gene):\n # Update the max mutual info.\n if mi is not None:\n by_gene[k] = max(by_gene.get(k, 0), mi)",
"def update_count(self):\n pass",
"def update_highest(csevo):\n tmax = [t[np.argmax(N)] for (t, N) in figure_to_data(csevo)]\n\n data = [{\n \"x\": list(range(len(tmax))), \"y\":tmax, \"type\":\"bar\"\n }]\n\n layout = {\n \"title\":'Time of largest abundance',\n \"template\":\"plotly_dark\",\n \"xaxis\":{\"title\":\"Charge state\", \"range\":[0, len(tmax)]},\n \"yaxis\":{\"title\":\"Time (s)\", \"type\":\"log\"}\n }\n\n return {\"data\":data, \"layout\":layout}",
"def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1",
"def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)",
"def correct_counts():\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n unique = articles.distinct('feed_source', dict())\n for link in unique:\n count = articles.count({'feed_source': link})\n monitors.update({'metadata.rss_link': link}, {'$set': {'hits': count}})",
"def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))",
"def update_count(self):\n pass # Do nothing",
"def count_max(alon):\n return count_max_acc(alon, alon[0], 0, 0)",
"def _update_invalid_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"invalid_counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"invalid_counts\", counts)",
"def count_max_acc(alon, curr_max, count, pos):\n if pos == len(alon):\n return count\n curr_num = alon[pos]\n if curr_num > curr_max:\n curr_max = curr_num\n count = 0\n if curr_num == curr_max:\n count += 1\n return count_max_acc(alon, curr_max, count, pos+1)",
"def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)",
"def __cross_wiki_counts(self):\n\n print(\"Updating counts by merging with CrossWiki\")\n\n cnt = 0\n crosswiki_path = os.path.join(\n self.base_url, \"generic/p_e_m_data/crosswikis_p_e_m.txt\"\n )\n\n with open(crosswiki_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n parts = line.split(\"\\t\")\n mention = unquote(parts[0])\n\n if (\"Wikipedia\" not in mention) and (\"wikipedia\" not in mention):\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n num_ents = len(parts)\n for i in range(2, num_ents):\n ent_str = parts[i].split(\",\")\n ent_wiki_id = int(ent_str[0])\n freq_ent = int(ent_str[1])\n\n if (\n ent_wiki_id\n not in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)\n if (\n ent_name_re\n in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]\n ):\n ent_wiki_id = self.wikipedia.wiki_id_name_map[\n \"ent_name_to_id\"\n ][ent_name_re]\n\n cnt += 1\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += freq_ent\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += freq_ent",
"def __init__(self):\n self.num_counts = {}",
"def update_count(self, source, geometry, count):\n if source in self._counts:\n if geometry in self._counts[source]:\n self._total_count -= self._counts[source][geometry]\n self._counts[source][geometry] = count\n else:\n self._counts[source] = {geometry: count}\n self._total_count += count\n self.change_text()",
"def update_subreddit_stats(new_subreddits, is_edit_or_delete = False):\n\t\n\tfor subreddit in new_subreddits.items():\t\n\t\twith _conn.cursor() as cur:\n\t\t\tif is_edit_or_delete:\n\t\t\t\tcur.execute('UPDATE subreddit_stats SET count = count - %(num)s WHERE sub = %(subreddit)s;' +\n\t\t\t\t\t\t'DELETE FROM subreddit_stats WHERE count = 0;', {'subreddit':subreddit[0], 'num':subreddit[1]})\n\t\t\telse:\n\t\t\t\t# I opted for this instead of upsert because it seemed simpler.\n\t\t\t\tcur.execute('UPDATE subreddit_stats SET count = count + %(num)s WHERE sub = %(subreddit)s;' +\n\t\t\t\t\t\t\t'INSERT INTO subreddit_stats (sub, count) SELECT %(subreddit)s, %(num)s WHERE NOT EXISTS (SELECT 1 FROM subreddit_stats WHERE sub = %(subreddit)s);',\n\t\t\t\t\t\t\t{'subreddit':subreddit[0], 'num':subreddit[1]})\n\t_conn.commit()",
"def update_subreddit_stats(new_subreddits, is_edit_or_delete=False):\n\n for subreddit in new_subreddits.items():\n with _conn.cursor() as cur:\n if is_edit_or_delete:\n cur.execute(\"UPDATE subreddit_stats SET count = count - %(num)s WHERE sub = '%(subreddit)s';\"\n \"DELETE FROM subreddit_stats WHERE count = 0;\" % {\"subreddit\":subreddit[0], \"num\":subreddit[1]})\n else:\n # I opted for this instead of upsert because it seemed simpler.\n cur.execute(\"UPDATE subreddit_stats SET count = count + %(num)s WHERE sub = '%(subreddit)s';\"\n \"INSERT INTO subreddit_stats (sub, count) SELECT '%(subreddit)s', %(num)s WHERE NOT EXISTS\"\n \"(SELECT 1 FROM subreddit_stats WHERE sub = '%(subreddit)s');\" %\n {\"subreddit\":subreddit[0], \"num\":subreddit[1]})\n _conn.commit()",
"def update_count(self, source, count):\n if source in self._counts:\n self._total_count -= self._counts[source]\n self._counts[source] = count\n self._total_count += count\n self.change_text()",
"def update_species_fitness_hist(self):\n for specie_id, specie in self.species.species.items():\n if specie_id not in self.species_hist: self.species_hist[specie_id] = dict()\n max_fitness = max({m.fitness if m.fitness else 0 for m in specie.members.values()})\n self.species_hist[specie_id][self.generation] = max_fitness",
"def count_transmaxlen(geneclass,statdic,gffdb):\n genetranslen = {}\n \n for i in gffdb.children(geneclass,featuretype='transcript'):\n trans_id = i['transcript_id'][0]\n tmplist = []\n translen = 0\n exonnum = 0\n \n for x in gffdb.children(i,featuretype='exon'):\n translen = translen + (x.end - x.start + 1) \n exonnum = exonnum + 1\n \n tmplist.append(exonnum)\n tmplist.append(translen)\n genetranslen[trans_id] = tmplist\n\n maxtrans = sorted(list(genetranslen.items()), key= lambda x:x[1][1], reverse=True)[0]\n statdic[geneclass.id] = list(maxtrans)\n return statdic",
"def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts",
"def update_status_and_solution(self, all_libs_scores, max_books):\n max_score_lib_idx = np.nanargmax(all_libs_scores) # best library\n lib_scores = self.libs_books_matrix[max_score_lib_idx] * self.all_books_scores\n top_books_idx = np.argsort(lib_scores.flatten())\n max_books_to_scan = max_books[max_score_lib_idx]\n books_to_scan = top_books_idx[-max_books_to_scan:][::-1] # the first best max_books of lib_id\n\n self.sol_libs.append(max_score_lib_idx)\n self.sol_books_lists.append(books_to_scan.tolist())\n\n # zero the scanned books in all the libraries\n self.libs_books_matrix[:, books_to_scan] = 0\n # zero all books in the chosen library\n self.libs_books_matrix[max_score_lib_idx, :] = 0\n # update books count\n self.num_books_in_lib = np.sum(self.libs_books_matrix, axis=1)\n # update days left\n self.days_to_scan -= self.libs_signup_time[max_score_lib_idx]",
"def UpdateCountsHandler(self):\n\n self.response.out.write('<br/><br/>Updating counts<br/>')\n MAX_COUNT = 200\n changesets = Changeset.all().order('-created_at').fetch(MAX_COUNT)\n\n date_of_first_changeset = changesets[0].created_at.date()\n date_of_last_changeset = changesets[-1].created_at.date()\n\n # if the same day for first and last write MAX_COUNT, skip next steps\n if date_of_last_changeset == date_of_first_changeset:\n update_count(date_of_first_changeset, MAX_COUNT)\n self.response.out.write('MAX_COUNT (%d) in this date (%s)<br/>' %\n (MAX_COUNT, str(date_of_first_changeset)) )\n return\n\n date_last = changesets[0].created_at.date()\n count_last = 0\n\n one_day = timedelta(days=1)\n\n for c in changesets:\n date_current = c.created_at.date()\n if date_current == date_last:\n count_last += 1\n else:\n if date_last - date_current > one_day:\n self.response.out.write('need to iterate between dates<br/>')\n d = date_current + one_day\n # iterate between dates, set counts to 0\n while d < date_last:\n self.response.out.write(str(d) + '<br/>')\n update_count(d, 0)\n d += one_day\n self.response.out.write(str(date_last)+': '+str(count_last)+'<br/>')\n is_new_entry = update_count(date_last, count_last)\n if not is_new_entry:\n self.response.out.write('not new entry<br/>')\n if not date_last == date_of_first_changeset:\n self.response.out.write(\n 'count for %s is already in datastore' % \n str(date_last)\n )\n return\n\n\n date_last = c.created_at.date()\n count_last = 1\n if c.created_at.date() == date_of_last_changeset:\n break\n \n self.response.out.write(str(changesets[0].created_at)+'<br/>')\n self.response.out.write(str(changesets[-1].created_at)+'<br/>')",
"def update_popularity(self):\n ratings_count = self.ratings.groupby(['movie_id'])['rating'].count().reset_index()\n sorted_ids = ratings_count.sort_values(['rating'], ascending=[False])\n self.popular = pd.merge(sorted_ids, self.movies, left_on=['movie_id'], right_on=['movie_id'])",
"def _update_count(self):\n self._count = len(self._items)",
"def count_instances(tbl, col2count, colcounted):\n counted_ser = tbl[col2count].value_counts()\n counted_df = pd.DataFrame(counted_ser, columns=[colcounted]).reset_index()\n counted_df.rename(columns={'index':col2count},inplace=True)\n tbl = tbl.merge(counted_df,on=col2count)\n return tbl",
"def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count",
"def counting_sort(the_list, max_value):\n \n # Creates an array that checks num of instances of any int up through max_value\n num_counts = [0] * (max_value + 1)\n \n # Increments count on first pass through input list\n for item in the_list:\n num_counts[item] += 1 \n\n sorted_list = []\n\n for item, count in enumerate(num_counts):\n print \"this is the status of num_counts:\", num_counts\n print \"enumerating item:\", item, \"enumerating count:\", count\n\n for _ in xrange(count):\n sorted_list.append(item)\n\n return sorted_list"
]
| [
"0.58235234",
"0.57351094",
"0.56918263",
"0.56449956",
"0.5610702",
"0.5506785",
"0.5486935",
"0.54524773",
"0.5449593",
"0.5420955",
"0.5404708",
"0.5353762",
"0.5347782",
"0.534608",
"0.52927333",
"0.52230465",
"0.5207815",
"0.51916337",
"0.5191431",
"0.5174872",
"0.5165098",
"0.51592964",
"0.51162183",
"0.5085531",
"0.50737906",
"0.5048533",
"0.5032796",
"0.50180256",
"0.5012308",
"0.5000986"
]
| 0.7142012 | 0 |
insert the new label into the labels list and init maxcounts | def insert_new_label(self, label, index, nvals):
    if label in self.labels: return
    self.labels.append(label)
    self.parents.append(self.find_parent_label(label))
    self.maxcounts[label] = nvals
    self.subjcounts[label] = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_max_counts(self, label, nvals):\n if label not in self.maxcounts:\n if self.verb > 1:\n print('** found new label key: %s' % label)\n self.maxcounts[label] = nvals\n\n else: # rcr - safe as one line? will it be parsed?\n if nvals > self.maxcounts[label]: self.maxcounts[label] = nvals\n\n self.subjcounts[label] += 1",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)",
"def add_labels(self, frame_nums, labels):\n assert all([self._frame_labels[t] is None for t in frame_nums])\n for t, labels_t in zip(frame_nums, labels):\n self._frame_labels[t] = labels_t\n if labels_t.numel() > 0:\n self._highest_instance_id = max(self._highest_instance_id, labels_t.max().item())\n\n return self._highest_instance_id + 1",
"def add_label_sequence(self, label_seq):\n curr_ngram = self.all_grams\n for label in label_seq:\n curr_ngram.add_count()\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # Add count for last label\n curr_ngram.add_count()",
"def rename_labels_by_count(labels):\n new_labels, label_counts = _count_labels(labels)\n\n return new_labels",
"def assign_labels_first_max(document, label_encoder):\n for sentence in document.sentences:\n for word in sentence.words:\n probs = np.stack([p[1] for p in word.tokens[0].predictions])\n label_idx = np.argmax(np.max(probs, axis=0), axis=-1)\n word.predicted_label = label_encoder.inv_label_map[label_idx]",
"def assign_labels_first_freq(document, label_encoder):\n for sentence in document.sentences:\n for word in sentence.words:\n labels = [\n label_encoder.inv_label_map[np.argmax(p[1])]\n for p in word.tokens[0].predictions\n ]\n word.predicted_label = most_common(labels)",
"def num_labels(self) -> int:\n raise NotImplementedError",
"def add_label_counts(labels_df):\n # ---------------------------------------------------------------------\n # label count: count total number of occurences of a label in all label sources\n projects = ['food', 'internet', 'technology', 'media']\n final = []\n for project in projects:\n final.append(pd.read_csv(\n \"data/\" + project + \"/hierarchical_category_names.csv\"))\n final.append(\n pd.read_csv(\"data/\" + project + \"/keyphrases_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/keyword_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/lda_label_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/link_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/lda_label_names.csv\"))\n\n final = pd.concat(final)\n counts = final.groupby([\"label\"]).size().reset_index(name=\"label_count\")\n\n # ---------------------------------------------------------------------\n # label count: count total number of occurences of a label wihtin current project\n # Todo: discuss with Shilad which to use\n counts2 = labels_df.groupby([\"label\"]).size().reset_index(name=\"label_count_project\")\n with_count_all = pd.merge(labels_df, counts, on='label')\n return pd.merge(with_count_all, counts2, on='label')",
"def update_labels(self, frame_num, labels):\n assert self.labels_exist(frame_num)\n assert not self._is_frozen[frame_num]\n assert self._frame_labels[frame_num].shape == self._frame_labels[frame_num].shape\n self._frame_labels[frame_num] = labels\n if labels.numel() > 0:\n self._highest_instance_id = max(self._highest_instance_id, labels.max().item())\n return self._highest_instance_id",
"def labelingLVQ(self):\n numLabels = len(np.unique(self.y))\n for i, x in enumerate(self.x):\n w = self.find_closest(x)[0]\n for nl in range(numLabels):\n if self.y[i] == nl:\n self.labels[nl, w[0], w[1]] += 1\n return self.labels",
"def adjust_labels(data_y, label):\n\n if label == 'locomotion': # Labels for locomotion are adjusted\n data_y[data_y == 4] = 3\n data_y[data_y == 5] = 4\n elif label == 'gestures': # Labels for gestures are adjusted\n data_y[data_y == 406516] = 1\n data_y[data_y == 406517] = 2\n data_y[data_y == 404516] = 3\n data_y[data_y == 404517] = 4\n data_y[data_y == 406520] = 5\n data_y[data_y == 404520] = 6\n data_y[data_y == 406505] = 7\n data_y[data_y == 404505] = 8\n data_y[data_y == 406519] = 9\n data_y[data_y == 404519] = 10\n data_y[data_y == 406511] = 11\n data_y[data_y == 404511] = 12\n data_y[data_y == 406508] = 13\n data_y[data_y == 404508] = 14\n data_y[data_y == 408512] = 15\n data_y[data_y == 407521] = 16\n data_y[data_y == 405506] = 17\n return data_y",
"def reformat_labels(label, bin_limits=[2]):\n# num_labels = y_batch.max() + 1\n label = np.array([label], dtype=np.float32)\n num_labels = 2\n label = np.digitize(label, bins=[2])\n label = (np.arange(num_labels) == label[:, None]).astype(np.float32)[0]\n return label",
"def _on_new_batch(self, data):\n data[self.pid_cols] = self.pid.digitize(data[self.pid_cols])\n #set counts back to 0\n for label in self.labels:\n self.lab_counts[label] = 0 \n for col in self.cat_cols:\n for label in self.labels:\n for val in self.categories[col]:\n self.cat_counts[col][label][val] = 0\n \n #add each row to the counts\n for index, row in data.iterrows():\n label = row[self.target_col_name]\n self.lab_counts[label] += 1\n \n for col in self.cat_cols:\n #skip nans\n if self.isnan(row[col]):\n continue\n val = row[col]\n self.cat_counts[col][label][val] += 1\n \n self._calculate_probs_and_entropies()",
"def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)",
"def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1",
"def update_pivot_labels(l, maxn):\n tmp = maxn+1\n tmps = []\n for i in range(0, len(l)):\n if l[i] > MAX_NODES-1 or l[i] in tmps:\n l[i] = tmp\n tmp += 1\n tmps.append(l[i])\n return l",
"def update_labels(self,label_dict):\n \t\tfor key in self.deps:\n \t\t\tfor dependent in self.deps[key]:\n \t\t\t\tlabel = dependent[1]\n \t\t\t\tlabel_dict[label] = label_dict.get(label,0) + 1\n \t\treturn label_dict",
"def generate_label(self):\n\n last = self.label\n self.label += 1\n self.P.append(last)\n\n return last",
"def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))",
"def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]",
"def _setup_labels(self):\n self._labels = self.get_labels()\n self._labels = self.get_predefined_labels() + list(self._labels)\n self._labels = sorted(self._labels)\n\n self._labels_2_index = {label.lower(): i for i, label in enumerate([self._unknown_label] + self._labels)}\n self._index_2_labels = {i: label for label, i in self._labels_2_index.items()}\n\n self._labels_dim = len(self._labels_2_index)\n return None",
"def update_label(UniqueLabel, Label):\n\n UniqueLabel['Confidence'] = ((UniqueLabel['Confidence'] * UniqueLabel['Count']) + Label['Label']['Confidence'])/(UniqueLabel['Count'] + 1)\n UniqueLabel['TimeStamps'].append(Label['Timestamp'])\n UniqueLabel['Count'] += 1\n\t\n return",
"def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels",
"def add_label_to_unique_species_labels(self, label: str) -> str:\n unique_label, i = label, 0\n while unique_label in self.unique_species_labels:\n unique_label = f'{label}_{i}'\n i += 1\n self.unique_species_labels.append(unique_label)\n return unique_label",
"def propagate_labels_majority(image,labels):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n counts = zeros(amax(rlabels)+1,'i')\n for rlabel, label_, count in cors.T:\n if not rlabel or not label_:\n # ignore background correspondences\n continue\n if counts[rlabel] < count:\n outputs[rlabel] = label_\n counts[rlabel] = count\n outputs[0] = 0\n return outputs[rlabels]",
"def train(self, data, labels, validationData, validationLabels): # data x training, y_training\n # ## TODO: Your code here\n count = util.Counter()\n\n #print 'Labels of the data is:', labels #printing all the labels from which the frequency has to be found\n\n for x in labels: # for loop\n count[x] += 1\n\n #print count\n val = count.argMax()\n self.guess = val\n #print val",
"def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)",
"def _add_label(self):\n\n label = self._label_edit.text()\n labelNames = [i[0] for i in self.labels]\n if not label in list(labelNames):\n self.labels.append((label,0))\n self._option_selector.addItem(label)\n with open(\"{}/labels.txt\".format(self.output_directory), 'a') as file:\n file.write(\"{}\\n\".format(label))\n self._label_edit.setText('')"
]
| [
"0.77373195",
"0.6748753",
"0.64776474",
"0.61958253",
"0.6102394",
"0.60787904",
"0.60749143",
"0.5959312",
"0.59497046",
"0.59487486",
"0.593107",
"0.5894521",
"0.58929664",
"0.5848322",
"0.5846774",
"0.58393145",
"0.5825734",
"0.5815149",
"0.58101875",
"0.57937914",
"0.5778873",
"0.5778638",
"0.57776463",
"0.5750567",
"0.5744481",
"0.5729039",
"0.5719523",
"0.571393",
"0.5712803",
"0.570762"
]
| 0.71766055 | 1 |
try to get subject and possibly group names from infiles; fill self.snames and self.gnames, if possible. 1. get SID: if files look like out.ss_review.SID.txt, that is a good start; else, look for the varying part of the filename. 2. get GID: replace SID in infile names and look for the varying group name | def parse_infile_names(self):
    rv, slist = UTIL.list_minus_pref_suf(self.infiles,'out.ss_review.','.txt')
    if rv < 0: return
    if rv > 0:
        if self.verb > 1: print('++ trying to get SID from glob form')
        slist = UTIL.list_minus_glob_form(self.infiles, strip='dir')
    else:
        if self.verb > 1: print("++ have SIDs from 'out.ss_review' form")
    if len(slist) == 0:
        if self.verb > 1: print("-- empty SID list")
        return
    # make sure names are unique and not empty
    if not UTIL.vals_are_unique(slist):
        if self.verb > 1: print('-- SIDs not detected: not unique')
        return
    minlen = min([len(ss) for ss in slist])
    if minlen < 1:
        if self.verb > 1: print('-- SIDs not detected: some would be empty')
        return
    # we have a subject list
    self.snames = slist
    # now go for GID, start by replacing SIDs in infiles
    newfiles = [fname.replace(slist[ind], 'SUBJ') for ind, fname in
                enumerate(self.infiles)]
    if UTIL.vals_are_constant(newfiles):
        print('-- no groups detected from filenames')
        return
    # okay, try to make a group list
    glist = UTIL.list_minus_glob_form(newfiles)
    # cannot have dirs in result
    for gid in glist:
        if gid.find('/') >= 0:
            if self.verb > 1: print('-- no GIDs, dirs vary in multiple places')
            return
    minlen = min([len(ss) for ss in glist])
    if minlen < 1:
        if self.verb > 1: print('-- GIDs not detected: some would be empty')
        return
    if self.verb > 1: print("++ have GIDs from infiles")
    self.gnames = glist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subjects_info(data_folder, dataset_id, format=\"dict\"):\r\n subjects_info = {} # build of dictionnary of all session for each subject\r\n\r\n if dataset_id == \"raw_clean_32\":\r\n \"\"\" High Versus Low inhibitory Stimuli of Tinnitus and control patients\r\n \"\"\"\r\n patient = 2 # patient group (static for a given dataset)\r\n session = 9 # 6 = 1 old remplacer apres (session 'high')\r\n ses2 = 8 # (session 'low')\r\n names = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(session)))\r\n names2 = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(ses2)))\r\n\r\n pat = []\r\n pat2 = []\r\n for name in names:\r\n # print name.split('_')[0]\r\n pat.append(name.split('_')[0]) # all subjects ID from names\r\n for name in names2:\r\n # print name.split('_')[0]\r\n pat2.append(name.split('_')[0]) # all subjects ID from names2\r\n\r\n for name in names2:\r\n if pat.__contains__(name.split('_')[0]):\r\n if subjects_info.keys().__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name) # add file to the list\r\n else:\r\n subjects_info[name.split('_')[0]] = [name] # add first file to the list\r\n for name in names:\r\n if pat2.__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name)\r\n\r\n elif dataset_id == \"Distress2010\":\r\n \"\"\" High Versus Low Distress patients (1, 2, 3, 4 Distress)\r\n \"\"\"\r\n sub_high = 'high distress'\r\n sub_low = 'low distress'\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, sub_high)) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, sub_low))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if filename[0] in valid_id:\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append({\"distress\": int(filename[0])})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n elif dataset_id == \"Tinnitus_EEG\":\r\n \"\"\" extended Distress2010 dataset with more than 310 patients\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id))\r\n subjects_csv = pd.read_csv(os.path.join(data_folder, dataset_id,\"labels_name_cat_TQ_vas.csv\"),\r\n names=[\"session\", \"distress\", \"TQ\", \"VAS\"], index_col=\"session\")\r\n\r\n for filename in filenames:\r\n if filename.split(\".\")[1] == \"txt\":\r\n if np.any(subjects_csv.index.str.match(filename)):\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n distress_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"distress\"].values[0])\r\n TQ_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"TQ\"].values[0])\r\n VAS_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"VAS\"].values[0])\r\n\r\n symptoms.append({\"distress\": distress_val})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"TQ\": TQ_val, \"VAS\": VAS_val}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n else:\r\n print(\"file \" + 
filename + \" not listed in labels_name_cat_TQ_vas.csv, subject rejected\")\r\n\r\n elif dataset_id == \"NormativeDB\":\r\n \"\"\" Control subjects in resting state\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"M\")) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"F\"))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if not (filename.split(\".\")[0][-2:] == \"EC\"): # remove eyes closed\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append(\"Control\")\r\n symptoms.append({\"distress\": int(0)})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"gender\": filename[2]}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session\r\n\r\n else:\r\n print(\"get_subjects_info: unknown dataset\")\r\n if format == \"DataFrame\":\r\n subjects_info = _subjects_dict_to_pandas(subjects_info)\r\n\r\n return subjects_info",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def parse_files(usage: str, full_time: str, part_time: str, semester: str, total_courses: int) -> dict:\n one = filter_for_semester(open(usage, 'r').readlines(), semester)\n two = get_rows_with_usage(one)\n usage_file = remove_duplicate_crn(two)\n no_dup_r = remove_duplicate_royal(two)\n full_time_file = open(full_time, 'r').readlines()\n full_r = list()\n part_r = list()\n for x in full_time_file:\n y = x.split(DELIMITER)\n full_r.append(y[FAC_ROYAL])\n part_time_file = open(part_time, 'r').readlines()\n for x in part_time_file:\n y = x.split(DELIMITER)\n part_r.append(y[FAC_ROYAL])\n full = list()\n part = list()\n staff = list()\n for x in range(len(part_r)):\n part_r[x] = part_r[x].strip(\"\\\"\")\n for x in range(len(full_r)):\n full_r[x] = full_r[x].strip(\"\\\"\")\n for x in no_dup_r:\n y = x.split(DELIMITER)\n if y[USAGE_ROYAL] in full_r:\n full.append(y)\n elif y[USAGE_ROYAL] in part_r:\n part.append(y)\n else:\n staff.append(y)\n return {'semester_no_dup_crn': usage_file,\n 'semester_no_dup_r': no_dup_r,\n 'semester': two,\n 'full_time': full,\n 'len_full': len(full_time_file),\n 'part_time': part,\n 'len_part': len(part_time_file),\n 'staff': staff,\n 'total_courses': total_courses}",
"def produce_geneName_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if otherIDtype == 'Gene_Name':\n if uniprotID in swissProtIDs:\n idMap[uniprotID] = otherID.upper()\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)",
"def file_names(acqfolder):\n log.info('anonymizer.py file_names {0}'.format(acqfolder))\n\n subj_path = path(acqfolder)\n\n done = -1\n for ext in dicom_file_extensions:\n file_lst = subj_path.glob('*' + ext)\n if file_lst:\n rename_file_group_to_serial_nums(file_lst)\n done = 0\n\n return done",
"def extract_subject_names(file_names):\n return file_names.apply(lambda name: name.split('_')[1])",
"def scans_parser(root, study, outdir, read_nifti=False):\n # Welcome\n print(\"Starting scans parsing...\")\n\n # Parameters\n sourcedir = os.path.join(root, \"sourcedata\")\n pattern = os.path.join(\n sourcedir, \"sub-*\", \"ses-*\", \"*\", \"sub-*_ses-*.nii*\")\n # TODO: remove when reorganized\n pattern = os.path.join(\n sourcedir, \"sub-*\", \"ses-*\", \"*\", \"*\", \"sub-*_ses-*.nii*\")\n scans = defaultdict(lambda: defaultdict(list))\n\n # Parse the session files\n participants_sessions = {}\n all_subjects = os.listdir(sourcedir)\n with progressbar.ProgressBar(max_value=len(all_subjects),\n redirect_stdout=True) as bar:\n for cnt, subject in enumerate(all_subjects):\n session_file = os.path.join(\n sourcedir, subject, \"{0}_sessions.tsv\".format(subject))\n if os.path.isfile(session_file):\n with open(session_file) as open_file:\n reader = csv.DictReader(open_file, delimiter=\"\\t\")\n participants_sessions[subject.replace(\"sub-\", \"\")] = {}\n for row in reader:\n participants_sessions[subject.replace(\"sub-\", \"\")][\n row[\"session_id\"]] = row\n bar.update(cnt)\n\n # Parse the sourcedata directory\n all_files = glob.glob(pattern)\n with progressbar.ProgressBar(max_value=len(all_files),\n redirect_stdout=True) as bar:\n for cnt, path in enumerate(all_files):\n\n # Get acquisition information\n split = path.split(os.sep)\n subject = split[-4].replace(\"sub-\", \"\")\n session_id = split[-3]\n session = split[-3].replace(\"ses-\", \"\")\n dtype = split[-2]\n # TODO: remove when reorganized\n subject = split[-5].replace(\"sub-\", \"\")\n session_id = split[-4]\n session = split[-4].replace(\"ses-\", \"\")\n dtype = split[-3]\n name, ext = split[-1].split(\".\", 1)\n label = name.split(\"_\")[-1]\n if label not in ALLOWED_MODALITY:\n # Deal with multiple conversions\n label = label[:-1]\n if label not in ALLOWED_MODALITY:\n print(\"Unsupported BIDS modality label '{0}'.\".format(path))\n continue\n if subject in participants_sessions:\n session_info = participants_sessions[subject][session_id]\n center = session_info.get(\"site\", DEFAULT_CENTER)\n age_of_subject = session_info.get(\"age\", None)\n if not isinstance(age_of_subject, float):\n age_of_subject = None\n timepoint_label = session_info.get(\"label\", None)\n else:\n center = DEFAULT_CENTER\n age_of_subject = None\n\n # Get all associated files\n resources = [path]\n desc_file = os.path.join(os.path.dirname(path), name + \".json\")\n if not os.path.isfile(desc_file):\n desc_file = None\n else:\n resources.append(desc_file)\n tarball_file = os.path.join(os.path.dirname(path),\n name + \".dicom.tar.gz\")\n if not os.path.isfile(tarball_file):\n tarball_file = None \n if dtype == \"dwi\":\n bvec_file = os.path.join(os.path.dirname(path), name + \".bvec\")\n bval_file = os.path.join(os.path.dirname(path), name + \".bval\")\n if not os.path.isfile(bvec_file):\n print(\"No diffusion bvecs, skipping '{0}'...\".format(path))\n continue\n if not os.path.isfile(bval_file):\n print(\"No diffusion bvals, skipping '{0}'...\".format(path))\n continue\n resources.extend([bvec_file, bval_file])\n\n # Type the input nifti dataset\n if dtype in (\"swi\", \"anat\"):\n scan_type = \"MRIData\"\n elif dtype == \"func\":\n scan_type = \"FMRIData\"\n elif dtype == \"dwi\":\n scan_type = \"DMRIData\"\n else:\n raise ValueError(\n \"'{0}' data type not yet supported.\".format(dtype))\n typedata_struct, device_struct = nifti_typedata(\n path, scan_type, desc_file, read_nifti=read_nifti)\n\n # Convert timepoint\n if timepoint_label is not None:\n if 
age_of_subject is None:\n age_of_subject = session\n session = timepoint_label\n\n # Generate the scan struct\n assessment_id = \"{0}_{1}_{2}_{3}\".format(\n study.lower(), label, session, subject)\n scans[center][subject].append({\n \"Assessment\": {\n \"identifier\": assessment_id,\n \"timepoint\": session},\n \"Scans\": [{\n \"TypeData\": typedata_struct,\n \"ExternalResources\": [],\n \"FileSet\": {\n \"identifier\": md5_sum(path),\n \"name\": label},\n \"Scan\": {\n \"format\": \"NIFTI\",\n \"label\": label,\n \"identifier\": md5_sum(path),\n \"type\": label}\n }]\n })\n if tarball_file is not None:\n scans[center][subject][-1][\"Scans\"].append({\n \"TypeData\": typedata_struct,\n \"ExternalResources\": [{\n \"identifier\": md5_sum(tarball_file),\n \"absolute_path\": True,\n \"name\": name,\n \"filepath\": tarball_file}],\n \"FileSet\": {\n \"identifier\": md5_sum(tarball_file),\n \"name\": label},\n \"Scan\": {\n \"format\": \"DICOM\",\n \"label\": label,\n \"identifier\": md5_sum(tarball_file),\n \"type\": label}\n })\n if device_struct is not None:\n scans[center][subject][-1][\"Device\"] = device_struct\n if age_of_subject is not None:\n scans[center][subject][-1][\"Assessment\"][\n \"age_of_subject\"] = age_of_subject\n for _path in resources:\n name, ext = os.path.basename(_path).split(\".\", 1)\n scans[center][subject][-1][\"Scans\"][0][\n \"ExternalResources\"].append({\n \"identifier\": md5_sum(_path),\n \"absolute_path\": True,\n \"name\": name,\n \"filepath\": _path})\n\n # Update progress bar\n bar.update(cnt)\n\n # Save the results\n print(\"Saving data in '{0}'...\".format(outdir))\n save_parsing(scans, outdir, study, \"scans\")\n\n # Goodbye\n print(\"Done.\")\n\n return scans",
"def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i",
"def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))",
"def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)",
"def loci_parsed(loci_file):\n #\n ga_list = [\"Ang_30\",\"Ang_29\"]\n\n gb_list = [\"Ang_67\", \"Ang_21\"]\n\n cc_list = [\"Cg12063\", \"Cg125212\", \"Cg126212\", \"Cg12758\", \"Cg_432\"]\n\n loci_dic = {}\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n\n\n for files in loci_file:\n\n name= files.strip().split (\"/\")\n name_loci = name[12].split(\"_\")\n name_loci_1 = name_loci[1].split(\".\")\n real_name_loci = name_loci_1[0]\n\n loci_file = open(files)\n\n\n for line in loci_file:\n\n if line[:1] in \"0123456789\":\n pass\n else:\n\n line_information = line.strip().split()\n isolate = line_information[0]\n sequence = line_information [1]\n\n # if \"-\" in sequence:\n # sequence = sequence.replace (\"-\", \"\")\n\n if isolate in ga_list and loci_list[\"ga\"] == None:\n loci_list[\"ga\"] = sequence\n if isolate in gb_list and loci_list[\"gb\"] == None:\n loci_list[\"gb\"] = sequence\n if isolate in cc_list and loci_list[\"cc\"] == None:\n loci_list[\"cc\"] = sequence\n loci_dic[real_name_loci] = loci_list\n\n\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n return loci_dic",
"def extract_file_tags_from_file_name(filePath): #TODO untested and unused\n out_dict = {}\n studyid = 'n/a'\n subjectid = 'n/a'\n visitid = '1'\n\n if 'scorefiles' in filePath:\n studyid = filePath.split('scorefiles')[0]\n studyid = studyid.split('\\\\')\n if studyid[-1] == '':\n studyid = studyid[-2]\n else:\n studyid = studyid[-1]\n subjectid = filePath.split('scorefiles')[-1]\n subjectid = subjectid.split('subjectid')[-1]\n subjectid = subjectid.split('.')[0]\n if 'visit' in filePath:\n visitid = subjectid.split('visitid')[-1]\n visitid = visitid.split('.')[0]\n subjectid = subjectid.split('visitid')[0]\n\n subjectid = str(subjectid).lstrip(STRIP).rstrip(STRIP)\n subjectid = str(subjectid).lstrip('_').rstrip('_')\n visitid = str(visitid).lstrip(STRIP).rstrip(STRIP)\n visitid = str(visitid).lstrip('_').rstrip('_')\n studyid = str(studyid).lstrip(STRIP).rstrip(STRIP)\n out_dict['subjectid'] = subjectid\n out_dict['studyid'] = studyid\n out_dict['visitid'] = visitid\n return out_dict",
"def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks",
"def _get_snp_data(self):\n\n vcf_counter = 0\n for filename in self.vcffilenames:\n vcf_reader = vcf.Reader(open(filename), \"rb\")\n samplename = vcf_reader.samples[0]\n for record in vcf_reader:\n chromosome, position, ref, alt = (\n record.CHROM,\n record.POS,\n record.REF,\n record.ALT,\n )\n position = str(position)\n\n ## code to build all snps position\n self._record_all_snp_positions(chromosome, position)\n\n self._record_all_snps(filename, chromosome, position, ref, alt)\n # self.snp_positions.update({str(vcf_counter) + \"_\" + chromosome + \"_\" + str(position):{\"ref\": str(ref), \"alt\":str(alt).replace(\"[\",\"\").replace(\"]\", \"\")}})\n self.snpsites[chromosome][str(position)][vcf_counter] = True\n\n vcf_counter += 1",
"def initial_processing(subject_dir):\n # get subject name\n subject_name = subject_dir.parts[-1]\n\n # create ${subject_dir}/ASL and ${subject_dir}/T1w/Results/ASL \n # directories\n asl_dir = subject_dir / 'ASL'\n tis_dir = asl_dir / 'TIs'\n calib_dir = asl_dir / 'Calib'\n calib0_dir = calib_dir / 'Calib0'\n calib1_dir = calib_dir / 'Calib1'\n strucasl_dir = subject_dir / 'T1w/ASL'\n create_dirs([asl_dir, tis_dir, calib0_dir, calib1_dir, strucasl_dir])\n\n # find sub-directories\n # structural\n t1_dir = subject_dir / 'T1w'\n t1_name = t1_dir / 'T1w_acpc_dc_restore.nii.gz'\n t1_brain_name = t1_dir / 'T1w_acpc_dc_restore_brain.nii.gz'\n\n # asl\n b_dir = subject_dir / f'{subject_name}_V1_B'\n try:\n mbpcasl_dir = list(b_dir.glob('**/scans/*mbPCASLhr'))[0]\n # if no files match this format, it throws an IndexError\n except IndexError as e:\n print(e)\n mbpcasl = mbpcasl_dir / 'resources/NIFTI/files' / f'{subject_name}_V1_B_mbPCASLhr_PA.nii.gz'\n \n # output names\n tis_name = tis_dir / 'tis.nii.gz'\n calib0_name = calib0_dir / 'calib0.nii.gz'\n calib1_name = calib1_dir / 'calib1.nii.gz'\n # get tis\n fslroi(str(mbpcasl), tis_name, 0, 86)\n # get calibration images\n fslroi(str(mbpcasl), calib0_name, 88, 1)\n fslroi(str(mbpcasl), calib1_name, 89, 1)\n\n # get surface names\n surfaces_dir = t1_dir / 'fsaverage_LR32k'\n L_mid = surfaces_dir / f'{subject_name}_V1_MR.L.midthickness.32k_fs_LR.surf.gii'\n R_mid = surfaces_dir / f'{subject_name}_V1_MR.R.midthickness.32k_fs_LR.surf.gii'\n L_pial = surfaces_dir / f'{subject_name}_V1_MR.L.pial.32k_fs_LR.surf.gii'\n R_pial = surfaces_dir / f'{subject_name}_V1_MR.R.pial.32k_fs_LR.surf.gii'\n L_white = surfaces_dir / f'{subject_name}_V1_MR.L.white.32k_fs_LR.surf.gii'\n R_white = surfaces_dir / f'{subject_name}_V1_MR.R.white.32k_fs_LR.surf.gii'\n\n # add filenames to a dictionary to be saved to a json\n json_name = asl_dir / 'ASL.json'\n fields = [\n \"T1w_dir\",\n \"T1w_acpc\",\n \"T1w_acpc_brain\",\n \"ASL_seq\",\n \"ASL_dir\",\n \"TIs_dir\",\n \"structasl\",\n \"calib_dir\",\n \"calib0_dir\",\n \"calib1_dir\",\n \"calib0_img\",\n \"calib1_img\",\n \"L_mid\",\n \"R_mid\",\n \"L_pial\",\n \"R_pial\",\n \"L_white\",\n \"R_white\",\n \"json_name\"\n ]\n field_values = [\n t1_dir,\n t1_name,\n t1_brain_name,\n tis_name,\n asl_dir,\n tis_dir,\n strucasl_dir,\n calib_dir,\n calib0_dir,\n calib1_dir,\n calib0_name,\n calib1_name,\n L_mid,\n R_mid,\n L_pial,\n R_pial,\n L_white,\n R_white,\n json_name\n ]\n names_dict = {}\n for key, value in zip(fields, field_values):\n names_dict[key] = str(value)\n with open(json_name, 'w') as fp:\n json.dump(names_dict, fp, sort_keys=True, indent=4)",
"def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)",
"def read_dir():\n file_list=[]\n title_list = []\n for filename in os.listdir(\"alignments/\"):\n if filename.endswith(\".aln\"): #Retrieve only alignment files.\n file_list.append(filename)\n with open (\"genID.txt\",'r') as x: #The genID.txt file contains relevant gene names.\n while True:\n rule = x.readline()\n if len(rule) > 0: #If the rule is empty, the program does not use it.\n if rule[0] == \"B\": #Only fetch gen names.\n title_list.append(rule) #The title_list is used to create the variant files in a later stadium\n else:\n break\n return file_list,title_list",
"def produce_uniprotID_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if uniprotID in swissProtIDs:\n if otherIDtype == 'Gene_Name':\n otherID = otherID.upper()\n idMap[otherID] = uniprotID\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)",
"def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()",
"def read_grp(fname):\n global DAYS\n uint_types = [DAYS,\n 'Current crop type', \n 'Current residue on ground type', \n 'Previous residue on ground type', \n 'Old residue on ground type', \n 'Current dead root type', \n 'Previous dead root type', \n 'Old dead root type']\n\n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'rb')\n for i, line in enumerate(fid.readlines()):\n line_as_list = line.strip().split()\n\n if len(line_as_list) == 0:\n continue\n\n elif line_as_list[0][0] == '#':\n continue\n\n elif line_as_list[0] == 'int':\n try:\n meta[line[1]] = int(line[2])\n except:\n pass\n \n elif line_as_list[0] == 'float':\n try:\n meta[line[1]] = float(line[2])\n except:\n pass\n\n elif line_as_list[0] == 'char':\n continue\n\n elif line_as_list[0][0] == '{':\n cname = line.strip()[1:-1].replace(r'kg/m', r'kg*m**-1') \\\n .replace(r'kg/m**2', r'kg*m**-2') \\\n .replace(r'kg/m**3', r'kg*m**-3') \\\n .replace(r'kg/m**4', r'kg*m**-4') \\\n .replace(r'mm/hr', r'mm*hr**-1') \\\n .replace(r'mm/h', r'mm*hr**-1') \\\n .replace(r'm/day', r'm*day**-1') \\\n .replace(r'g/cc', r'g*cc**-1') \\\n .replace(r'kg-s/m**4', r'kg-s*m**-4') \\\n .replace(r's/m', r's*m**-1') \\\n .replace(r'Irrigation_volume_supplied/unit_area',\n r'Irrigation_volume_supplied*unit_area**-1')\n header.append(cname)\n\n else:\n if len(header) == len(line_as_list):\n \n # if we are here and data == None we need to initialize the data dictionary\n if data == None:\n data = {}\n for cname in header:\n typecode = ('f', 'h')[any([cname==s for s in uint_types])]\n data[cname] = array.array(typecode)\n\n for (cname, string) in zip(header, line_as_list):\n if any([cname==s for s in uint_types]):\n value = int(string)\n else:\n value = float(string)\n\n if cname == DAYS:\n\n if value in set(data[DAYS]):\n break\n\n data[cname].append(value)\n\n else:\n raise Exception('Failed to parse line %i, unexpected number of columns.'%(i+1))\n \n fid.close()\n\n # pack the table data into numpy arrays\n for (cname, v) in data.items():\n dtype = (np.float32, np.int16)[any([cname==s for s in uint_types])]\n data[cname] = np.array(v, dtype=dtype)\n\n return (meta, data)",
"def __snps_called__(self):\n directory, filename = os.path.split(self.snp_path)\n basename = os.path.basename(directory)\n if basename != 'gatk':\n new_directory = os.path.join(directory,'gatk')\n new_filename = re.sub(self.sample_key + \"_R_\",\"1_\",filename)\n snp_path = os.path.join(new_directory,new_filename)\n self.snp_path = snp_path\n if not os.path.isfile(self.snp_path):\n alternative_path = os.path.join(self.upload_dir,self.description + \"/\" + self.description + \"-gatk.vcf\")\n self.snp_path = alternative_path\n if not os.path.isfile(alternative_path):\n return False\n return True",
"def id2filenames(sid, ann_type=\"uppercase\", salamipath=dpath.SALAMI):\n import fnmatch\n\n spath = os.path.join(\n salamipath,\n \"annotations\",\n str(sid),\n \"parsed\"\n )\n files = None\n if os.path.exists(spath):\n files = os.listdir(spath)\n files = fnmatch.filter(files, '*'+ann_type+'*')\n # Prepend directory path\n for i in range(len(files)):\n files[i] = os.path.join(spath, files[i])\n return files",
"def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames",
"def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. 
adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n 
finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()",
"def manage_ident(filebasename, gmm, clusters):\n seg_f = open(\"%s.ident.%s.seg\" % (filebasename, gmm), \"r\")\n for line in seg_f:\n if line.startswith(\";;\"):\n# print line\n splitted_line = line.split()[1].split(':')[1].split('_')\n# print splitted_line\n try:\n cluster, speaker = splitted_line\n except:\n speaker = splitted_line[0]\n idx = line.index('score:' + speaker) + len('score:' + speaker + \" = \")\n iidx = line.index(']', idx) - 1\n value = line[idx:iidx]\n if not cluster in clusters:\n clusters[cluster] = Cluster(cluster, 'U', '0', '', cluster)\n clusters[cluster].add_speaker(speaker, value)\n seg_f.close()\n if not CONFIGURATION.KEEP_INTERMEDIATE_FILES:\n os.remove(\"%s.ident.%s.seg\" % (filebasename, gmm))",
"def create_sdxmetadata(sdx_dir, output_dir):\n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #segment and store metadata \n #define SDX files to be read\n for root, dirs, files in os.walk(sdx_dir):\n for idx, file in enumerate(files):\n if file.endswith(\".sdx\"):\n \n print(\"Reading File: \" + file)\n \n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #scan for pick info\n with open(root + file,\"r\") as f:\n searchlines = f.readlines()\n for i, line in enumerate(searchlines):\n #strip whitespace/end-of-line characters for exact text matching\n line = line.rstrip()\n #find pick info\n if \"pick\" == line:\n for l in searchlines[i:i+16]: \n #print(l)\n #assign pick info/instrument info to variables and store\n instrument_info = searchlines[i+1]\n pick_info = searchlines[i+2]\n phase_info = searchlines[i+9:i+13]\n instrument.append(instrument_info)\n picks.append(pick_info)\n phases.append(phase_info)\n \n #create a .txt file for each seperate event to store pick info\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n f = open(output_dir + os.path.splitext(file)[0] + \".txt\",'w')\n #header information...\n f.write('Data read from correpsonding SDX file:' + '\\n')\n f.write(file + '\\n\\n')\n f.write('Instrument/component' + '\\t\\t\\t' + 'Pick information' '\\t\\t\\t' + 'Phase information\\n')\n \n # print both instrument and pick information to the \n # associated event file\n for item in zip(instrument, picks, phases):\n \n #remove preceding whitespace/formatting characters\n item0 = item[0].rstrip()\n item1 = item[1].rstrip()\n item2 = list(map(str.strip, item[2]))\n \n #remove associated list formatting\n item2 = (\", \".join( str(e) for e in item2))\n\n #print...\n #format | instrument info | pick info | phase info\n f.write(\"%s\\t\\t%s\\t\\t%s\\n\" % (item0,item1,item2))\n \n f.close()",
"def parse_infiles(self):\n\n errs = 0\n # check file existence first\n for ifile in self.infiles:\n if ifile in ['-', 'stdin']: pass\n elif not os.path.isfile(ifile):\n print('** input file not found: %s' % ifile)\n errs += 1\n if errs: return 1\n \n # check for existence separately\n for ifile in self.infiles:\n if self.verb > 2: print('++ processing %s ...' % ifile)\n\n # open, read, close\n if ifile in ['-', 'stdin']: fp = sys.stdin\n else:\n try: fp = open(ifile)\n except:\n print(\"** failed to open input file %s\" % ifile)\n return 1\n ilines = fp.readlines()\n if ifile != sys.stdin: fp.close()\n\n # empty should be a terminal failure\n if len(ilines) < 1:\n print('** empty input for file %s' % ifile)\n return 1\n\n if len(self.labels) == 0:\n rv, self.labels = self.make_labels(ilines)\n self.parents = [self.find_parent_label(lab) for lab in self.labels]\n if rv: return 1\n\n rv, ldict = self.make_dict(ilines)\n if rv: return 1\n\n self.ldict.append(ldict)\n\n return 0",
"def _pname_and_metadata(in_file):\n\n\n if in_file.endswith(\".csv\"):\n raise ValueError(\"Did not find input metadata file: %s\" % in_file)\n base, md, global_vars = in_file, {}, {}\n md_file = None\n return base, md, global_vars, md_file",
"def _resolve_list_names(self, dirname='',\n dirname_extinfo=None,\n grisim='',\n grism_extinfo=None,\n in_sex=None,\n out_sex=None):\n # compose the default name\n if in_sex is None:\n # compose the filename from the direct image name\n in_sex = dirname.replace(\".fits\", \"_{0:d}.cat\"\n .format(dirname_extinfo['axe_ext']))\n\n # check whether the explicitly given filename exists\n if not os.path.isfile(in_sex):\n err_msg = (\"The Input Object List: {0:s} does not exist!\"\n .format(in_sex))\n raise aXeError(err_msg)\n\n if out_sex is None:\n # compose the name for the output GOL\n out_sex = os.path.basename(grisim).replace(\".fits\", \"_{0:d}.cat\".format(grism_extinfo['axe_ext']))\n\n # return the IOL and the GOL names\n return in_sex, out_sex",
"def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. 
If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()"
]
| [
"0.617442",
"0.6138864",
"0.5529454",
"0.54885745",
"0.54767644",
"0.5435723",
"0.54309344",
"0.5426276",
"0.5394026",
"0.5354435",
"0.53199375",
"0.5302326",
"0.5287302",
"0.5281108",
"0.5234611",
"0.52154994",
"0.51984787",
"0.51866984",
"0.5181254",
"0.5164858",
"0.51636827",
"0.51619804",
"0.5151845",
"0.5141648",
"0.5129575",
"0.5123538",
"0.51192176",
"0.51083964",
"0.50962275",
"0.5093657"
]
| 0.83686924 | 0 |
display the final labels list | def display_labels(self):
nsubj = len(self.infiles)
print('-- final label table (length %d):' % len(self.labels))
for label in self.labels:
nv = self.maxcounts[label]
if nv == 1: cstr = '%3d val' % nv
else: cstr = '%3d vals' % nv
nv = self.subjcounts[label]
if nv == 1: sstr = '%3d file' % nv
else: sstr = '%3d files' % nv
if nv < nsubj: short = ' (short)'
else: short = ''
print('%-30s : %-10s : %-10s%s' % (label, cstr, sstr, short)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_labels(self,labels):\n\t\tfor key in labels:\n\t\t\tprint key, ':\\t', labels[key]",
"def display_name_labels(self):\n for name in self.names:\n # create a label for each name\n self.root.add_widget(Label(text=name))",
"def label_list(entry):\n printing_resident_sheets(entry, rf'{constants.OUTPUTS_DIR}\\label_sheet.xlsx')\n printing_documents.create_label_list()",
"def display_labels(self, out, names, confidences, bbs, loc, prefix=''):\n # loc refers to the location relative to the bounding box, see top of the file\n i1, i2 = loc\n font = cv2.QT_FONT_NORMAL\n for i, (label, confidence, bb) in enumerate(zip(names, confidences, bbs)):\n pos = (bb[i1], bb[i2])\n if pos[0] > out.shape[1] or pos[1] > out.shape[0] or pos[0] < 0 or pos[1] < 0:\n continue\n cv2.putText(out, prefix + label + \" {}%\".format(int(confidence * 100)), pos, font, 0.75,\n self.colors[i % len(self.colors)], 1, cv2.LINE_AA)",
"def labels(self):\n return self._labels",
"def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]",
"def _generateLabel(self, obj, **args):\n result = []\n label = self._script.utilities.displayedLabel(obj)\n if label:\n result.append(label)\n return result",
"def get_labels(self):\n return []",
"def get_labels(self):\r\n return None",
"def label_dir(self):\n for lblname in self._vallabs:\n print(lblname)",
"def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep",
"def show_labels(self, show_labels):\n\n self._show_labels = show_labels",
"def _hide_labels(self):\n pass",
"def labels_all(self):\n return self._labels_all",
"def drawLabels(self):\r\n if self.sensors == None or self.sensors == []:\r\n return\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[0]])\r\n self.c.create_text(30,20,text=self.sensors[self.sensor_ids[0]],fill=col,anchor=tk.NW)\r\n if len(self.sensor_ids) == 2:\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[1]])\r\n self.c.create_text(30,40,text=self.sensors[self.sensor_ids[1]],fill=col,anchor=tk.NW)",
"def labels(self):\n return self.label(self.p_y_given_x)",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def generate_labels(pics):\r\n return []",
"def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()",
"def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()",
"def create_labels(self):\n for name in self.name_to_phone:\n temp_labels = Label(text=name)\n self.root.ids.main.add_widget(temp_labels)",
"def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]",
"def label_list(self, labnames=None):\n vallabs = self._vallabs\n if labnames is None:\n labnames = vallabs.keys()\n else:\n if isinstance(labnames, str):\n labnames = (labnames,)\n elif (not isinstance(labnames, collections.Iterable)\n or not all(isinstance(value, str) for value in labnames)):\n raise TypeError(\"labnames should be str or iterable of str\") \n labnames = set(name for value in labnames\n for name in value.split())\n if not labnames.issubset(vallabs.keys()):\n bad_names = \", \".join(str(lbl) for lbl in \n labnames.difference(vallabs.keys()))\n raise KeyError(bad_names + \" are not defined labels\")\n for name in labnames:\n print(name + \":\")\n lbldict = vallabs[name]\n for value in lbldict:\n print(\"{:>12} {}\".format(value, lbldict[value]))",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def getLabels(self):\n return self.numToLabel",
"def get_labels(self):\n\t\traise NotImplementedError()",
"def UpdateLabel(self) -> _n_6_t_0:",
"def _fetch_labels(self, list_fams, no_imgs, num_samples) -> Tuple[np.ndarray, List]:\n y_train = np.zeros(num_samples)\n pos = 0\n label = 0\n indexes = []\n for i in no_imgs:\n indexes.append(i)\n print(\"Label:%2d\\tFamily: %15s\\tNumber of images: %d\" % (label, list_fams[label], i))\n for j in range(i):\n y_train[pos] = label\n pos += 1\n label += 1\n return y_train, indexes",
"def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]",
"def output_labels(self):\n return list(self._output_labels)"
]
| [
"0.7338853",
"0.71385354",
"0.6938978",
"0.67793345",
"0.67660046",
"0.67345864",
"0.67238367",
"0.6704955",
"0.66717625",
"0.663407",
"0.6592146",
"0.65798926",
"0.65646005",
"0.6542965",
"0.6540552",
"0.6459968",
"0.64576674",
"0.64520043",
"0.6444454",
"0.6407432",
"0.6397413",
"0.6382851",
"0.6380063",
"0.6372023",
"0.6343245",
"0.63383615",
"0.6333799",
"0.6313279",
"0.6303256",
"0.6303024"
]
| 0.8096897 | 0 |
write value lines, "left justified" to maxcount fields for each infile for each label | def write_value_lines(self, fp):
if len(self.labels) < 1: return 1
nfiles = len(self.infiles)
# labels, starting with input files
# start with subject, if possible
dosubj = len(self.snames) == len(self.infiles)
dogrp = len(self.gnames) == len(self.infiles)
for ind, infile in enumerate(self.infiles):
# first is group or infile
if dogrp: fp.write('%s' % self.gnames[ind])
else: # infile instead of group
if infile == '-': fp.write('stdin')
else: fp.write('%s' % infile)
# subject, if possible (repeat?)
# if dosubj: fp.write('\t%s' % self.snames[ind])
for label in self.labels:
nf = self.maxcounts[label]
try: vals = self.ldict[ind][label]
except:
if self.verb>2:print('** infile %s missing key %s'%(infile,label))
vals = []
nv = len(vals)
if nv > 0: fp.write('\t'+'\t'.join(vals))
if nf > nv: fp.write('\t'*(nf-nv))
fp.write('\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_file(file_, maxn, maxl):\n width = len(str(maxl))\n with open(file_, 'w') as out:\n for i in range(1, maxl+1):\n out.write(f'[{i:{width}}] ' + write_line(maxn) + '\\n')",
"def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')",
"def combine_all_value(write_filename):\n write_ptr = open(write_filename, \"w\")\n\n for count in range(20):\n read_filename = \"all_value_%s.csv\" % count\n read_ptr = open(read_filename, \"r\")\n\n for line in read_ptr:\n if line[0] == 'i':\n print count\n continue\n write_ptr.write(line)\n\n read_ptr.close()\n\n write_ptr.close()",
"def fmt_wrap_lines(fp,values,fmt=\"%14.4f \",per_line=10):\n for i,a in enumerate(values):\n if i>0 and i%10==0:\n fp.write(\"\\n\")\n fp.write(\"%14.4f \"%a)\n fp.write(\"\\n\")",
"def generate_val_file(id_test,lines,columns):\n print(\"generate_val_file\")\n val_file_name=id_test+\".val\"\n f = io.open(INPUT_PARSER_RESULTS_DIR+val_file_name, \"w\",newline='\\n')\n inverse_count=0\n for line in range(0,lines):\n for column in range(0,columns):\n inverse_count=inverse_count-1\n print(\"(line,column)=(\"+str(line)+\",\"+str(column)+\")\")\n f.write(\"(\"+str(line)+\",\"+str(column)+\") = \"+str(inverse_count)+\"\\n\")\n f.close()",
"def main():\n input_file = sys.argv[1]\n target_width = int(sys.argv[2]) * 2\n\n to_write = \"\"\n \n print(\"Processing: %s\" % input_file)\n\n with open(input_file,\"r\") as fh:\n for line in fh.readlines():\n slices = line[:-1]\n \n endian_buf = []\n\n while(len(slices) > 0):\n k = slices[0:target_width]\n endian_buf.insert(0,k+\"\\n\")\n slices = slices[target_width:]\n\n for b in endian_buf:\n to_write += b\n\n with open(input_file,\"w\") as fh:\n fh.write(to_write)",
"def write_max_splits(io_stream):\n io_stream.write('value max_splits\\n1\\n')",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")",
"def write_hist_text_file(lengths, labels):\n for lengths_list, label in zip(lengths, labels):\n hist_file_name = label[:label.rfind('.')] + '.all_lengths.txt'\n with open(os.getcwd() + '/' + ntpath.basename(hist_file_name), 'w') as out_file:\n out_file.write(ntpath.basename(label) + '\\n')\n for length in sorted(lengths_list):\n out_file.write(str(length) + '\\n')",
"def densify(fof, outfof, window=-1):\n\n fnames = util.getfnames(fof)\n isdir = os.path.isdir(fof)\n\n for fname in fnames:\n\n outlines = set()\n i = 0\n \n with codecs.open(fname, \"r\", \"utf8\") as f:\n lines = f.readlines()\n\n if window == -1:\n outwrite = lines\n containscontent = True\n else:\n for line in lines:\n sline = line.split(\"\\t\")\n if len(sline) > 5:\n tag = sline[0]\n if tag != \"O\":\n # this is a label.\n # add w before and w after.\n # don't even need to worry about range checking!\n for j in range(i, i-window-1, -1): \n if j < 0 or len(lines[j].strip()) == 0:\n break\n outlines.add(j)\n\n\n for j in range(i, i+window+1):\n if j >= len(lines) or len(lines[j].strip()) == 0:\n break\n outlines.add(j)\n else:\n outlines.add(i)\n\n i += 1\n\n # conflate empty lines.\n outwrite = []\n lastlinewasempty = False\n containscontent = False\n for i,line in enumerate(lines):\n if i in outlines:\n isempty = len(line.strip()) == 0\n if isempty:\n if not lastlinewasempty:\n lastlinewasempty = True\n outwrite.append(line);\n else:\n containscontent = True\n outwrite.append(line);\n lastlinewasempty = False\n\n if isdir:\n outfname = outfof + \"/\" + os.path.basename(fname)\n else:\n outfname = outfof\n \n # if outlines isn't empty...\n if containscontent:\n with codecs.open(outfname , \"w\", \"utf8\") as out:\n for line in outwrite:\n out.write(line)",
"def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)",
"def write_lengths(gene_data, exon_data, out_file):\n ofunc = get_open_function(out_file)\n with ofunc(out_file, 'wt') as o:\n o.write('gene_id\\tgene_type\\taggregate_length\\n')\n for gene in sorted(exon_data):\n gtype = gene_data[gene]\n data = []\n for exon in exon_data[gene]:\n data.extend(range(exon[0], exon[1] + 1))\n o.write('{0}\\t{1}\\t{2}\\n'.format(gene, gtype, len(set(data))))",
"def writecc (listoflists,file,writetype='w',extra=2):\r\n if type(listoflists[0]) not in [ListType,TupleType]:\r\n listoflists = [listoflists]\r\n outfile = open(file,writetype)\r\n rowstokill = []\r\n list2print = copy.deepcopy(listoflists)\r\n for i in range(len(listoflists)):\r\n if listoflists[i] == ['\\n'] or listoflists[i]=='\\n' or listoflists[i]=='dashes':\r\n rowstokill = rowstokill + [i]\r\n rowstokill.reverse()\r\n for row in rowstokill:\r\n del list2print[row]\r\n maxsize = [0]*len(list2print[0])\r\n for col in range(len(list2print[0])):\r\n items = pstats.colex(list2print,col)\r\n items = map(pstats.makestr,items)\r\n maxsize[col] = max(map(len,items)) + extra\r\n for row in listoflists:\r\n if row == ['\\n'] or row == '\\n':\r\n outfile.write('\\n')\r\n elif row == ['dashes'] or row == 'dashes':\r\n dashes = [0]*len(maxsize)\r\n for j in range(len(maxsize)):\r\n dashes[j] = '-'*(maxsize[j]-2)\r\n outfile.write(pstats.lineincustcols(dashes,maxsize))\r\n else:\r\n outfile.write(pstats.lineincustcols(row,maxsize))\r\n outfile.write('\\n')\r\n outfile.close()\r\n return None",
"def splitFile(filename, n):\n in_file = open(filename)\n line = in_file.readline()\n count = 0\n while line <> \"\":\n if count < 10: num = \"0\"+str(count)\n else: num = str(count)\n f = open(\"output/\"+filename+\"-\"+num,\"w\")\n for i in range(n):\n if line == \"\": break\n f.write(line)\n line = in_file.readline()\n f.close()\n count += 1\n return count",
"def writeNormScore(self,fin,fout):\n\n for line in fin:\n [sv, en, score] = re.split(r'\\t|,',line)\n self.count[sv][en] += float(score)\n self.en_sum[en] += float(score)\n self.sv_sum[sv] += float(score)\n\n for sv, ens in self.count.iteritems():\n for en in ens.keys():\n fout.write(sv + \",\" + en + \"\\t\" + str(self.count[sv][en] / self.sv_sum[sv] * self.en_sum[en]) + \"\\n\")",
"def writing_count_longest_title(file_name):\n result = str(reports.count_longest_title(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")",
"def format_MT103_field70(self, val, character_limit, n_lines):\n text = []\n val = val.split('newline')\n for values in val:\n line = FSwiftWriterUtils.split_text_on_character_limit(values, character_limit)\n text.append(line)\n text = '\\n'.join(str(i) for sub_list in text for i in sub_list)\n text = '\\n'.join(text.split('\\n')[:n_lines])\n return text",
"def Seperate(f_read, f_write_name):\n lines = f_read.readlines()\n line_s = [line.split() for line in lines]\n\n for i in range(6, 13):\n nbytes = pow(2,i)\n f_write = f_write_name + str(nbytes) + \"b.txt\"\n f = open(f_write, \"w+\")\n\n for line in line_s:\n if line[3] == str(nbytes):\n f.write(\" \".join(line))\n f.write(\"\\n\")\n f.close()",
"def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")",
"def write_mps(instructions):\r\n for file_num in range(len(files_to_write)):\r\n with open(PATH + files_to_write[file_num], \"w\") as file:\r\n file.write(header)\r\n for instruction in instructions:\r\n parsed_instr = parse_bin_hex(instruction[0 + 32 * file_num: 32 + 32 * file_num]) + \" \"\r\n file.write(parsed_instr)",
"def fill_var_calls(file,length,var,indels): \n titel = file[0:10] \n with open (\"nuc_variant_calls/\"+titel+\".var\",'a')as outfile:\n outfile.write(\n \"%s\\t%s\\t%s\\t%s\\n\"%(file,\n length/2, #Length is divided by 2 because the alignment contains 2 sequences.\n var,\n indels)\n )",
"def make_table_file(lines, labels, dir_path, filename):\r\n lines.sort()\r\n lines.insert(0, '\\t'.join(labels))\r\n\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n output.write('\\n'.join(lines))\r\n output.close()",
"def writeStats(inDir, outFname):\n ofh = open(outFname, \"w\")\n ofh.write(\"meta\\tkallistoProcReads\\tkallistoAlnReads\\tkallistoEstFragLen\\n\")\n\n inFnames = glob.glob(join(inDir, \"log\", \"*.log\"))\n print(\"Parsing %d logfiles and writing to %s\" % (len(inFnames), outFname))\n for inFname in inFnames:\n cellId = basename(inFname).split(\".\")[0].split(\"_\")[0]\n # [quant] processed 1,836,518 reads, 636,766 reads pseudoaligned\n # [quant] estimated average fragment length: 251.99\n for line in open(inFname):\n if line.startswith(\"[quant] processed \"):\n words = line.split()\n readCount = words[2].replace(\",\",\"\")\n alignCount = words[4].replace(\",\",\"\")\n if line.startswith(\"[quant] estimated average fragment length:\"):\n fragLen = line.split()[5]\n row = [cellId, readCount, alignCount, fragLen]\n ofh.write(\"\\t\".join(row)+\"\\n\")\n ofh.close()",
"def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return",
"def write(self, filename):\n f = open(filename, 'w')\n f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed",
"def check_len(file_handle, fh_write):\r\n # parse through the file by line\r\n max_len = 0\r\n seq = ''\r\n # header = ''\r\n # Section 1.2 - Parse through file sequences and find max length\r\n for line in file_handle:\r\n # if a line has > assume it is the title/header of organism\r\n if line[0] == '>':\r\n # Determine length of each sequence\r\n # print(len(seq.strip()))\r\n # header = line\r\n seq = ''\r\n else:\r\n # if it is part of the sequence\r\n seq = seq + line\r\n # Check to find length of longest sequence, used to fill in so all are same length\r\n if len(seq.strip()) > max_len:\r\n max_len = len(seq.strip())\r\n file_handle.seek(0)\r\n # The last entry will be at top without this\r\n\r\n # With this Section 1.4 will be needed or it will miss the last entry\r\n seq = ''\r\n header = ''\r\n # Determine max length for section\r\n print(max_len)\r\n\r\n # Section 1.3 - Parse through file again and add - to end if less than max length, write to new file\r\n for line in file_handle:\r\n # if a line has > assume it is the title/header of organism\r\n if line[0] == '>':\r\n # if sequences is full, add - if needed and write header and seq to new file\r\n if len(seq) > 0:\r\n seq = seq.strip()\r\n while len(seq) < max_len:\r\n seq = seq + '-'\r\n write_data(fh_write, header, seq)\r\n # reset header to next header and seq\r\n header = line\r\n seq = ''\r\n else:\r\n # if it is part of the sequence\r\n seq = seq + line\r\n\r\n # Section 1.4, to print the last entry in the files\r\n seq = seq.strip()\r\n while len(seq) < max_len:\r\n seq = seq + '-'\r\n write_data(fh_write, header, seq)",
"def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()",
"def writeOutFileBarcodeCounts(barcode_dict_summary, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n for barcode in barcode_dict_summary:\n out_file.write(barcode)\n out_file.write(\"\\t\" + \"\\t\".join(map(str,barcode_dict_summary[barcode])))\n out_file.write(\"\\n\")",
"def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")"
]
| [
"0.6208772",
"0.5949596",
"0.5829664",
"0.56889254",
"0.5601088",
"0.5511065",
"0.5442443",
"0.54381067",
"0.53155595",
"0.52923715",
"0.52864933",
"0.5261997",
"0.5236052",
"0.51551807",
"0.5138517",
"0.5135748",
"0.5129272",
"0.51155114",
"0.51106626",
"0.50788087",
"0.50682455",
"0.5067844",
"0.5058038",
"0.5056868",
"0.5037089",
"0.50193876",
"0.5013404",
"0.49900964",
"0.49684423",
"0.4961693"
]
| 0.6836115 | 0 |
Returns all variable of the default tensorflow graph with the given prefix. The return value is a dictionary 'NAME_OF_VARIABLE' => 'VARIABLE'. If a prefix is given, the prefix is deleted in 'NAME_OF_VARIABLE'. | def get_op(prefix=None):
dict = {}
if prefix is not None and len(prefix) > 1:
if prefix[-1] != '/':
prefix += '/'
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
for t in res:
key = t.name
key = key[len(prefix):]
dict[str(key)] = t
return dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vars_by_prefix(self, prefix):\n\n t_vars = tf.global_variables()\n return [var for var in t_vars if prefix in var.name]",
"def get_named_variables(dct, name=True, overwrite=False, prefix=''):\n exprs = [('%s%s' % (prefix, k), v) for k, v in dct.items()\n if isinstance(v, theano.tensor.basic.TensorVariable)]\n\n if name:\n for k, v in exprs:\n if not hasattr(v, 'name') or overwrite:\n v.name = '%s%s' % (prefix, k)\n return dict(exprs)",
"def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n return ret_list",
"def variable_parser(var_list, prefix):\r\n ret_list = []\r\n for var in var_list:\r\n varname = var.name\r\n varprefix = varname.split('/')[0]\r\n if varprefix == prefix:\r\n ret_list.append(var)\r\n elif prefix in varname:\r\n ret_list.append(var)\r\n return ret_list",
"def get_local_variables(scope=None, suffix=None):\n return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)",
"def get_trainable_variables(scope=None, suffix=None):\n return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)",
"def _preload_existing_vars(prefix: str) -> Store:\n if not prefix:\n # If prefix is empty just return all the env variables.\n return environ\n\n prefixed = {}\n\n # Prefix is not empty, do the search and replacement:\n for env_name, env_value in environ.items():\n if not env_name.startswith(prefix):\n # Skip vars with no prefix.\n continue\n\n prefixed[env_name.replace(prefix, '', 1)] = env_value\n\n return prefixed",
"def declare_variables(self):\n\n\t\tvar_prefixes = ['W_in', 'W_rnn', 'b_rnn', 'W_out', 'b_out']\n\t\tself.var_dict = {}\n\n\t\twith tf.variable_scope('network'):\n\t\t\tfor p in var_prefixes:\n\t\t\t\tself.var_dict[p] = tf.get_variable(p, initializer=par[p+'_init'])",
"def prefix_nodes(graph, prefix):\n mapping = {node: f\"{prefix}{node}\" for node in graph.nodes}\n return networkx.relabel_nodes(graph, mapping)",
"def _prefixed(nt: namedtuple, prefix):\n result = {}\n for key, value in nt._asdict().items():\n result[prefix + key] = value\n return result",
"def get_model_variables(scope=None, suffix=None):\n return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)",
"def env_vars(prefix):\n return {k: v for k, v in os.environ.items() if k.startswith(prefix)}",
"def get_variables(scope=None, suffix=None,\n collection=ops.GraphKeys.GLOBAL_VARIABLES):\n if isinstance(scope, variable_scope.VariableScope):\n scope = scope.name\n if suffix is not None:\n if ':' not in suffix:\n suffix += ':'\n scope = (scope or '') + '.*' + suffix\n return ops.get_collection(collection, scope)",
"def ns_prefix_dict(g):\n return {ns: prefix.toPython() for (ns, prefix) in g.namespaces()}",
"def get_constants(prefix):\n return dict( (getattr(socket, n), n)\n for n in dir(socket)\n if n.startswith(prefix)\n )",
"def get_vars(scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]",
"def get_var_prefix(self):\n return self._var_prefix",
"def get_vars_to_save_and_restore(ckpt=None):\n model_vars = tf.trainable_variables()\n # Add batchnorm variables.\n bn_vars = [v for v in tf.global_variables()\n if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or\n 'mu' in v.op.name or 'sigma' in v.op.name or\n 'global_scale_var' in v.op.name]\n model_vars.extend(bn_vars)\n model_vars = sorted(model_vars, key=lambda x: x.op.name)\n mapping = {}\n if ckpt is not None:\n ckpt_var = tf.contrib.framework.list_variables(ckpt)\n ckpt_var_names = [name for (name, unused_shape) in ckpt_var]\n ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]\n not_loaded = list(ckpt_var_names)\n for v in model_vars:\n if v.op.name not in ckpt_var_names:\n # For backward compatibility, try additional matching.\n v_additional_name = v.op.name.replace('egomotion_prediction/', '')\n if v_additional_name in ckpt_var_names:\n # Check if shapes match.\n ind = ckpt_var_names.index(v_additional_name)\n if ckpt_var_shapes[ind] == v.get_shape():\n mapping[v_additional_name] = v\n not_loaded.remove(v_additional_name)\n continue\n else:\n logging.warning('Shape mismatch, will not restore %s.', v.op.name)\n logging.warning('Did not find var %s in checkpoint: %s', v.op.name,\n os.path.basename(ckpt))\n else:\n # Check if shapes match.\n ind = ckpt_var_names.index(v.op.name)\n if ckpt_var_shapes[ind] == v.get_shape():\n mapping[v.op.name] = v\n not_loaded.remove(v.op.name)\n else:\n logging.warning('Shape mismatch, will not restore %s.', v.op.name)\n if not_loaded:\n logging.warning('The following variables in the checkpoint were not loaded:')\n for varname_not_loaded in not_loaded:\n logging.info('%s', varname_not_loaded)\n else: # just get model vars.\n for v in model_vars:\n mapping[v.op.name] = v\n return mapping",
"def get_model_variables():\n g = tf.get_default_graph()\n return set(g.get_collection(tf.GraphKeys.MODEL_VARIABLES))",
"def get_default_prefix(path):\n if path in prefixes_dict.keys():\n return prefixes_dict[path]\n else:\n return ''",
"def get_var_prefix(self):\n return ''",
"def _create_feed_for_metric_variables(self, metric_variable_values: List[Any]\n ) -> Dict[types.TensorType, Any]:\n result = {}\n for node, value in zip(self._metric_variable_placeholders,\n metric_variable_values):\n result[node] = value\n return result",
"def get_constants(prefix):\n return {\n getattr(socket, n): n\n for n in dir(socket)\n if n.startswith(prefix)\n }",
"def get_unique_named_variable_scope(base_name):\n with tf.variable_scope(None, default_name=base_name) as vs:\n return vs",
"def _default_getter(environ, metadata, prefix, name):\n ce = metadata[CNF_KEY]\n var = ce.name if ce.name is not None else \"_\".join((*prefix, name)).upper()\n log.debug(\"looking for env var '%s'.\", var)\n try:\n return environ[var]\n except KeyError:\n raise MissingEnvValueError(var) from None",
"def _clean_salt_variables(params, variable_prefix=\"__\"):\n list(list(map(params.pop, [k for k in params if k.startswith(variable_prefix)])))\n return params",
"def load_weights(self):\n\n reader = pywrap_tensorflow.NewCheckpointReader(self._tf_model_prefix)\n var_to_shape_map = reader.get_variable_to_shape_map()\n data = dict()\n for name in var_to_shape_map:\n tensor = reader.get_tensor(name)\n data[name] = tensor\n\n print (\"Tensorflow checkpoint file [%s] loaded successfully. [%d] variables loaded.\"\n % (self._tf_model_prefix, len(data)))\n return data",
"def get_variables_by_name(given_name, scope=None):\n suffix = '/' + given_name + ':|^' + given_name + ':'\n return get_variables(scope=scope, suffix=suffix)",
"def variabels_to_restore(scope=None, strip_scope=False):\n if scope:\n variable_map = {}\n variables_to_restore = slim.get_variables_to_restore(include=[scope])\n for var in variables_to_restore:\n if strip_scope:\n var_name = var.op.name[len(scope) + 1:]\n else:\n var_name = var.op.name\n variable_map[var_name] = var\n return variable_map\n else:\n return {var.op.name: var for var in slim.get_variables_to_restore()}",
"def _get_variables_available_in_ckpt(variables, ckpt_path):\n ckpt_reader = tf.train.NewCheckpointReader(ckpt_path)\n ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_dtype_map()\n ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)\n vars_in_ckpt = {}\n for var_name, variable in sorted(variables.items()):\n if var_name in ckpt_vars_to_shape_map:\n if ckpt_vars_to_shape_map[var_name] == variable.shape.as_list():\n vars_in_ckpt[var_name] = variable\n return vars_in_ckpt"
]
| [
"0.7297888",
"0.62253565",
"0.5879272",
"0.5852968",
"0.57585263",
"0.56440735",
"0.56411254",
"0.56251013",
"0.5550454",
"0.5432469",
"0.54197264",
"0.5389389",
"0.53701025",
"0.52444595",
"0.52265084",
"0.5222102",
"0.51667374",
"0.51649207",
"0.5147349",
"0.5144244",
"0.5136751",
"0.5130674",
"0.51180977",
"0.5115014",
"0.5108235",
"0.51032066",
"0.50872827",
"0.5084366",
"0.50802857",
"0.5053692"
]
| 0.7121777 | 1 |
Check the behaviour of the `kind` parameter. In short, "borderline2" generates sample closer to the boundary decision than "borderline1". We generate an example where a logistic regression will perform worse on "borderline2" than on "borderline1". | def test_borderline_smote_kind():
X, y = make_classification(
n_samples=500,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_clusters_per_class=1,
n_classes=3,
weights=[0.1, 0.2, 0.7],
class_sep=1.0,
random_state=1,
)
smote = BorderlineSMOTE(
kind="borderline-1", m_neighbors=9, k_neighbors=5, random_state=0
)
X_res_borderline_1, y_res_borderline_1 = smote.fit_resample(X, y)
smote.set_params(kind="borderline-2")
X_res_borderline_2, y_res_borderline_2 = smote.fit_resample(X, y)
score_borderline_1 = (
LogisticRegression()
.fit(X_res_borderline_1, y_res_borderline_1)
.score(X_res_borderline_1, y_res_borderline_1)
)
score_borderline_2 = (
LogisticRegression()
.fit(X_res_borderline_2, y_res_borderline_2)
.score(X_res_borderline_2, y_res_borderline_2)
)
assert score_borderline_1 > score_borderline_2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_borderline_smote_no_in_danger_samples(kind):\n X, y = make_classification(\n n_samples=500,\n n_features=2,\n n_informative=2,\n n_redundant=0,\n n_repeated=0,\n n_clusters_per_class=1,\n n_classes=3,\n weights=[0.1, 0.2, 0.7],\n class_sep=1.5,\n random_state=1,\n )\n smote = BorderlineSMOTE(kind=kind, m_neighbors=3, k_neighbors=5, random_state=0)\n X_res, y_res = smote.fit_resample(X, y)\n\n assert_allclose(X, X_res)\n assert_allclose(y, y_res)\n assert not smote.in_danger_indices",
"def build_border(self, kind, size=np.array([[100], [100]])):\n border_size = np.array([100, 100]).reshape(-1, 1)\n if kind == \"wall\":\n self.border = Wall(border_size)\n elif kind == \"wrap\":\n self.border = Toric(border_size)\n elif kind == \"none\":\n self.border = Infinite(border_size)",
"def test_twoLines(self):\n self.performTestForParams()",
"def check_lines(wave, sel_lines, verb=False):\n\n print(\"\\nCHECKING LINES FOR WAVELENGTH RANGE AND SP. ORDERS\")\n print(\"--------------------------------------------------\")\n\n if type(wave[0]) is np.ndarray: # 2d spec\n min_spec_wave = wave[0][0]\n max_spec_wave = wave[-1][-1]\n spec_type = '2d'\n if type(wave[0]) in [np.float, np.float64]: # 1d spec\n min_spec_wave = wave[0]\n max_spec_wave = wave[-1]\n spec_type = '1d'\n\n # For each row (sp. line) in the config table calculate the min and max values of bandwidth\n rows = len(sel_lines['ln_id'])\n for k in range(rows):\n ln_id = sel_lines['ln_id'][k]\n ln_ctr = sel_lines['ln_ctr'][k]\n ln_win = sel_lines['ln_win'][k]\n\n if ln_win <= 0:\n sys.exit(\"*** ERROR: line {} bandwidth is not positive.\".format(ln_id))\n\n min_wave = ln_ctr - ln_win/2.\n max_wave = ln_ctr + ln_win/2.\n\n if verb:\n print(\"min_wave:\", min_wave)\n print(\"max_wave:\", max_wave)\n\n print(\"min_spec_wave:\", min_spec_wave)\n print(\"max_spec_wave:\", max_spec_wave)\n\n # Check if line fits inside spectral range\n if min_wave < min_spec_wave or max_wave > max_spec_wave:\n print(\"*** ERROR: Line {} bandwidth outside spectral range.\".format(ln_id))\n return False\n else:\n print(\"Line {} inside spectral range\".format(ln_id))\n\n # If wave is 2d check if line fits inside sp. order\n if spec_type == '2d':\n order = None\n ln_ctr_orders = []\n order = []\n for i in range(len(wave)):\n if min_wave > wave[i][0] and max_wave < wave[i][-1]:\n order.append(i)\n # used to show potencial orders with wavelength range\n if ln_ctr > wave[i][0] and ln_ctr < wave[i][-1]:\n ln_ctr_orders.append(i)\n\n if order is None:\n print(\"*** ERROR: Could not determine sp. order for {}\".format(ln_id))\n print(\"\\tmin_wave = {:.2f}\".format(min_wave))\n print(\"\\tmax_wave = {:.2f}\".format(max_wave))\n print(\"\\tThe closest orders are:\")\n for k in range(len(ln_ctr_orders)):\n closest_ord = ln_ctr_orders[k]\n print(\"\\tOrder {}: {:.2f}-{:.2f}\".format(closest_ord, wave[closest_ord][0], wave[closest_ord][-1]))\n sys.exit()\n else:\n for i in range(len(order)):\n print(\"Line {} inside spectral order {}\".format(ln_id, order[i]))\n return True",
"def test_model_layer_types_ww2x(self):\n \n\t\tdetails = self.watcher.describe(pool=False, min_evals=1)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n \t\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8*9, \"8*9 conv2D layers, but {} found\".format(denseCount))",
"def assert_kind(kind: Any) -> None:\r\n if not kind in {\"T\", \"P\"}:\r\n raise ValueError(f\"`kind` should be 'T' or 'P', but got {kind}.\")",
"def gen_test_case_type_check(self):\n cases = '\\n\\n;; Type check'\n assert_template = '(assert_invalid (module (func (result v128) ({lane_type}.{op} (i32.const 0) (f32.const 0.0)))) \"type mismatch\")'\n for op in self.BINARY_OPS:\n cases += '\\n' + assert_template.format(lane_type=self.LANE_TYPE, op=op)\n\n return cases",
"def assert_equal_type(logical_line):\n if asse_equal_type_re.match(logical_line):\n yield (0, \"G317: assertEqual(type(A), B) sentences not allowed\")",
"def assertRoundTrip(self, weyl1: TwoQubitWeylDecomposition):\n repr1 = repr(weyl1)\n with self.assertDebugOnly():\n weyl2: TwoQubitWeylDecomposition = eval(repr1) # pylint: disable=eval-used\n msg_base = f\"weyl1:\\n{repr1}\\nweyl2:\\n{repr(weyl2)}\"\n self.assertEqual(type(weyl1), type(weyl2), msg_base)\n maxdiff = np.max(abs(weyl1.unitary_matrix - weyl2.unitary_matrix))\n self.assertEqual(maxdiff, 0, msg=f\"Unitary matrix differs by {maxdiff}\\n\" + msg_base)\n self.assertEqual(weyl1.a, weyl2.a, msg=msg_base)\n self.assertEqual(weyl1.b, weyl2.b, msg=msg_base)\n self.assertEqual(weyl1.c, weyl2.c, msg=msg_base)\n maxdiff = np.max(np.abs(weyl1.K1l - weyl2.K1l))\n self.assertEqual(maxdiff, 0, msg=f\"K1l matrix differs by {maxdiff}\" + msg_base)\n maxdiff = np.max(np.abs(weyl1.K1r - weyl2.K1r))\n self.assertEqual(maxdiff, 0, msg=f\"K1r matrix differs by {maxdiff}\" + msg_base)\n maxdiff = np.max(np.abs(weyl1.K2l - weyl2.K2l))\n self.assertEqual(maxdiff, 0, msg=f\"K2l matrix differs by {maxdiff}\" + msg_base)\n maxdiff = np.max(np.abs(weyl1.K2r - weyl2.K2r))\n self.assertEqual(maxdiff, 0, msg=f\"K2r matrix differs by {maxdiff}\" + msg_base)\n self.assertEqual(weyl1.requested_fidelity, weyl2.requested_fidelity, msg_base)",
"def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)",
"def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)",
"def test_plot_ess_bad_kind(models):\n idata = models.model_1\n with pytest.raises(ValueError, match=\"Invalid kind\"):\n plot_ess(idata, kind=\"bad kind\")",
"def test_ds18b20_get_kind(self):\n assert_equal(self.test_ds18b20.get_kind(), 'mpds18b20')",
"def test_compare_Expsmall_line(self):\r\n # exp function crosses over to line func as A_shell-->0\r\n self.model.setParam(\"A_shell1\", 0.000001)\r\n self.model2.setParam(\"A_shell1\", 1)\r\n # change the function to a line function\r\n self.model2.setParam(\"func_shell1\", 1)\r\n \r\n #Compare exp(A=0.000001) to linear (where A_shell is null) function \r\n self.assertAlmostEqual(self.model.run(0.1),self.model2.run(0.1),4)",
"def test_with_2_lines():\n line = \"n\" * 15 + \"\\n\" + \"n\" * 60 + \" \" + \"n\" * 10\n assert wrap_line(line) == \"n\" * 15 + \" \" + \"n\" * 60 + \"\\n\" + \"n\" * 10",
"def test_threeLines(self):\n self.performTestForParams()",
"def test_spec2(run_spec2, fitsdiff_default_kwargs, suffix):\n rt.is_like_truth(run_spec2, fitsdiff_default_kwargs, suffix,\n truth_path='truth/test_nirspec_ifu')",
"def test_show_correctness_default(self):\n assert ShowCorrectness.correctness_available()",
"def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())",
"def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n weight_shape = self.weight_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.DepthwiseConv2D(\n name=\"conv1\",\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n output = conv1(inputs)\n\n self.assertEqual(output.get_shape(), output_shape)\n self.assertEqual(conv1.w.get_shape(), weight_shape)\n if use_bias:\n self.assertEqual(conv1.b.get_shape(), out_channels)",
"def test_ethyne(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((400, 420), (500, 420))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[400, 420, 500, 420]]\n ])\n )",
"def test_validation_wrong_protocols():\n basic_protocol = yank_load(standard_protocol)\n\n # Alchemical paths\n protocols = [\n {'lambda_electrostatics': [1.0, 0.5, 0.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 'wrong!']},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 11000.0]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, -0.5]},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': 0.0},\n {'lambda_electrostatics': [1.0, 0.5, 0.0], 'lambda_sterics': [1.0, 0.5, 0.0], 3: 2}\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol\n yield assert_raises, YamlParseError, ExperimentBuilder._validate_protocols, modified_protocol\n\n # Try different options both with 'auto' and a path with alchemical functions.\n auto_path = 'auto'\n no_lambda_path = {'lambda_electrostatics': 'lambda**2', 'lambda_sterics': 'sqrt(lambda)'}\n hardcoded_path = {'lambda_electrostatics': [1.0, 0.0], 'lambda_sterics': [1.0, 0.0]}\n correct_lambda_path = {'lambda': [1.0, 0.0], **no_lambda_path}\n str_lambda_path = {'lambda': 'string', **no_lambda_path}\n three_lambda_path = {'lambda': [1.0, 0.5, 0.0], **no_lambda_path}\n\n # Each test case is (error_regex, options, alchemical_path)\n trailblazer_options = [\n (\"n_equilibration_iterations:\\n - must be of integer type\",\n {'n_equilibration_iterations': 'bla'}, auto_path),\n (\"Only mathematical expressions have been given with no values for their variables\",\n {}, no_lambda_path),\n (\"Mathematical expressions were detected but no function variable name was given\",\n {}, correct_lambda_path),\n (\"Function variable name 'lambda' is not defined in 'alchemical_path'\",\n {'function_variable_name': 'lambda'}, hardcoded_path),\n (\"Only mathematical expressions have been given with no values for their variables\",\n {'function_variable_name': 'lambda'}, str_lambda_path),\n (\"Only the two end-point values of function variable 'lambda' should be given.\",\n {'function_variable_name': 'lambda'}, three_lambda_path),\n ]\n for regex, opts, alchemical_path in trailblazer_options:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding']['complex']['alchemical_path'] = alchemical_path\n modified_protocol['absolute-binding']['complex']['trailblazer_options'] = opts\n yield assert_raises_regexp, YamlParseError, regex, ExperimentBuilder._validate_protocols, modified_protocol\n\n # Phases\n alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])\n protocols = [\n {'complex': alchemical_path},\n {2: alchemical_path, 'solvent': alchemical_path},\n {'complex': alchemical_path, 'solvent': alchemical_path, 'thirdphase': alchemical_path},\n {'my-complex-solvent': alchemical_path, 'my-solvent': alchemical_path},\n {'my-complex': alchemical_path, 'my-complex-solvent': alchemical_path},\n {'my-complex': alchemical_path, 'my-complex': alchemical_path},\n {'complex': alchemical_path, 'solvent1': alchemical_path, 'solvent2': alchemical_path},\n {'my-phase1': alchemical_path, 'my-phase2': alchemical_path},\n collections.OrderedDict([('my-phase1', alchemical_path), ('my-phase2', alchemical_path),\n ('my-phase3', alchemical_path)])\n ]\n for protocol in protocols:\n modified_protocol = copy.deepcopy(basic_protocol)\n modified_protocol['absolute-binding'] = protocol\n yield assert_raises, YamlParseError, 
ExperimentBuilder._validate_protocols, modified_protocol",
"def auto_line_fitting_filter(param, i1, i2):\n if param[0] <= 0.:\n # line amplitdue too small\n return False\n if param[1] < i1 or param[1] > i2:\n # line center not in the fitting range (i1, i2)\n return False\n if param[2] > 50. or param[2] < 1.0:\n # line too broad or too narrow\n return False\n if param[3] < -0.5*param[0]:\n # background too low\n return False\n return True",
"def line_ratio(ratio_name,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n if ratio_name == 'NII':\n line1,line2 = '[NII]122','[NII]205'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio != 0]\n label = '%s / %s' % (line1,line2)\n\n if ratio_name == 'OICII':\n line1,line2 = '[OI]63','[CII]'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio > 1e-2]\n ratio = np.log10(ratio[ratio != 0])\n label = 'log %s / %s' % (line1,line2)\n\n fig,ax = plt.subplots(figsize=(10,8))\n h = ax.hist(ratio,bins=10,color='orange')\n\n ax.set_xlabel(label,fontsize=15)\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/ratio_%s' % ratio_name,dpi=300)",
"def testTwoDimensions(self):\n tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)])\n self.assertEquals(len(tt), pow(2, 2))\n\n # Check truth table output.\n self.assertFalse(tt.GetOutput((False, False)))\n self.assertFalse(tt.GetOutput((False, True)))\n self.assertTrue(tt.GetOutput((True, False)))\n self.assertTrue(tt.GetOutput((True, True)))\n\n # Check assertions on bad input to GetOutput.\n self.assertRaises(TypeError, tt.GetOutput, True)\n self.assertRaises(ValueError, tt.GetOutput, (True, True, True))\n\n # Check iteration over input lines.\n lines = list(tt)\n self.assertEquals((False, False), lines[0])\n self.assertEquals((False, True), lines[1])\n self.assertEquals((True, False), lines[2])\n self.assertEquals((True, True), lines[3])\n\n self._TestTableSanity(tt, lines)",
"def test_blacklist_get_kind(self):\n assert_equal(self.test_blacklist.get_kind(), 'mpblacklist')",
"def verify_startwith_rule2(self, type_tagged, type = None):\n featuresets = []\n for t, question_tag in type_tagged:\n if t in type:\n featuresets.append([self.startwith_feture2(question_tag),t])\n else:\n featuresets.append([self.startwith_feture2(question_tag),'other'])\n print(featuresets)\n perc = 0.5\n train_set, test_set = featuresets[:int(len(featuresets)*perc)], featuresets[int(len(featuresets)*perc):]\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n print(\"Accurancy \")\n print(nltk.classify.accuracy(classifier, test_set))\n classifier.show_most_informative_features(10)",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv2D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))",
"def get_adv_losses(discriminator_real_outputs, discriminator_fake_outputs,\n kind):\n if kind == 'classic':\n loss_fn = classic_gan_losses\n elif kind == 'nonsaturating':\n loss_fn = nonsaturating_gan_losses\n elif kind == 'wasserstein':\n loss_fn = wasserstein_gan_losses\n elif kind == 'hinge':\n loss_fn = hinge_gan_losses\n return loss_fn(discriminator_real_outputs, discriminator_fake_outputs)"
]
| [
"0.59879875",
"0.5278893",
"0.5067222",
"0.50639474",
"0.50141376",
"0.4952348",
"0.48771766",
"0.4809434",
"0.47661296",
"0.4752564",
"0.4752564",
"0.4720459",
"0.4624556",
"0.45733762",
"0.45580173",
"0.45507464",
"0.45396474",
"0.45207068",
"0.45138627",
"0.45116305",
"0.450184",
"0.4498797",
"0.44913912",
"0.44818118",
"0.44793293",
"0.4475868",
"0.44708836",
"0.44650438",
"0.4463499",
"0.44596666"
]
| 0.61739796 | 0 |
Test that login is required to retrieve tags | def test_login_required_to_view_tags(self):
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_login_required_to_retrieve_tags(self):\n\n response = self.client.get(URL_TAGS)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_tags_limited_to_authenticated_user(self):\n\n # Create a new user in addition to the user created in\n # the setUp, and leave it without an authentication.\n credentials = {'email': '[email protected]', 'password': 'Testpass34'}\n new_user = get_user_model().objects.create_user(**credentials)\n\n # Create a tag that is assigned to the new user.\n Tag.objects.create(user=new_user, name='Fruity')\n\n # Create a tag that is assigned to the authenticated user.\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n response = self.client.get(URL_TAGS)\n\n # Check that the response is HTTP 200, and includes only one tag.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n # Check that the name of the returned tag matches with the\n # name of the tag that was assigned to the authenticated user.\n self.assertEqual(response.data[0]['name'], tag.name)",
"def test_get_tags_unauthenticated(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_show_tags(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags\")\r\n html = resp.get_data(as_text=True)\r\n tags = Tag.query.all()\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Tag List\", html)\r\n self.assertIn(tags[0].name, html)",
"def test_login_page(self):\n r = requests.get(self.url)\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.content)\n self.assertEqual(soup.findAll('legend')[0].contents[0], 'Sign In')",
"def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_login_required():\n pass",
"def test_login_required(self):\n res = self.client.get(reverse('recipe:ingredient-list'))\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_tags(self):\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_login(self):\n\t\tdata = {'username' : 'testUser2', 'password' : 'passTestUser'}\n\t\tresponse = self.login(data)\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\ttry:\n\t\t\ttoken = response.data[\"token\"]\n\t\texcept AttributeError:\n\t\t\tself.fail(\"No token attribute\")",
"def test_get_tags_successful(self):\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_aio_can_login_to_web_portal(aio):",
"def test_successful_login(self):\n pass",
"def test_show_add_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags/new\") \r\n html = resp.get_data(as_text=True) \r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Create Tag\", html)",
"def test_agentpi_login(self):\n param = {\n 'username' : \"seth\", \n 'password' : \"testing\"\n }\n response = param\n\n self.assertEqual(response.text, \"User Exists\")",
"def test_login_required(self):\n res = self.client.get(RETETA_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required_to_retrieve_ingredients(self):\n\n # Retrieve the ingredients belonging to user.\n response = self.client.get(URL_INGREDIENTS)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...",
"def test_login(self):\n res = self.client.get(\"/login\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Already a member!\" in data"
]
| [
"0.8512646",
"0.8236491",
"0.8236491",
"0.8216921",
"0.8152566",
"0.8152566",
"0.79444635",
"0.7274527",
"0.7273791",
"0.71287364",
"0.71203995",
"0.70111114",
"0.693236",
"0.6897866",
"0.68918276",
"0.6823461",
"0.68113005",
"0.6805346",
"0.675212",
"0.67194724",
"0.67154175",
"0.6682518",
"0.6663797",
"0.6645976",
"0.6587282",
"0.6584395",
"0.657087",
"0.65652883",
"0.6550973",
"0.6548496"
]
| 0.8265167 | 1 |
Test that the tags retrieved are for the authenticated user | def test_tags_limited_to_user_tags(self):
user2 = create_user(
fname='Test2',
lname='User2',
email='[email protected]',
password='testpass2'
)
Tag.objects.create(user=user2, name='Vegan')
tag = Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tags_limited_to_authenticated_user(self):\n\n # Create a new user in addition to the user created in\n # the setUp, and leave it without an authentication.\n credentials = {'email': '[email protected]', 'password': 'Testpass34'}\n new_user = get_user_model().objects.create_user(**credentials)\n\n # Create a tag that is assigned to the new user.\n Tag.objects.create(user=new_user, name='Fruity')\n\n # Create a tag that is assigned to the authenticated user.\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n response = self.client.get(URL_TAGS)\n\n # Check that the response is HTTP 200, and includes only one tag.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n # Check that the name of the returned tag matches with the\n # name of the tag that was assigned to the authenticated user.\n self.assertEqual(response.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_tags_limited_to_user(self):\n user2 = User.objects.create(\n email='[email protected]',\n password='test_password'\n )\n Tag.objects.create(user=user2, name='Fruity')\n Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)",
"def test_tags_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='23pass1234&'\n )\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n Tag.objects.create(user=user2, name='Valami mas')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), len(tags))\n self.assertEqual(res.data, serializer.data)",
"def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)",
"def test_retrieve_tags(self):\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_get_tags_successful(self):\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_login_required_to_retrieve_tags(self):\n\n response = self.client.get(URL_TAGS)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_get_tags_unauthenticated(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_show_tags(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/tags\")\r\n html = resp.get_data(as_text=True)\r\n tags = Tag.query.all()\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Tag List\", html)\r\n self.assertIn(tags[0].name, html)",
"def test_login_required_to_view_tags(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrive_tags_assigned_to_recipe(self):\n tag1 = Tag.objects.create(user=self.user, name='Breakfast')\n tag2 = Tag.objects.create(user=self.user, name='Dinner')\n recipe = Recipe.objects.create(\n title=\"Eggs on toast\",\n making_time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe.tags.add(tag1)\n res = self.client.get(TAGS_URL, {\"assigned_only\": 1})\n serializer1 = TagSerializer(tag1)\n serializer2 = TagSerializer(tag2)\n\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)",
"def test_get_all_tags(self):\n print(self.session.tags)\n self.assertEqual(\n len(self.session.tags),\n (3 * len(self.session.wp_post_objects)) #3 tags added by default\n )",
"def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...",
"def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)",
"def test_create_tags_successfull(self):\n payload = {'name': 'Test Tag'}\n self.client.post(TAG_URL, payload)\n exists = Tag.objects.filter(user=self.user, name = payload['name']).exists()\n self.assertTrue(exists)",
"def test_retrieve_tags_assigned_to_recipes(self):\n tag1 = Tag.objects.create(user=self.user, name='Breakfast')\n tag2 = Tag.objects.create(user=self.user, name='Lunch')\n\n recipe = Recipe.objects.create(\n title='Coriander eggs on toast',\n time_minutes=10,\n price=5.00,\n user=self.user\n )\n\n recipe.tags.add(tag1)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n serializer1 = TagSerializer(tag1)\n serializer2 = TagSerializer(tag2)\n\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)",
"def test_retrive_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name=\"Breakfast\")\n Tag.objects.create(user=self.user, name=\"Lunch\")\n recipe1 = Recipe.objects.create(\n title=\"Pancake\",\n making_time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title=\"Italian Fried Egg\",\n making_time_minutes=5,\n price=10.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)",
"def test_retrieve_tags_assigned_to_recipes(self):\n tag1 = Tag.objects.create(user=self.user, name='Tag1')\n tag2 = Tag.objects.create(user=self.user, name='Tag2')\n recipe = Recipe.objects.create(\n user=self.user,\n title=\"Rec1\",\n time_minutes=20,\n price=Decimal('4.85')\n )\n recipe.tags.add(tag1)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n serializer1 = TagSerializer(tag1)\n serializer2 = TagSerializer(tag2)\n\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)",
"def test_given_that_I_add_a_user_and_insert_a_task_with_several_tags_I_can_access_tag_collection(self):\n from .models import Tag\n instance = self._makeOne(1,\n u'Find a shrubbery',\n [u'quest', u'ni', u'knight'])\n self.assertEqual(instance.tags[0].name, u'quest')\n self.assertEqual(instance.tags[1].name, u'ni')\n self.assertEqual(instance.tags[2].name, u'knight')",
"def test_filter_recipe_by_tag(self):\n tag1 = sample_tag(self.user, name='Indian')\n tag2 = sample_tag(self.user, name='Breakfast')\n recipe1 = sample_recipe(self.user, title='Curry')\n recipe2 = sample_recipe(self.user, title=\"bacon pie\")\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n recipe3 = sample_recipe(self.user)\n\n res = self.client.get(\n RECIPE_URL,\n {'tags': f'{tag1.id},{tag2.id}'}\n )\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_login_required(self):\n res = self.client.get(TAGS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_tags_assigned_to_recipes(self):\n\n # Create two tags, assign one of them to a recipe\n # while leaving the other one unassigned.\n tag1 = Tag.objects.create(user=self.user, name='Breakfast')\n tag2 = Tag.objects.create(user=self.user, name='Lunch')\n\n recipe = Recipe.objects.create(\n title='Turkish Omlette with Minced Meat',\n time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe.tags.add(tag1)\n\n response = self.client.get(URL_TAGS, {'assigned_only': True})\n\n serializer1 = TagSerializer(tag1)\n serializer2 = TagSerializer(tag2)\n\n # The response only includes the tag1.\n self.assertIn(serializer1.data, response.data)\n self.assertNotIn(serializer2.data, response.data)",
"def tags(request):\n return Tag.objects.filter(user=request.user)",
"def test_retrieve_tags_assigned_unique(self):\n\n tag = Tag.objects.create(user=self.user, name='Breakfast')\n Tag.objects.create(user=self.user, name='Lunch')\n\n recipe1 = Recipe.objects.create(\n title='Pancakes',\n time_minutes=5,\n price=3.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title='Porridge',\n time_minutes=3,\n price=1.00,\n user=self.user\n )\n\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n response = self.client.get(URL_TAGS, {'assigned_only': True})\n\n self.assertEqual(len(response.data), 1)",
"def test_show_tag_details(self):\r\n \r\n with app.test_client() as client:\r\n resp = client.get(f\"/tags/{self.tag.id}\")\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Marvel\", html)\r\n self.assertIn(\"Avengers\", html)"
]
| [
"0.849094",
"0.81080294",
"0.81041294",
"0.80561423",
"0.8038318",
"0.80258954",
"0.80212194",
"0.79648906",
"0.7838485",
"0.78236586",
"0.74280965",
"0.7302066",
"0.7256315",
"0.7233424",
"0.6953861",
"0.6921536",
"0.68587327",
"0.684185",
"0.68384755",
"0.68327254",
"0.68081915",
"0.6794289",
"0.6735118",
"0.67350435",
"0.67150867",
"0.67150867",
"0.67147833",
"0.66979176",
"0.6696811",
"0.6683893"
]
| 0.81783974 | 1 |
Cast a list of names into an rnn-digestible matrix. | def to_matrix(names, max_len=None, pad=0, dtype="int32"):
max_len = max_len or max(map(len,names))
names_ix = np.zeros([len(names),max_len],dtype) + pad
for i in range(len(names)):
name_ix = list(map(token_to_id.get,names[i]))
names_ix[i,:len(name_ix)] = name_ix
return names_ix.T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_features(names):\n Alphabet = ['a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j','k', 'l', 'm', 'n', 'o',\n 'p', 'q', 'r', 's', 't','u', 'v', 'w', 'x', 'y' , 'z']\n \n N = len(names)\n Feature_matrix = np.zeros((N, 260))\n for row in range(0, N):\n firstLast = names[row].split()\n first = firstLast[0] #First Name\n last = firstLast[1] #Last Name\n if(len(first) < 5):\n firstRange = len(first)\n else:\n firstRange = 5\n if(len(last) < 5):\n lastRange = len(last)\n else:\n lastRange = 5\n for index in range(0,firstRange): #iterate though first 5 letters of First name\n offset = 26 * index\n featureIndex = offset + Alphabet.index(first[index])\n Feature_matrix[row,featureIndex] = 1\n index = 4 #advance index in case length was less than 5 \n for Lastindex in range(0,lastRange): #iterate though first 5 letters of Last name\n index += 1\n offset = 26 * index\n featureIndex = offset + Alphabet.index(last[Lastindex])\n Feature_matrix[row,featureIndex] = 1\n return Feature_matrix",
"def to_multi_label_matrix(target_labels: List[List[str]], label_names: List[str]) -> np.ndarray:\n def map_multi_label_line(line_labels: List[str]) -> List[int]:\n return [1 if label in line_labels else 0 for label in label_names]\n\n return np.array(list(map(map_multi_label_line, target_labels)))",
"def _string_to_matrix(str_in: str):\n nums = list(str_in)\n n = int(len(nums) ** 0.5)\n return list(map(list, zip(*[map(str, nums)] * n)))",
"def _generate_similarity_mat(labels):\n l_mat = np.repeat(labels, len(labels), axis=1)\n l_mat_t = l_mat.T\n\n sim_mat = np.equal(l_mat, l_mat_t).astype(int)\n return sim_mat",
"def decode_cell_names(iterable):\n mapper = two_bit_mapper(np.unique(iterable))\n return [mapper[x] for x in iterable]",
"def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat",
"def srp_matrix(cls, words, ndims, _hashfunc=city_64(0)):\n multiplier = (ndims - 1) // 64 + 1\n hashes = [\n list(map(_hashfunc, ['{}_{}'.format(w, i)\n for i in range(multiplier)]))\n for w in words\n ]\n\n # Given a `multipier` value of 5, `hashes` is really a V x 5\n # array of 64-bit integers, where V is the vocabulary size...\n\n hash_arr = numpy.array(hashes, dtype=numpy.uint64)\n\n # ...but we could also think of it as a V x 40 array of bytes...\n\n hash_arr = hash_arr.view(dtype=numpy.uint8)\n\n # ...or even as an array of bits, where every word is represented\n # by 320 bits...\n\n hash_arr = numpy.unpackbits(hash_arr.ravel()).reshape(-1,\n 64 * multiplier)\n\n # ...or as an array of floating point values, all equal to either\n # 1.0 or 0.0, and truncated to give a final array of V x ndims.\n\n return (hash_arr.astype(numpy.float64) * 2 - 1)[:, :ndims]",
"def list2matrix(image_list):\n\tflatten_list = []\n\tfor image in image_list:\n\t\tflatten_list.append(image.ravel())\n\n\tmatrix = np.vstack(flatten_list)\n\n\treturn matrix",
"def fromList(cls, elems, **kwargs):\n if not ('m' in kwargs or 'n' in kwargs):\n raise ValueError(\"at least one of m and n must be specified\")\n m = kwargs.get('m')\n n = kwargs.get('n')\n num_elems = len(elems)\n if m is None:\n m = num_elems // n\n elif n is None:\n n = num_elems // m\n elif m * n != num_elems:\n raise ValueError(\"dimension does not match number of elements in\"\n \"list\")\n\n data = [elems[i * n: i * (n + 1)] for i in range(m)]\n return Matrix(m, n, data)",
"def _array_name_ND_to_1D(self, array_name):\n\n if array_name in self._split_arrays:\n array_name_1D = self._split_arrays[array_name]\n else:\n array_name_1D = [array_name + \"_\" + i for i in ('x', 'y', 'z')]\n\n return array_name_1D",
"def _create_stoic_mat(ns, nr, name_list, stoic_tuple, species_names):\n stoic_mat = np.zeros([ns, nr], dtype=np.int)\n for index, (names, stoics) in enumerate(zip(name_list, stoic_tuple)):\n for a_name, a_stoic in zip(names, stoics):\n species_index = species_names.index(a_name)\n stoic_mat[species_index, index] += int(a_stoic)\n return stoic_mat",
"def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat",
"def authors_matrix( corpus ) :\n all = all_authors(corpus)\n row_dois = [x['DI'] for x in corpus]\n result = zeros( (len(corpus),len(all)), dtype = int32 )\n for paper in corpus :\n for item in authors( paper ) :\n result[ row_dois.index( paper['DI'] ) ][ all.index( item ) ] = 1\n\n return result, row_dois, all",
"def convert_to_r_matrix(df, strings_as_factors=False):\n if isinstance(df, pandas.Series):\n # If it's a Series, cast it to a DataFrame\n df = pandas.DataFrame(df)\n r_dataframe = pandas_data_frame_to_rpy2_data_frame(df)\n as_matrix = robj.baseenv.get(\"as.matrix\")\n r_matrix = as_matrix(r_dataframe)\n return r_matrix",
"def transform(self, nameList):\n return {name: self.transformSingle(name) for name in nameList}",
"def string_list_to_cells(lst):\n cells = np.ndarray(len(lst), dtype = 'object')\n for i in range(len(lst)):\n cells[i] = lst[i]\n return cells",
"def create_matrix(list_of_edges, n):\n matrix = [[0 for i in range(n)] for j in range(n)]\n ind = 0\n for i in range(n):\n for j in range(i):\n matrix[i][j] = list_of_edges[ind]\n matrix[j][i] = list_of_edges[ind]\n ind += 1\n return matrix",
"def create_N_table_lookup(N=None,\n alphabet=['0', '1'],\n n_repeats=3,\n namer=lambda i: \"t{}\".format(i + 1),\n seed=123):\n np.random.seed(seed)\n inputs = np.array(list(''.join(letters)\n for letters in itertools.product(alphabet, repeat=n_repeats)))\n iter_outputs = itertools.permutations(inputs)\n if N is not None:\n iter_outputs = np.array(list(iter_outputs))\n indices = np.random.choice(range(len(iter_outputs)), size=N, replace=False)\n iter_outputs = iter_outputs[indices]\n return [pd.Series(data=outputs, index=inputs, name=namer(i)) for i, outputs in enumerate(iter_outputs)]",
"def name_distribution_with_tokens(names):\n dist = defaultdict(lambda: 0)\n all_names = []\n for k, v in names.items():\n all_names.append(k)\n \n # flattening lists of strings\n v1 = [([x] if isinstance(x,basestring) else x) for x in v]\n v = list(itertools.chain(*v1))\n all_names.extend(v)\n\n for name in all_names:\n dist[name.lower()] += 1\n\n for name, val in dist.items():\n dist[name] = float(val)/len(dist.keys())\n return dist",
"def matrix_stats(headers_list, distmats):\r\n\r\n if len(set(map(tuple, headers_list))) > 1:\r\n raise ValueError(\"error, not all input matrices have\" +\r\n \" identical column/row headers\")\r\n\r\n all_mats = array(distmats) # 3d numpy array: mtx, row, col\r\n means = mean(all_mats, axis=0)\r\n medians = median(all_mats, axis=0)\r\n stdevs = std(all_mats, axis=0)\r\n\r\n return deepcopy(headers_list[0]), means, medians, stdevs",
"def network_matrix(nt, rates):\n if type(nt) == str: \n nt = az.transform(nt)\n else: \n nt = az.transform(az.transform(nt))\n n = len(nt)\n M = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n M[i,j] = rates[G.index(nt[j][0] + nt[i][-1])]\n return M",
"def matrixlist(inputlist, converter=proper, fake=False):\n if converter is None:\n converter = type(inputlist[0][0])\n xlen = len(inputlist[0])\n for x in xrange(1,len(inputlist)):\n if len(inputlist[x]) != xlen:\n raise IndexError(\"Unequal matrix row lengths for matrixlist of \"+str(xlen)+\" and \"+str(len(inputlist[x])))\n out = matrix(len(inputlist), xlen, converter=converter, fake=bool(fake))\n out.a = inputlist[:]\n out.convert()\n return out",
"def build_matrix(path_screen, nmols, list_models):\r\n df = pd.DataFrame(columns=list_models, index=nmols)\r\n ntarget = os.path.split(path_screen)[1]\r\n df.index.name = ntarget\r\n df = df.fillna(0)\r\n for num_db in os.listdir(path_screen):\r\n for ff in os.listdir(os.path.join(path_screen, num_db)):\r\n if ff.split('.')[0] in list_models:\r\n pscreenfile = os.path.join(path_screen, num_db, ff)\r\n match_compounds = [int(mol.strip()) for mol in open(pscreenfile).readlines()]\r\n for compound in match_compounds:\r\n df.at[compound, ff.split('.')[0]] = 1\r\n df = df.fillna(0)\r\n return df",
"def dimensionizing_mapper(self, names=None):\n\n def fix(string):\n tags = [\n \"'\", '\"', ' ', '&', '.', '/', '-',\n '(', ')', '[', ']', '{', '}'\n ]\n for tag in tags:\n string = string.replace(tag, '_')\n return string\n\n masks = self._meta['masks']\n columns = self._meta['columns']\n suffix = self._dimensions_suffix\n\n if not names: names = self.variables()\n mapper = {}\n for org_mn, mask in masks.items():\n if org_mn in names:\n mask_name = fix(org_mn)\n new_mask_name = '{mn}.{mn}{s}'.format(mn=mask_name, s=suffix)\n mapper[org_mn] = new_mask_name\n\n mask_mapper = 'masks@{mn}'.format(mn=org_mn)\n new_mask_mapper = 'masks@{nmn}'.format(nmn=new_mask_name)\n mapper[mask_mapper] = new_mask_mapper\n\n values_mapper = 'lib@values@{mn}'.format(mn=org_mn)\n new_values_mapper = 'lib@values@{nmn}'.format(nmn=new_mask_name)\n mapper[values_mapper] = new_values_mapper\n\n items = masks[org_mn]['items']\n for i, item in enumerate(items):\n org_cn = item['source'].split('@')[-1]\n col_name = fix(org_cn)\n new_col_name = '{mn}[{{{cn}}}].{mn}{s}'.format(\n mn=mask_name, cn=col_name, s=suffix\n )\n mapper[org_cn] = new_col_name\n\n col_mapper = 'columns@{cn}'.format(cn=org_cn)\n new_col_mapper = 'columns@{ncn}'.format(ncn=new_col_name)\n mapper[col_mapper] = new_col_mapper\n\n for col_name, col in columns.items():\n if col_name in names and not self._is_array_item(col_name):\n new_col_name = fix(col_name)\n if new_col_name == col_name: continue\n mapper[col_name] = new_col_name\n\n col_mapper = 'columns@{cn}'.format(cn=col_name)\n new_col_mapper = 'columns@{ncn}'.format(ncn=new_col_name)\n mapper[col_mapper] = new_col_mapper\n\n return mapper",
"def tf_idf(name_vector):\n name_series = pd.Series(list(map(str, name_vector)))\n\n vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams)\n tf_idf_matrix = vectorizer.fit_transform(name_series)\n return tf_idf_matrix",
"def network_matrix(nt, rates):\n if type(nt) == str: nt = az.transform(nt)\n else: nt = az.transform(az.transform(nt))\n n = len(nt)\n M = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n M[i,j] = rates[G.index(nt[j][0] + nt[i][-1])]\n return M",
"def _fmats(self, rdata):\n\n widths = [self._width(col) for col in list(zip(*rdata))]\n fmats = ' '.join(['{:>%d}' % width for width in widths ])\n return fmats",
"def makeNewickList(distancesWithNames):\n i = 0\n oldDistance = 0\n while len(distancesWithNames) > 1:\n smallestindex = findSmallest(distancesWithNames)\n distancesWithNames, oldDistance = newMatrixWithSmallest(distancesWithNames, smallestindex, beforeDistance=oldDistance)\n i+=1\n retString = \"(\" + distancesWithNames[0][0] + \",\" + distancesWithNames[0][1] + \");\"\n return retString",
"def combine_fiber_identlist(identlist_lst):\n identlist1 = list(identlist_lst.values())[0]\n newdescr = [descr for descr in identlist1.dtype.descr]\n # add a new column\n newdescr.insert(0, ('fiber', 'S1'))\n\n newidentlist = []\n for fiber, identlist in sorted(identlist_lst.items()):\n for row in identlist:\n item = list(row)\n item.insert(0, fiber)\n newidentlist.append(tuple(item))\n newidentlist = np.array(newidentlist, dtype=newdescr)\n\n return newidentlist",
"def from_list_of_assignments(cls, assignments, new_id_prefix=None):\n from cascada.bitvector.operation import Operation\n for v_i, op_i in assignments:\n assert isinstance(v_i, core.Variable) and isinstance(op_i, Operation)\n\n my_table = MemoizationTable()\n my_table.counter = len(assignments)\n\n if new_id_prefix is None:\n first_var = assignments[0][0]\n for i, c in enumerate(first_var.name):\n if c.isdigit():\n index_first_digit = i\n break\n else:\n index_first_digit = len(first_var.name)\n my_table.id_prefix = first_var.name[:index_first_digit]\n else:\n my_table.id_prefix = new_id_prefix\n\n for v_i, op_i in assignments:\n if v_i.name.startswith(my_table.id_prefix) and \\\n v_i.name[len(my_table.id_prefix):].isdigit() and \\\n int(v_i.name[len(my_table.id_prefix):]) > my_table.counter:\n msg = \"invalid var name {} due to id_prefix {} and counter {}\\n{}\".format(\n v_i.name, my_table.id_prefix, my_table.counter, assignments)\n raise ValueError(msg)\n\n my_table.table = bidict.OrderedBidict(assignments)\n\n return my_table"
]
| [
"0.53580344",
"0.5232146",
"0.5220881",
"0.5063171",
"0.5013838",
"0.5013702",
"0.49942318",
"0.48702893",
"0.48425204",
"0.4825524",
"0.48236102",
"0.4823267",
"0.47998914",
"0.47902778",
"0.47831476",
"0.477888",
"0.47715646",
"0.47602817",
"0.47384185",
"0.47281095",
"0.4715039",
"0.4712613",
"0.47123343",
"0.4704299",
"0.46929038",
"0.4673984",
"0.46375024",
"0.463626",
"0.46350488",
"0.46276736"
]
| 0.6554735 | 0 |
Test that pause and delay rely on two separate pause types, and do not resume each other | def test_pause_and_delay_separation():
pause_mgr = PauseManager(door_state=DoorState.CLOSED)
assert pause_mgr.queue == []
pause_mgr.pause(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.PAUSE]
pause_mgr.pause(PauseType.DELAY)
assert pause_mgr.queue == [PauseType.PAUSE, PauseType.DELAY]
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.DELAY]
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.DELAY]
pause_mgr.resume(PauseType.DELAY)
assert pause_mgr.queue == [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pause_unit_helper(configs):\n _pause_resume_helper(pause_unit, configs)",
"def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()",
"def pause(): # real signature unknown; restored from __doc__\n pass",
"def test_902_pause_resume_actions(self):\n u.log.debug('Checking pause/resume actions...')\n self._test_pause()\n self._test_resume()",
"def test_901_pause_resume(self):\n # Expected default and alternate values\n set_default = {'use-syslog': 'False'}\n set_alternate = {'use-syslog': 'True'}\n self._assert_services(should_run=True)\n action_id = u.run_action(self.keystone_sentry, \"pause\")\n assert u.wait_on_action(action_id), \"Pause action failed.\"\n\n self._assert_services(should_run=False)\n self.d.configure('keystone', set_alternate)\n action_id = u.run_action(self.keystone_sentry, \"resume\")\n assert u.wait_on_action(action_id), \"Resume action failed\"\n self._assert_services(should_run=True)\n self.d.configure('keystone', set_default)\n self._auto_wait_for_status(message=\"Unit is ready\",\n include_only=['keystone'])",
"def test_910_pause_and_resume(self):\n u.log.debug('Checking pause and resume actions...')\n sentry_unit = self.n_ovs_sentry\n\n assert u.status_get(sentry_unit)[0] == \"active\"\n\n action_id = u.run_action(sentry_unit, \"pause\")\n assert u.wait_on_action(action_id), \"Pause action failed.\"\n assert u.status_get(sentry_unit)[0] == \"maintenance\"\n\n action_id = u.run_action(sentry_unit, \"resume\")\n assert u.wait_on_action(action_id), \"Resume action failed.\"\n assert u.status_get(sentry_unit)[0] == \"active\"\n u.log.debug('OK')",
"def test_delay():\n time1 = time.time()\n res = delay(1)(_dummy_func)(2)\n time2 = time.time()\n assert res == (2, 4)\n assert time2 - time1 >= 1",
"def test_pause_resume(self):\n with self.pause_resume(self.services):\n logging.info('Testing pause resume (services=\"{}\")'\n .format(self.services))",
"def test_door_pause_protocol(enable_door_safety_switch):\n pause_mgr = PauseManager(door_state=DoorState.CLOSED)\n assert pause_mgr.queue == []\n\n pause_mgr.set_door(door_state=DoorState.OPEN)\n pause_mgr.pause(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n with pytest.raises(PauseResumeError):\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n pause_mgr.set_door(door_state=DoorState.CLOSED)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == []",
"def pause(*args, seconds: int=0, **kwargs)->None:\n pass",
"def pause(ms=None): #py:pause\n if ms is None:\n RUR._pause_()\n else:\n RUR._pause_(ms)",
"def pause(seconds):\n time.sleep(seconds);",
"def test_valid_pause_resume(self):\n p_1_model = self._game.players[1].model\n p_2_model = self._game.players[2].model\n self._game.remove_player_by_user_id(self._users[1].uid)\n self.assertIsPAUSED(self._game)\n self.assertEqual(self._game.active_players(), 3)\n self._game.remove_player_by_user_id(self._users[2].uid)\n self._game.add_player(self._users[4], 2)\n self._game.add_player(self._users[2], 1)\n self.assertIsRUNNING(self._game)\n self.assertEqual(self._game.active_players(), 4)\n self.assertIs(self._game.players[1].model, p_1_model,\n \"Player 1 model did not preserve.\")\n self.assertIs(self._game.players[2].model, p_2_model,\n \"Player 2 model did not preserve.\")",
"def test_pause_resume(self):\n services = ['ceph-mds']\n with self.pause_resume(services):\n logging.info('Testing pause resume (services=\"{}\")'\n .format(services))",
"async def pause_behaviors(self) -> None:",
"def test_pause(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n message: Optional[str],\n) -> None:\n subject.pause(msg=message)\n decoy.verify(mock_engine_client.wait_for_resume(message=message))",
"def random_pause():\n pause_time = random.uniform(0, 0.5)\n sleep(pause_time)",
"def test_pause_queue(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.pause()\n player.queue(source)\n\n while player.source:\n player.dispatch_events()\n player.play()",
"def test_pausing_measure(processor, measure_with_tools):\n measure = measure_with_tools\n measure.add_tool('pre-hook', 'dummy2')\n measure.move_tool('pre-hook', 2, 0)\n measure.add_tool('post-hook', 'dummy2')\n processor.start_measure(measure)\n\n process_and_assert(getattr, (processor, 'active'))\n\n def wait_on_state_paused(timeout):\n return processor._state.wait(timeout, 'paused')\n\n pre_hook = measure.pre_hooks['dummy2']\n process_and_assert(pre_hook.waiting.wait, (5,))\n process_app_events()\n\n # Pause inside a pre_hook.\n processor.pause_measure()\n process_app_events()\n assert measure.status == 'PAUSING'\n pre_hook.go_on.set()\n wait_and_process(wait_on_state_paused)\n assert measure.status == 'PAUSED'\n\n processor.resume_measure()\n wait_and_process(pre_hook.signal_resuming.wait)\n assert measure.status == 'RESUMING'\n pre_hook.go_on_resuming.set()\n wait_and_process(pre_hook.signal_resumed.wait)\n assert measure.status == 'RUNNING'\n\n # Pause in between two pre_hooks\n processor.pause_measure()\n pre_hook.go_on_resumed.set()\n wait_and_process(wait_on_state_paused)\n assert measure.status == 'PAUSED'\n processor.resume_measure()\n\n # Pause just before starting the main measure.\n pre_hook2 = measure.pre_hooks['dummy']\n pre_hook2.accept_pause = False\n wait_and_process(pre_hook2.waiting.wait)\n assert measure.status == 'RUNNING'\n processor.pause_measure()\n pre_hook2.go_on.set()\n wait_and_process(wait_on_state_paused)\n processor.resume_measure()\n\n # Pause during the main task execution.\n wait_and_process(processor.engine.waiting.wait)\n processor.pause_measure()\n processor.engine.go_on.set()\n wait_and_process(wait_on_state_paused)\n assert measure.status == 'PAUSED'\n processor.resume_measure()\n wait_and_process(processor.engine.signal_resuming.wait)\n assert measure.status == 'RESUMING'\n processor.engine.go_on_resuming.set()\n wait_and_process(processor.engine.signal_resumed.wait)\n assert measure.status == 'RUNNING'\n processor.engine.go_on_resumed.set()\n\n # Pause inside a post_hook.\n post_hook = measure.post_hooks['dummy']\n wait_and_process(post_hook.waiting.wait)\n processor.pause_measure()\n process_app_events()\n assert measure.status == 'PAUSING'\n post_hook.go_on.set()\n wait_and_process(wait_on_state_paused)\n assert measure.status == 'PAUSED'\n\n processor.resume_measure()\n wait_and_process(post_hook.signal_resuming.wait)\n assert measure.status == 'RESUMING'\n post_hook.go_on_resuming.set()\n wait_and_process(post_hook.signal_resumed.wait)\n assert measure.status == 'RUNNING'\n\n # Pause in between two post_hooks\n processor.pause_measure()\n post_hook.go_on_resumed.set()\n wait_and_process(wait_on_state_paused)\n assert measure.status == 'PAUSED'\n processor.resume_measure()\n\n post_hook2 = measure.post_hooks['dummy2']\n wait_and_process(post_hook2.waiting.wait)\n post_hook2.go_on.set()\n\n process_and_join_thread(processor._thread)\n assert measure.status == 'COMPLETED'\n m = processor.plugin.workbench.get_manifest('test.measure')\n assert not m.find('runtime_dummy1').collected\n assert not m.find('runtime_dummy2').collected",
"def testTriggerPause(self):\n\n # Pause the proxy so data sent to it builds up in its buffer.\n self.proxy.pauseProducing()\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertFalse(self.parentProducer.paused, \"don't pause yet\")\n self.proxy.write(\"x\" * 51)\n self.assertTrue(self.parentProducer.paused)",
"def pause():\n click.pause()",
"def pause(seconds: float) -> None:\n time.sleep(cast(float, seconds))",
"def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield",
"def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield",
"def pause(self):\n\t\tpass",
"def pause(self):\n pass",
"def pause(self):\n pass",
"def resume_unit_helper(configs):\n _pause_resume_helper(resume_unit, configs)",
"def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break",
"def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return"
]
| [
"0.7281035",
"0.7201257",
"0.71017355",
"0.6926161",
"0.69036686",
"0.68986255",
"0.67866415",
"0.6734228",
"0.669614",
"0.6636806",
"0.6619378",
"0.6573626",
"0.6556387",
"0.6468316",
"0.64680254",
"0.64634264",
"0.64600056",
"0.64432734",
"0.6406104",
"0.6340547",
"0.62910455",
"0.6285906",
"0.6284799",
"0.6284799",
"0.62787086",
"0.62783897",
"0.62783897",
"0.62654096",
"0.62277824",
"0.6218399"
]
| 0.84556085 | 0 |
Test that when the door safety switch is enabled, pause cannot be resumed until the door is closed | def test_door_pause_protocol(enable_door_safety_switch):
pause_mgr = PauseManager(door_state=DoorState.CLOSED)
assert pause_mgr.queue == []
pause_mgr.set_door(door_state=DoorState.OPEN)
pause_mgr.pause(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.PAUSE]
with pytest.raises(PauseResumeError):
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.PAUSE]
pause_mgr.set_door(door_state=DoorState.CLOSED)
assert pause_mgr.queue == [PauseType.PAUSE]
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pause_and_delay_separation():\n pause_mgr = PauseManager(door_state=DoorState.CLOSED)\n assert pause_mgr.queue == []\n\n pause_mgr.pause(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.PAUSE]\n\n pause_mgr.pause(PauseType.DELAY)\n assert pause_mgr.queue == [PauseType.PAUSE, PauseType.DELAY]\n\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.DELAY]\n\n pause_mgr.resume(PauseType.PAUSE)\n assert pause_mgr.queue == [PauseType.DELAY]\n\n pause_mgr.resume(PauseType.DELAY)\n assert pause_mgr.queue == []",
"def test_901_pause_resume(self):\n # Expected default and alternate values\n set_default = {'use-syslog': 'False'}\n set_alternate = {'use-syslog': 'True'}\n self._assert_services(should_run=True)\n action_id = u.run_action(self.keystone_sentry, \"pause\")\n assert u.wait_on_action(action_id), \"Pause action failed.\"\n\n self._assert_services(should_run=False)\n self.d.configure('keystone', set_alternate)\n action_id = u.run_action(self.keystone_sentry, \"resume\")\n assert u.wait_on_action(action_id), \"Resume action failed\"\n self._assert_services(should_run=True)\n self.d.configure('keystone', set_default)\n self._auto_wait_for_status(message=\"Unit is ready\",\n include_only=['keystone'])",
"async def test_toggle(player) -> None:\n assert player.state == STATE_OFF\n await player.async_toggle()\n assert player.state == STATE_ON\n await player.async_toggle()\n assert player.state == STATE_OFF\n player.standby()\n assert player.state == STATE_STANDBY\n await player.async_toggle()\n assert player.state == STATE_ON",
"async def pause_behaviors(self) -> None:",
"def Pause():\n\tDmg.enableButton.SetOff()",
"def test_pause_resume(self):\n services = ['ceph-mds']\n with self.pause_resume(services):\n logging.info('Testing pause resume (services=\"{}\")'\n .format(services))",
"def test_910_pause_and_resume(self):\n u.log.debug('Checking pause and resume actions...')\n sentry_unit = self.n_ovs_sentry\n\n assert u.status_get(sentry_unit)[0] == \"active\"\n\n action_id = u.run_action(sentry_unit, \"pause\")\n assert u.wait_on_action(action_id), \"Pause action failed.\"\n assert u.status_get(sentry_unit)[0] == \"maintenance\"\n\n action_id = u.run_action(sentry_unit, \"resume\")\n assert u.wait_on_action(action_id), \"Resume action failed.\"\n assert u.status_get(sentry_unit)[0] == \"active\"\n u.log.debug('OK')",
"def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()",
"def pause(self):\n pass\n # self.condor_object.hold()",
"def test_turn_off(power_supply):\n power_supply.Init()\n assert power_supply.state() != tango.DevState.OFF\n power_supply.turn_off()\n assert power_supply.state() == tango.DevState.OFF",
"def test_pause_resume(self):\n with self.pause_resume(self.services):\n logging.info('Testing pause resume (services=\"{}\")'\n .format(self.services))",
"def test_pause_already_closed(testchannel):\n with pytest.raises(ChannelClosedError):\n testchannel.pause()",
"async def test_turn_on_off(player) -> None:\n assert player.state == STATE_OFF\n await player.async_turn_on()\n assert player.state == STATE_ON\n await player.async_turn_off()\n assert player.state == STATE_OFF",
"def test_902_pause_resume_actions(self):\n u.log.debug('Checking pause/resume actions...')\n self._test_pause()\n self._test_resume()",
"def test_is_simulating(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n decoy.when(mock_engine_client.state.config.ignore_pause).then_return(True)\n assert subject.is_simulating()",
"def inhale_pause_control(self):\n self.worker_piston.pause = True",
"def event_m10_29_1190():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291000, z39=10290410)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def test_pause_resume(self):\n services = [\n 'apache2',\n 'octavia-health-manager',\n 'octavia-housekeeping',\n 'octavia-worker',\n ]\n if openstack_utils.ovn_present():\n services.append('octavia-driver-agent')\n logging.info('Skipping pause resume test LP: #1886202...')\n return\n logging.info('Testing pause resume (services=\"{}\")'\n .format(services))\n with self.pause_resume(services, pgrep_full=True):\n pass",
"def event_m10_29_1200():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291000, z39=10290411)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def event_m10_29_1140():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291010, z39=10290405)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def behaviors_paused(self) -> bool:",
"def event_m10_29_1110():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291010, z39=10290402)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def event_m10_29_1180():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291000, z39=10290409)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def event_m10_29_1210():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291000, z39=10290412)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def test_turn_on(power_supply):\n power_supply.Init()\n assert power_supply.state() != tango.DevState.ON\n power_supply.current = 5.0\n power_supply.turn_on()\n assert power_supply.state() == tango.DevState.ON",
"def event_m10_29_1170():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291000, z39=10290408)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def event_m10_29_1120():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291010, z39=10290403)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()",
"def pause(self):\n\n RosProxy().call_service(\"/welding_driver/arc_stop\", Empty, EmptyRequest())\n super(WeldTask, self).pause()",
"def should_sleep(self):\n return",
"def event_m10_29_1130():\r\n \"\"\"State 0,2: [Preset] Door that opens in conjunction with the gimmick door_SubState\"\"\"\r\n assert event_m10_29_x25(z38=10291010, z39=10290404)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()"
]
| [
"0.6591592",
"0.64406085",
"0.6390791",
"0.631739",
"0.6298093",
"0.6271643",
"0.6247215",
"0.62062687",
"0.61953586",
"0.6184892",
"0.61199874",
"0.60921264",
"0.6024901",
"0.5980132",
"0.5977752",
"0.5963126",
"0.5947802",
"0.5931819",
"0.59252495",
"0.59242266",
"0.5908363",
"0.58971524",
"0.5896979",
"0.589219",
"0.5882827",
"0.5859142",
"0.5855179",
"0.58527213",
"0.58453345",
"0.5843629"
]
| 0.8009721 | 0 |
Extracts used strings from a %(foo)s pattern. | def extract_pattern(fmt):
class FakeDict(object):
def __init__(self):
self.seen_keys = set()
def __getitem__(self, key):
self.seen_keys.add(key)
return ''
def keys(self):
return self.seen_keys
fake = FakeDict()
try:
fmt % fake
except TypeError:
# Formatting error
pass
return set(fake.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_strings(f):\n strings = re.findall(strregex,f)\n return strings",
"def extractDef(c: Cmdr, s: str) -> str:\n for pat in c.config.getData('extract-patterns') or []:\n try:\n pat = re.compile(pat)\n m = pat.search(s)\n if m:\n return m.group(1)\n except Exception:\n g.es_print('bad regex in @data extract-patterns', color='blue')\n g.es_print(pat)\n for pat in extractDef_patterns:\n m = pat.search(s)\n if m:\n return m.group(1)\n return ''",
"def glob_fmt(pattern: str, *templates: Strings) -> List[str]:\n results: List[str] = []\n for wildcards in glob_extract(pattern):\n for template in each_string(*templates):\n results.append(copy_annotations(template, template.format(**wildcards)))\n return results",
"def find_pattern_in_str(pattern, source):\n pattern = re.compile(pattern)\n for match in re.finditer(pattern,source):\n return match.groups()\n return None",
"def extract(self, pat):\n re_pat = re.compile(pat)\n for infilename in self.file_names:\n infile = open(infilename, 'r')\n for lineno, line in enumerate(infile):\n line = line.rstrip()\n mo = re_pat.search(line)\n if mo is not None:\n groups = mo.groups()\n print 'File: %s LnNo: %d Line: %s' % (\n infilename, lineno, line, )\n for item in groups:\n print ' Match: \"%s\"' % (item, )",
"def _get_placeholders(template):\n return [p[1] for p in string.Formatter().parse(template)\n if p[1] is not None and len(p[1]) > 0]",
"def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)",
"def findall(pattern, string, overlapping=True, sensitive=True, regexp=False):\n if regexp:\n return SE.occurrences_re(pattern, string)\n if overlapping:\n return SE.occurrences(pattern, string, sensitive)\n else:\n return SE.full_words(pattern, string, sensitive)",
"def grep(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if regex.search(s.name)]",
"def extract_def_use(elements):\n\n definition_usage_tuple = [(definition, usage) for element in elements for i, (definition, usage) in\n enumerate(zip(element['definition'], element['usage'])) if\n len(definition) != 0 and len(usage) != 0 and definition != \"\" and usage != \"\"]\n definitions = [el[0] for el in definition_usage_tuple]\n usages = [el[1] for el in definition_usage_tuple]\n definitions = [re.sub(r'[\\n\\r\\t]', ' ', el) for el in definitions]\n usages = [re.sub(r'[\\n\\r\\t]', ' ', el) for el in usages]\n return definitions, usages",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def fmt_capture(kwargs: Any, *patterns: Any) -> Any: # type: ignore\n results = [copy_annotations(pattern, _fmt_capture(kwargs, pattern)) for pattern in each_string(*patterns)]\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(results) == 1\n return results[0]\n return results",
"def precious(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.precious = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings",
"def findall(pattern, text):\r\n\tspl = re.compile(pattern).split(text)\r\n\tresult = []\r\n\tbeginTag = \"\"\r\n\tendTag = None\r\n\tbeginFormat = \"\"\r\n\tendFormat = \"\"\r\n\tinitText = text\r\n\tfor s in spl:\r\n\t\ttext = text[len(s)+2:]\r\n\t\tend = text.find(\")s\")\r\n\t\tvar = \"\"\r\n\t\tif len(text) > 0:\r\n\t\t\tvar = text[:end]\r\n\t\t\tresult.append(var)\r\n\t\tif var == \"content\":\r\n\t\t\tbeginTag += s\r\n\t\t\tendTag = \"\"\r\n\t\telif endTag != None:\r\n\t\t\tendTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tendFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tendFormat += \"self.%s,\"%var\r\n\t\t\t\tendTag += \"\\x25s\"\r\n\t\telse:\r\n\t\t\tbeginTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tbeginFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tbeginFormat += \"self.%s,\"%var\r\n\t\t\t\tbeginTag += \"\\x25s\"\r\n\t\ttext = text[end+2:]\r\n\tif endTag == None:\r\n\t\tendTag = \"\"\r\n\t\tendFormat = \"\"\r\n\treturn result, beginTag, endTag, beginFormat, endFormat",
"def get_pattern(topic):\n variants = get_variants(topic)\n sub_patterns = [r'(.*\\b)%s\\b(.*)' % variant.lower() for variant in variants]\n return re.compile(r'|'.join(sub_patterns), flags=re.IGNORECASE)",
"def _internal_match(self, pattern):\n compiled_re = re.compile(pattern)\n for word in self.words:\n if compiled_re.fullmatch(word) is not None:\n yield word",
"def usage(self):\n names = self.sources.keys()\n return sorted([(n.replace('__', '.'), self._resolve(n)) for n in names],\n key=lambda el: el[0])",
"def extract_params(self, fname):\n return re.findall(self.regexp_params, os.path.basename(fname))",
"def load_url_pattern_names(patterns, include_with_args=True):\n global URL_NAMES\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver': # load patterns from this RegexURLResolver\n load_url_pattern_names(pat.url_patterns, include_with_args)\n elif pat.__class__.__name__ == 'RegexURLPattern': # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n if include_with_args or re.compile(pat.regex).groups == 0:\n URL_NAMES.append(pat.name)\n return URL_NAMES",
"def grep_me(pattern, fname):\n for line in stream_reader(fname):\n if re.search(pattern, line, re.I):\n print('{}:{}:{}'.format(filename(), filelineno(), line), end='')",
"def extractAttribute(content, pattern):\n \n return re.findall(re.compile(pattern), str(contents))",
"def match(pattern: List[str], source: List[str]) -> List[str]:\n sind = 0 # current index we are looking at in the source list\n pind = 0 # current index we are looking at in the pattern list\n result: List[str] = [] # to store the substitutions that we will return if matched\n acc = ''\n\n # keep checking as long as we haven't hit the end of both pattern and source\n while sind != len(source) or pind != len(pattern): \n # Your job is to fill out the body fo this loop\n # 1) if we reached the end of the pattern but not source \n if pind == len(pattern):\n return None\n # 2) if the current thing in the pattern is a %\n elif pattern[pind] == '%':\n pind += 1 # moving from % to next word \n while sind != len(source):\n if pind != len(pattern) and pattern[pind] == source[sind]:\n break \n else: \n if acc == \"\": \n acc += source[sind] # if it is the first character do not add a space \n else: \n acc += \" \"\n acc += source[sind]\n sind += 1\n result.append(acc)\n acc = ''\n # 3) if we reached the end of the source but not the pattern\n elif sind == len(source):\n return None \n # 4) if the current thing in the pattern is an _\n elif pattern[pind] == '_':\n result.append(source[sind])\n sind += 1\n pind += 1\n #appending is for lists and adding is for strings\n # 5) if the current thing in the pattern is the same as the current thing \n # in the source\n elif pattern[pind] == source[sind]:\n sind += 1\n pind += 1\n # 6) else : this will happen if none of the other conditions are met\n # it indicates the current thing it pattern doesn't match the current\n # thing in source\n else: \n return None\n return result",
"def do_extract(self, args):\n args = args.split()\n if len(args) != 1:\n print 'usage: scan pat'\n return\n pat = args[0]\n self.regexprutils.extract(pat)",
"def get_source_file_string_placeholders(file):\n placeholders = {}\n root = ET.parse(file).getroot()\n for element in root.findall('string'):\n name = element.get('name')\n value = ''.join(element.itertext())\n placeholder = get_placeholders(value)\n if placeholder:\n placeholders[name] = placeholder\n return placeholders",
"def resolve_pattern(pattern, args):\n if args is None or len(args) == 0:\n return pattern\n elif pattern.find('%') >= 0:\n return pattern % args\n elif pattern.find(\"{\") >= 0:\n # star magic does not work for single args\n return pattern.format(*args)\n else:\n # fixed pattern, no placeholders\n return pattern",
"def find_names(s):\n \"*** YOUR CODE HERE ***\"",
"def extract_strings_from_i(incarnato_fragments, genome, param):\r\n i_dict = {}\r\n i_fragment_regions = \"\"\r\n\r\n with open(incarnato_fragments) as f:\r\n for line in f:\r\n start, end = line.strip().split(\"_\")[1].split(\"-\")\r\n seq = next(f).strip()\r\n ss = next(f).strip()\r\n i_dict[(int(start), int(end))] = [seq,ss]\r\n\r\n for start, end in sorted(i_dict.keys()):\r\n temp = start - len(i_fragment_regions)\r\n gaps = \"\".join([\"_\" for x in range(0, temp)])\r\n i_fragment_regions += gaps\r\n i_fragment_regions += i_dict[(start, end)][param]\r\n \r\n tail = \"\".join([\"_\" for x in range(len(i_fragment_regions), genome+1)])\r\n i_fragment_regions += tail\r\n return i_fragment_regions",
"def get_variable_matches(text):\n return _property_pattern.findall(text)",
"async def find_pattern_in_tags(pattern: str, tags: ResultSet) -> list[str]:\n matches: list[str] = []\n\n for tag in tags:\n url: str = str(tag.get('href'))\n matches.append(url) if re.search(pattern=pattern, string=url) else None\n\n return matches",
"def find_template_variables(code):\n return re.findall(re_template_var, code)"
]
| [
"0.5958557",
"0.55669093",
"0.54235214",
"0.52379817",
"0.5219056",
"0.5214103",
"0.5189378",
"0.5168074",
"0.5166035",
"0.5159953",
"0.5145372",
"0.51319027",
"0.5097553",
"0.5061075",
"0.5056262",
"0.50380015",
"0.50376844",
"0.5022514",
"0.5001648",
"0.49748185",
"0.4872375",
"0.4864959",
"0.48562014",
"0.48515457",
"0.4847991",
"0.48357606",
"0.4825609",
"0.4815113",
"0.48096606",
"0.47995415"
]
| 0.57127404 | 1 |
Returns a dictionary containing all attributes of a given node. Attributes whose name occurs in the set skip are ignored. | def _filterAttr(self, node, skip):
attr = {}
for key, val in node.items():
if key not in skip:
attr[key] = val
return attr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attributes_of_node(core, node):\n attributes = {}\n for attribute in core.get_attribute_names(node):\n attributes[attribute] = core.get_attribute(node,attribute)\n return attributes",
"def get_node_attributes(graph: BaseGraph, attr_key: str) -> Dict:\n return get_node_attributes(graph.graph, attr_key)",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))",
"def node_attrs(self, context: DrawContext, node: Node) -> DotAttrs:\n for func in self.node_attr_funcs:\n yield from func(context, node)",
"def get_keyable_attribute(node):\n attrs = cmds.listAttr(node, ud=False, k=True)\n\n return attrs",
"def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]",
"def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes",
"def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs",
"def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict",
"def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}",
"def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))",
"def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic",
"def get_attrs_dict(self, root_element):\n attr_elements = root_element.findall(\"attribute\")\n attrs_dict = {}\n for el in attr_elements:\n attrs_dict[el.attrib[\"name\"]] = {\n \"value\": el.attrib[\"value\"],\n \"type\": el.attrib.get(\"type\", None)\n }\n return attrs_dict",
"def get_attributes(doc):\n\treturn doc.keys()",
"def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)",
"def getAttributeDict(self):\n result = {}\n dict = self.getDict()\n for key in dict.keys():\n value = dict.get(key)\n if value.__class__.__name__ != 'SpecialDict':\n result[key] = value\n return result",
"def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict",
"def get_attributes(cls):\r\n return [Attribute('file'),\r\n Attribute('missing', None)]",
"def get_non_null_attributes(self) -> dict:\n return {\n key: getattr(self, key, None)\n for key in sorted(self.attributes)\n if getattr(self, key, None) is not None\n }",
"def get_attrib_dict(self, attribs: Tuple[str]) -> Dict[str, str]:\n attrs = self.get_attribs(attribs)\n attrs = tuple(map(lambda a: (a[0][1:], a[1]), attrs))\n return dict(attrs)",
"def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a",
"def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)",
"def get_skip(self) -> Mapping[str, str]:\n return {}",
"def get_fields(node):\r\n return dict(iter_fields(node))",
"def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list",
"def visit(self, node):\n super(_GetattrNodeVisitor, self).visit(node)",
"def get_edge_attributes(graph: BaseGraph, attr_key: str) -> Dict:\n return get_edge_attributes(graph.graph, attr_key)",
"def testAttributes(self):\n ddict = {\n \"group\": {\"dataset\": 100, \"@group_attr1\": 10},\n \"dataset\": 200,\n \"@root_attr\": 11,\n \"dataset@dataset_attr\": \"12\",\n \"group@group_attr2\": 13,\n }\n dictdump.dicttonx(ddict, self.h5_fname)\n ddict = dictdump.nxtodict(self.h5_fname, include_attributes=True)\n self.assertEqual(ddict[\"group\"][\"@group_attr1\"], 10)\n self.assertEqual(ddict[\"@root_attr\"], 11)\n self.assertEqual(ddict[\"dataset@dataset_attr\"], \"12\")\n self.assertEqual(ddict[\"group\"][\"@group_attr2\"], 13)",
"def extractAttrs(data):\n\treturn [instance[1:] for instance in data]"
]
| [
"0.67991483",
"0.6495876",
"0.6291643",
"0.6113402",
"0.608586",
"0.6012828",
"0.5955157",
"0.59021395",
"0.589775",
"0.58977383",
"0.5892174",
"0.58709115",
"0.58429515",
"0.58203477",
"0.5813974",
"0.5740156",
"0.573371",
"0.57268363",
"0.5715512",
"0.57122576",
"0.5706236",
"0.5702955",
"0.5681007",
"0.5677158",
"0.5671647",
"0.5669442",
"0.5613086",
"0.56122357",
"0.560555",
"0.5590988"
]
| 0.8172322 | 0 |
Listen indefinitely for scanner input. If the student's ID number is found in the database, update the attendance roster; otherwise, add the student to the database. | def get_input(conn):
while True:
student_id = input('')
c = conn.cursor()
c.execute('SELECT * FROM students WHERE student_id=?', (student_id,))
selection = c.fetchone()
if selection is None:
name = prompt()
student = name + (student_id,)
entry_id = create_student(conn, student)
print(f'Student added at ID: {entry_id}')
else:
update_attendance(selection)
print('Attendance updated!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __ui_update_student(self):\n student_id = input(\"student id: \")\n student_name = input(\"student discipline_name: \")\n disciplines_list = []\n\n discipline_name = None\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.update_student(student_id, student_name, disciplines_list)\n print(\"Update student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return",
"def add_student():\r\n reply = True\r\n while reply is True:\r\n reply = yes_or_no('Do you want to add a student?')\r\n if reply is True:\r\n student_name = input('Enter student name: ') or \"-1\"\r\n student_id = input('Enter student id: ') or \"-1\"\r\n student = {\"name\": student_name, \"student_id\": student_id}\r\n students.append(student)\r\n save_file(student_name)",
"def searchStudent():\n os.system(\"cls\")\n print(\"Input Number: \")\n print(\"Press q to abort\\nPress enter if you done typing\")\n keyin = msvcrt.getwch()\n if keyin == 'q':\n print(\"Searching Aborted.\")\n return\n state, s, hashID = t.search(keyin)\n os.system(\"cls\")\n for k in s:\n print(k)\n while 1:\n print(\"Input Number:\", keyin)\n print(\"Press q to abort\\nPress enter if you done typing\")\n keyinnow = msvcrt.getwch()\n if keyinnow == 'q':\n print(\"Searching Aborted.\")\n return\n elif keyinnow == '\\x08':\n keyin = keyin[:-1]\n elif keyinnow == '\\r':\n break\n else:\n keyin += keyinnow\n os.system(\"cls\")\n state, s, hashID = t.search(keyin)\n for j in s:\n print(j)\n number = keyin\n state, s, hashID = t.search(number)\n if state == 1:\n student = ht.getIndex(hashID, number)\n inp1 = 0\n while inp1 != 4:\n inp1 = int(input(\"1. View Student\\n2. Delete Student\\n3. Edit Student\\n4. Exit\\n\"))\n if inp1 == 1:\n print(\"Name:\", student.data.name)\n print(\"Number:\", student.data.number)\n print(\"GPA:\", student.data.gpa)\n print(\"Field:\", student.data.field)\n if inp1 == 2:\n deleteStudent(hashID, number)\n break\n if inp1 == 3:\n editStudent(hashID, number)\n break\n else:\n print(\"student doesn't exist.\")",
"def ask_for_valid_school_id(name: str, classroom: Classroom) -> Optional[Student]:\n msg = f'<b>{name}</b> not found.'\n echo(msg, format=True)\n\n while True:\n school_id = ask('School id: ', default=None)\n if school_id is None:\n if ask('Skip student? ', type=bool):\n echo()\n return None\n else:\n continue\n\n try:\n student = Student(name, school_id)\n classroom.students.add(student)\n classroom.save()\n return student\n except ValueError:\n echo(f'School id exists for {student.display}')\n if ask('Reuse? ', type=bool):\n student = classroom.students.get(school_id=school_id)\n student.aliases.append(student.name)\n classroom.save()\n echo()\n return None",
"def option1(self):\n ID = int(input(\"ID: \"))\n name = input(\"Name: \")\n attNr = int(input(\"Number of attendances: \"))\n grade = int(input(\"Grade: \"))\n self.__srv.addStud(ID,name,attNr,grade)",
"def add_employee():\n\n while True:\n first_name = get_user_string(\"Enter your first name\")\n last_name = get_user_string(\"Enter your last name\")\n grade = get_employee_input_int(\"Enter your grade\")\n db.add_employee(first_name, last_name, grade)\n print(\"New employee \" + first_name + \" \" + last_name + \" has been added to the employee table\")\n user_input = input(\"Do you want to add more employees to the table ? (Y/N)\")\n if(str(user_input).upper()) == 'Y':\n continue\n elif (str(user_input).upper()) == 'N':\n break\n else:\n print(\"Invalid Input\\nReturning to the main menu\")\n break",
"def GradeManager():\r\n quit_program = False\r\n while quit_program is False:\r\n user_input = input('$ ')\r\n n = user_input.find(' ') # separates argument and command\r\n if user_input[:n] == 'AddStudent':\r\n add_student(user_input[n + 1:])\r\n elif user_input[:n] == 'DeleteStudent':\r\n delete_student(user_input[n+1:])\r\n elif user_input[:n] == 'SortRoster':\r\n sort_roster(user_input[n+1:])\r\n elif user_input[:n] == 'FindByFName':\r\n find_by_name('FindByFName', user_input[n+1:])\r\n elif user_input[:n] == 'FindByLName':\r\n find_by_name('FindByLName', user_input[n+1:])\r\n elif user_input[:n] == 'GetAverage':\r\n get_average(user_input[n+1:])\r\n elif user_input == 'PrintRoster':\r\n for student in StudentRoster:\r\n print(student_format(student))\r\n elif user_input == 'Quit': # test\r\n quit_program = True",
"def add():\r\n ch = input('You are about to ADD an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n')).lower().capitalize()) # lower, cap first, remove whitespace\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n')).lower().capitalize())\r\n\r\n if search2(xln, xfn): # search if an entry already exists for user's input\r\n print('An entry already exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return add() # if an entry already exists make user enter another\r\n\r\n xgr = None\r\n try: # try except user's inputted grade\r\n xgr = int(input('Grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n add()\r\n\r\n xsr = str(input('Stream? (eg. Academic, IB, etc...)\\n')).lower().capitalize()\r\n xrl = str(input('Role? (eg. Design Member)\\n')).lower().capitalize()\r\n xcm = str(input('Any comments?\\n')).lower().capitalize()\r\n\r\n ch2 = input('Are you sure you wish to add this individual to the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been added to the database.')\r\n with conn: # input corresponding info to table with context manager\r\n c.execute(\"\"\"INSERT INTO personnel VALUES (\r\n :last, :first, :grade, :stream, :role, :comments)\"\"\",\r\n {'last': xln, 'first': xfn, 'grade': xgr, 'stream': xsr, 'role': xrl, 'comments': xcm})\r\n\r\n start() # after user's action has been completed, ask for another\r\n else:\r\n print('Your add action has been cancelled.')\r\n start()\r\n else: # ask for another if user wishes to perform another action\r\n start()",
"def update_attendance(selection):\n\n t = date.today()\n today = str(t) + '.csv'\n name = list(selection)\n with open(today, 'a', newline='') as attendance:\n fieldnames = ['last_name', 'first_name', 'status']\n writer = writer(attendance, fieldnames=fieldnames)\n for row in reader:\n if row['last_name'] == name[0]:\n writer.writerow({'status': 'PRESENT'})\n else:\n print('Student could not be found on the roster.')",
"def __ui_add_student(self):\n student_id = input(\"student_id: \")\n student_name = input(\"student_name: \")\n\n print(\"Give disciplines for student, enter for done\")\n disciplines_list = []\n\n discipline_name = '0'\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.add_student(student_id, student_name, disciplines_list)\n print(\"Add student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return",
"def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t",
"def __search_student(self):\n menu_string = \"Search for a student:\\n\"\n menu_string += \"\\t1. by ID\\n\"\n menu_string += \"\\t2. by discipline_name\\n\"\n menu_string += \"\\t0. Exit\\n\"\n\n stop = False\n while not stop:\n command_list = \\\n {\n '1': self.__ui_search_student_by_id,\n '2': self.__ui_search_student_by_name,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command == '0':\n return\n\n search = input(\"Enter search_substring string: \")\n if len(search) == 0:\n print(\"Search string cannot be empty!\")\n return\n\n if command in command_list.keys():\n command_list[command](search)\n else:\n print(\"Invalid command!\")",
"def add_member() -> None:\r\n name = input('Enter Member Name :')\r\n clas = input('Enter Member Class & Section : ')\r\n address = input('Enter Member Address : ')\r\n phone = input('Enter Member Phone : ')\r\n email = input('Enter Member Email : ')\r\n student_status = \"\"\r\n if name.lower() not in books_di:\r\n books_di[name.lower()] = [clas, address, phone, email, student_status]\r\n else:\r\n print(f'\\n Student is already existed with that name{name}')\r\n print(f'\\n\\nNew Member {name} added successfully')\r\n wait = input('\\n\\n\\n Press any key to continue....')",
"def update():\r\n ch = input('You are about to UPDATE an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n'))).lower().capitalize()\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n'))).lower().capitalize()\r\n\r\n if not search2(xln, xfn):\r\n print('No entry exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return update()\r\n\r\n chs2 = ['grade', 'g', 'stream', 's', 'role', 'r', 'comment', 'c']\r\n ch2 = input('What information would you like to update? Previous data will be cleared.\\n')\r\n ch2 = check(ch2, chs2)\r\n\r\n if ch2 == 'grade' or ch2 == 'g':\r\n try:\r\n xgr = int(input('New grade for {} {}?\\n'.format(xfn, xln)))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n gr_u(xln, xfn, xgr)\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n search()\r\n elif ch2 == 'stream' or ch2 == 's':\r\n xsr = input('New stream for {} {}?\\n'.format(xfn, xln)).lower().capitalize()\r\n sr_u(xln, xfn, xsr)\r\n elif ch2 == 'role' or ch2 == 'r':\r\n xrl = input('New role for {} {}?\\n'.format(xfn, xln)).lower().capitalize()\r\n rl_u(xln, xfn, xrl)\r\n else:\r\n xcm = input('New comment for {} {}?\\n'.format(xfn, xln)).lower().capitalize()\r\n rl_u(xln, xfn, xcm)\r\n else:\r\n start()",
"def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return",
"def _load_student_record(self, student_key, students_attendance_data):\n student = SchoolDB.models.Student.get(db.Key(student_key))\n if (student):\n student.attendance.save_multiple_dates(\n self.dates, students_attendance_data)",
"def query_user_id(conn):\n user_id = 0\n while (user_id != -1) is True:\n user_input = raw_input(\"Please enter a person ID: \")\n try:\n user_id = int(user_input)\n if user_id == -1:\n print \"User entered -1 - exiting\"\n break\n select_person_by_id(conn, user_id)\n select_pets_by_person(conn, user_id)\n except ValueError:\n print \"Please enter an integer\"",
"def monitor(self):\n\n # Log beginning of process\n board_logger = self.get_board_logger()\n board_logger.info(\"Beginning monitor of input for pin %s.\", \\\n self.__pin)\n \n # Set input status of pin for board object\n self.set_input_status(GPIO.input(self.__pin))\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"Initital status: %s\", status)\n\n # Deal with an error status upon power failure\n if self.get_input_status() == 1: self.initiate_event()\n\n # Monitor pin until KeyBoardInterrupt is detected\n while True:\n\n # Log monitoring\n board_logger.info(\"Monitoring for pin changes...\")\n \n # Wait for a change in pin status\n GPIO.wait_for_edge(self.__pin, GPIO.BOTH)\n\n sleep(0.005) #debounce for 5ms\n\n if self.get_input_status() != GPIO.input(self.__pin):\n \n # Set input status of pin\n self.set_input_status(GPIO.input(self.__pin))\n\n # Initiate event\n self.initiate_event()",
"def global_assign(ass_status) -> None:\r\n name = input('Enter Member Name :')\r\n student_status = input('Enter the book to be assigned : ')\r\n if name.lower() in ass_status[\"total_students\"] and student_status in ass_status[\"total_book\"]:\r\n if name.lower() in ass_status[\"available_students\"] and student_status in ass_status[\"available_books\"]:\r\n students_di[name][-1] = student_status\r\n books_di[student_status][-1] = name\r\n logging.info(f\"The {student_status} book is assigned to {name}\")\r\n print(f\"The {student_status} book is assigned to {name}\")\r\n else:\r\n logging.warning(f\"Student - {name} OR Book - {student_status} are already assigned\")\r\n print(f\"Student - {name} OR Book - {student_status} are already assigned\")\r\n else:\r\n print(f\"Mr.{name} OR The Book - {student_status} are not a part of this library\")\r\n logging.warning(f\"Mr.{name} OR The Book - {student_status} are not a part of this library\")",
"def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))",
"def interaction_with_student(player: Character, flee: int):\n while True:\n choice = input(\"\\nA student appeared! Fight or run away? \")\n choice = choice.lower()\n a_student = Student()\n if choice == \"fight\":\n battle_to_death(player, a_student)\n break\n elif choice == \"run away\" or choice == \"run\":\n if flee == 10:\n student_flee_attack(player, a_student)\n if player.hp <= 0:\n break\n player.display_flee_message()\n break\n else:\n print(\"Invalid choice, try again!\")",
"def add_student(lstudents, lnotes):\n option = 'y'\n\n while option == 'y':\n\n try:\n name = input('Name: ')\n qualification = float(input('Qualification: '))\n lstudents.append(name)\n lnotes.append(qualification)\n\n except ValueError:\n print('wrong value')\n\n option = input(\n 'if you want add more studets press: \\'y\\' otherwise press any key: ')",
"def add_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n student_tuple = student_info._make(no_space.split(\",\"))\r\n StudentRoster.append(student_tuple)",
"def data_input(self, post_student_data: dict) -> dict:\n for i in post_student_data: \n if post_student_data[i] == \"N/A\":\n while True:\n if i == \"scoreTimestamp\":\n post_student_data[i] = int(round(time.time() * 1000))\n break\n data_input = input(f\"Enter {i}: \")\n if data_input == \"-99\":\n break\n if i == \"score\":\n try:\n data_input = float(data_input)\n except ValueError:\n print(\"Value Error: please enter a numeric score.\")\n continue\n post_student_data[i] = data_input\n break\n \n return post_student_data",
"def addStudent():\n name = input(\"Name: \")\n number = input(\"Number: \")\n gpa = input(\"GPA: \")\n field = input(\"Field: \")\n student = Student(name, number, gpa, field)\n if t.insert(number, student):\n ht.insert(student)\n print(name, \"added successfully.\")\n else:\n print(\"student number is not valid.\")",
"def process_event(self, event, day_index, scratchpad_dict):\n student_key = event.student_key\n #try:\n #logging.info(\">>Processing event on day index %d\" %day_index)\n #logging.info(\"--Processing event: '%s'\" %event.get_event_text())\n #logging.info(\"--Processing event -key: %s\" %str(student_key))\n #logging.info(\"<<Processing event student name '%s'\" \n #%unicode(db.get(student_key)))\n #except:\n #logging.error(\"+++Failed in logging in process_event\") \n if event.add_to_section():\n #The student was not in the section yesterday but\n #already have her in the list of students \n if scratchpad_dict.has_key(student_key):\n del(scratchpad_dict[student_key])\n else:\n #The student is no longer in the section as of today.\n #Thus there has been no record of the student in our\n #dict of students as we move backwards in time\n student = db.get(student_key)\n self.all_students_dict[student_key] = student\n scratchpad_dict[student_key] = student\n self.add_event_text(day_index, event)",
"def update_checkin_DB(self):\n while True:\n tid, lid = self.queue.get()\n self.new_venues.append(lid)\n try:\n self.checkinDB.update({'_id': tid}, {'$set': {'lid': lid}})\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n print(sys.exc_info()[1])\n self.queue.task_done()",
"def ucsd_student_chat():\n \n chat = True\n said_college = False\n said_major = False\n greeted = False\n \n while chat:\n\n # Get a message from the user. \n msg = input('INPUT :\\t')\n out_msg = None\n\n\n # Prepare the input message to adjust to lower case and remove punctuation \n msg = prepare_text(msg)\n\n # Check for an end msg \n if end_chat(msg):\n out_msg = \"Hope to talk to you again! Bye!\"\n chat = False\n\n # Here, we will check for a series of topics that we have designed to answer to\n if not out_msg:\n\n # Initialize to collect a list of possible outputs\n outs = []\n\n # Output as first message regardless of what message is inputted\n if not greeted:\n outs = [\"Hello :) What college you are in at UCSD. If you do not want to talk to me, type 'quit'\"]\n greeted = True\n \n #Will check if college is inputted by user. If not, it will ask for the question again\n elif (not said_college) and greeted:\n false_resp = \"I don't think that's a college here on campus. Can you try typing your college again.\"\n curr_response = ucsd_college(msg)\n if curr_response != false_resp and (curr_response is not None):\n said_college = True\n outs.append(curr_response)\n \n #Will check if major is inputted by user. If not, it will ask for the question again. If so, it ends chat\n elif (not said_major) and said_college:\n college_resp = ucsd_major(msg)\n false_college = \"I don't recognize that major. Can you try typing out your major again?\" \n if college_resp != false_college and (college_resp is not None):\n said_major = True\n chat = False\n outs.append(college_resp)\n \n\n options = list(filter(None, outs))\n if options:\n out_msg = random.choice(options)\n \n\n #If input is not recongized, it will output a response from unknown variable\n if not out_msg:\n out_msg = random.choice(UNKNOWN)\n\n print('OUTPUT:', out_msg)",
"def AddStudent(self, event):\n pass",
"def _listen(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is: \")\n for i in users:\n print(users[i][\"name\"])\n name = False\n while not name: #Loop until valid user given\n name = input(\"Please enter the user that you would like to start listening to events for: \")\n userID = self._get_user_id(name)\n if not userID:\n name = False\n #Output\n command = \"listen {0}\".format(userID)\n return(command)"
]
| [
"0.57680213",
"0.5578818",
"0.5524008",
"0.5456833",
"0.5360587",
"0.5212382",
"0.5199357",
"0.51914966",
"0.51305926",
"0.5081897",
"0.50623494",
"0.5052733",
"0.504723",
"0.50306654",
"0.5011259",
"0.49893084",
"0.4986669",
"0.4959946",
"0.49570683",
"0.4955665",
"0.489259",
"0.48727986",
"0.4851455",
"0.48486194",
"0.4839975",
"0.48177156",
"0.48050433",
"0.47806492",
"0.4759658",
"0.47570354"
]
| 0.6865095 | 0 |
Return a scrambled version of wf.ANSWER >>> random.seed(42) >>> generate_starting_point() 'ACTGODOXFMUE' | def generate_starting_point() -> str:
starter = ''
for i in range(len(wf.ANSWER) // wf.SECTION_LENGTH):
section = list(wf.ANSWER[wf.SECTION_LENGTH * i:wf.SECTION_LENGTH * (i + 1)])
random.shuffle(section)
starter = starter + ''.join(section)
return starter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)",
"def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)",
"def _starting_prob(self, s):\n return self._starting_state_distribution.pdf(s)",
"def position_from_seed(seed):\n random.seed(seed)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n offset = random.randint(1, 100)\n start_position = (math.log(ascii_character_sum / 100) + offset, math.log(ascii_character_sum / 100) + offset)\n end_positon = (start_position[0] + 100, start_position[1] + 100)\n square_position = (start_position, end_positon)\n print(square_position)\n \n return square_position",
"def tracking_generation_seed():\n return 112",
"def _make_stamp():\n from random import choice\n from string import hexdigits\n\n length = 8\n return ''.join(choice(hexdigits) for m in range(length))",
"def write_starting_points(self):\n num_params = self.f['/parameters/parameterNames'].shape[0]\n num_starting_points = 100\n np.random.seed(0)\n starting_points = self.f.require_dataset(\n '/optimizationOptions/randomStarts',\n [num_params, num_starting_points], 'f8')\n lower = self.f['/parameters/lowerBound'][:]\n upper = self.f['/parameters/upperBound'][:]\n starting_points[:] = np.transpose(\n np.random.rand(num_starting_points, num_params) * (\n upper - lower) + lower)\n\n if 'nominalValue' in self.parameter_df:\n self.f['/parameters/nominalValues'] = \\\n self.parameter_df.nominalValue[\n self.parameter_df.estimate == 1]",
"def first_code_word():\r\n\r\n code = ''.join(f'{random.randint(1, 3)}f{random.randint(1,3)}e')\r\n return code",
"def get_seed_from_user():\n valid = False\n seed = \"\" #default value\n\n # user info\n print(\"\\nType initial characters of name(s) to generate.\")\n print(\"(You can directly enter to not indicate any initial character)\")\n print(\"(Type \\\"0\\\" without quotes to exit)\")\n\n while not valid:\n seed = input(\"Your input: \")\n seed = seed.lower().strip()\n\n if is_seed_valid(seed):\n valid = True\n else:\n print(\"\\nPlease type alphabetical character(s).\\n\")\n\n return seed",
"def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))",
"def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))",
"def grab_next_state(substr, markov_chain, pin_to_add):\n letters_for_next_state = list(hashTable[pin_to_add])\n transition_state_dict = markov_chain[substr]\n mod_transition_state = {}\n for letter in letters_for_next_state:\n if letter in transition_state_dict.keys():\n mod_transition_state[letter] = transition_state_dict[letter]\n print(\"modded transition state\", mod_transition_state)\n sum_trstate = sum(mod_transition_state.values())\n rnd = random.random() * sum_trstate\n for next_letter, weight in mod_transition_state.items():\n rnd -= weight\n if rnd <= 0:\n return next_letter",
"def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)",
"def generate_name(seed=\"\"):\n \n seed = \"<\" + seed\n output = seed\n \n # create initial states\n h_state = tf.zeros(shape=(1, embedding_dim))\n c_state = tf.zeros(shape=(1, embedding_dim))\n states = [h_state, c_state]\n \n stop = False\n \n while not stop:\n # convert text seed to model input\n seq = name_to_seq(seed)\n seq = np.array([seq])\n \n # predict next char\n probs, h_state, c_state = inference_model([seq] + states)\n states = [h_state, c_state]\n probs = np.asarray(probs)[:, -1, :]\n # \n index = np.random.choice(list(range(vocab_size)), p=probs.ravel())\n \n if index == 0:\n break\n \n pred_char = index_to_char[index]\n seed = pred_char\n output += pred_char\n \n if pred_char == \">\" or len(output) > max_len + 2:\n break\n \n return output.lstrip(\"<\").rstrip(\">\") # get rid of start(<) and end(>) chars",
"def setup(self):\n setup = RandomWordGenerator().get()\n self.formatted_word = ConvertWord().convert_to_dict(setup)\n self.underscore_word = HangmanUnderscoreDiagram(\n setup).create_hidden_word()\n self.failed_guesses = 0\n print(\"Hello\")\n self.has_won = False\n self.start_game(True)",
"def generate_round():\n prog_len = random.randint(_MIN_LEN, _MAX_LEN)\n diff = random.randint(_MIN_DIFF, _MAX_DIFF)\n start = random.randint(0, _MAX_START)\n prog = _generate_progression(prog_len, diff, start)\n missing_position = random.randint(0, len(prog) - 1)\n missing_element = prog[missing_position]\n prog[missing_position] = '..'\n prog = list(map(str, prog))\n string_question = ' '.join(prog)\n return string_question, missing_element",
"def seed():",
"def make_initial_state(self,seed,scrambles):\n seen = {}\n ns=0\n x = range(self.N*self.N)\n\n for r in range(self.N):\n for c in range(self.N):\n if x[r*self.N+c]==0:\n row,col=r,c\n self.initial = PuzzleState(x,self.N,row,col)\n R = random.Random()\n R.seed(seed)\n while ns<scrambles:\n index = R.randint(0,len(self.actions)-1)\n a = self.actions[index]\n nexts = self.initial.move(a)\n if nexts is not None:\n serial = nexts.__str__()\n if serial not in seen:\n seen[serial] = True\n self.initial = nexts\n ns += 1\n print('Problem:', self.__doc__, 'Initial state:')\n print(self.initial)\n print('==============')",
"def generate_sample(seed_phrase=\" \",max_length=MAX_LENGTH): \n x_sequence = [token_to_id[token] for token in seed_phrase]\n s.run(tf.assign(h_t,h_t.initial_value))\n \n # Feed the seed phrase, if any.\n for ix in x_sequence[:-1]:\n s.run(tf.assign(h_t,next_h),{x_t:[ix]})\n \n # Generate.\n for _ in range(max_length-len(seed_phrase)):\n x_probs,_ = s.run([next_probs,tf.assign(h_t,next_h)],{x_t:[x_sequence[-1]]})\n x_sequence.append(np.random.choice(n_tokens,p=x_probs[0]))\n \n return \"\".join([tokens[ix] for ix in x_sequence])",
"def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")",
"def get_start_gram(self, n):\n loc = random.choice([Loc(source, line, 0)\n for source in self._sources\n for line in range(len(source))\n if len(source[line])])\n return NGram.from_loc(n, loc)._replace(count=1/self.num_lines)",
"def generate_strings(self, new_puzzle):\n return new_puzzle._start",
"def generate_random_starting_point(variables):\n variable_dict = {}\n\n for i in variables:\n variable_dict[variables[i - 1]] = 1 if random.random() < 0.5 else 0\n\n return variable_dict",
"def get_offset(limit=12):\n return random.randrange(0, limit)",
"def new_state():\n return ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))",
"def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)",
"def generate_raiz():\n\treturn os.urandom(12)",
"def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline",
"def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)",
"def main():\n ans = random_word()\n run_game(ans, N_TURNS)"
]
| [
"0.5797676",
"0.5748072",
"0.5625717",
"0.5550335",
"0.55358267",
"0.5495027",
"0.5467237",
"0.54466385",
"0.5418034",
"0.53874934",
"0.52591026",
"0.5216048",
"0.52145153",
"0.52141696",
"0.52128077",
"0.52029085",
"0.5193149",
"0.5172854",
"0.5165869",
"0.5153036",
"0.51502144",
"0.5149609",
"0.5149586",
"0.51348186",
"0.5130538",
"0.51295376",
"0.51168334",
"0.5106664",
"0.510541",
"0.5102007"
]
| 0.78475887 | 0 |
Return a random section_num corresponding to a section of state that is not correctly arranged. >>> random.seed(42) >>> get_section_hint('CATDGOXOFMUE') 3 >>> get_section_hint('CTADGOXOFMUE') 4 | def get_section_hint(state: str) -> int:
section_nums = [i + 1 for i in range(len(state) // wf.SECTION_LENGTH)]
random.shuffle(section_nums)
for section_num in section_nums:
if not wf.check_section(state, section_num):
return section_num
return 0 # should never get here | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pick_section(signal, section=None):\n len_noise = signal.shape[-1]\n if section is None:\n len_sig = len_noise\n ii = 0\n elif isinstance(section, int):\n len_sig = section\n ii = np.random.randint(0, len_noise - len_sig)\n else:\n len_sig = np.asarray(section).shape[-1]\n ii = np.random.randint(0, len_noise - len_sig)\n return signal[..., ii:ii + len_sig]",
"def get_section_number() -> int:\n section_num = input('Enter a section number (1 - 4): ')\n while not (section_num.isdigit() and wf.is_valid_section(int(section_num))):\n print('Invalid section number!')\n section_num = input('Enter a section number (1 - 4): ')\n return int(section_num)",
"def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None",
"def section(self, idx: int) -> int:\n if self.sections >= (idx + 1):\n return int(RE_DIGIT.match(self.string.split(\".\")[idx]).group(1))\n return 0",
"def get_hints(state: str, mode: str, hint_type: str, section_num: int = 0) -> int:\n if in_easy_mode(mode):\n if hint_type == SECTION_HINT:\n hint = input('Enter Y if you want a section hint: ')\n if hint == 'Y':\n print('Your section hint is: ' + str(get_section_hint(state)))\n return 1\n elif hint_type == MOVE_HINT:\n hint = input('Enter Y if you want a move hint: ')\n if hint == 'Y': \n print('Your move hint is: ' + wf.get_move_hint(state, section_num)) \n return 1\n return 0",
"def getSectionIndex(self) -> int:\n ...",
"def _resolve_section_id_from_context(self, cr, uid, context=None):\n if context is None:\n context = {}\n if type(context.get('default_section_id')) in (int, long):\n return context.get('default_section_id')\n if isinstance(context.get('default_section_id'), basestring):\n section_name = context['default_section_id']\n section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=section_name, context=context)\n if len(section_ids) == 1:\n return int(section_ids[0][0])\n return None",
"def find_section_id( self, tool_panel_section_id ):\n if not tool_panel_section_id:\n tool_panel_section_id = ''\n else:\n if tool_panel_section_id not in self._tool_panel:\n # Hack introduced without comment in a29d54619813d5da992b897557162a360b8d610c-\n # not sure why it is needed.\n fixed_tool_panel_section_id = 'section_%s' % tool_panel_section_id\n if fixed_tool_panel_section_id in self._tool_panel:\n tool_panel_section_id = fixed_tool_panel_section_id\n else:\n tool_panel_section_id = ''\n return tool_panel_section_id",
"def get_section(line: str) -> str:\n if len(line) < 2:\n raise Exception(\"Error: Section line can't be shorter than 2\")\n return line[1:len(line) - 1]",
"def find_section_state(line, current_section, section_order, content, highlight_content):\n for section, pattern in SEC_PAT_DICT.items():\n if pattern.match(line):\n section_order.append(section)\n content[section] = []\n highlight_content[section] = []\n return section, 1\n\n if current_section is None:\n raise InvalidDataError(\"Could not identify section from line: {}\".format(line))\n else:\n return current_section, 1",
"def get_index_from_section(section):\n return section.rsplit(\"(\", 1)[1].rstrip(\")\")",
"def section(self):\n return SECTION_NAME_TO_SECTION[self.section_name]",
"def which(self, section):\n\n self.program.state.last_viewed = section\n ids = getattr(self.program.state, section)\n return ''.join([\n self._render(i+1, s) for i, s in enumerate(ids)\n ])",
"def fake_get_hint(_):\r\n return {'best_hint': 'This is the best hint.',\r\n 'rand_hint_1': 'A random hint',\r\n 'rand_hint_2': 'Another random hint',\r\n 'answer': '42.5'}",
"def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section",
"def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section",
"def _get_default_section_id(self, cr, uid, context=None):\n return (self._resolve_section_id_from_context(cr, uid, context=context) or False)",
"def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)",
"def get_section_id(section_service, import_id):\n section_id = None\n if import_id in section_cache:\n section_id = section_cache[import_id]\n else:\n filters = { 'import_id': import_id, }\n results = section_service.get_sections(filters)\n if len(results) > 0:\n # cache the results\n section_cache[import_id] = results[0].id\n section_id = results[0].id\n return section_id",
"def load_canvas(state, section=None):\n\n course_id = state.get_assignment().canvas_course_id\n if course_id is None:\n raise click.ClickException('Need to configure canvas in assignment '\n 'config')\n\n api = state.canvas_api()\n try:\n sections = tuple(api.list_sections(course_id))\n except CanvasNotFoundError:\n raise click.ClickException('Canvas reports no course with id {}'\n .format(course_id))\n except CanvasInternalError:\n raise click.ClickException('Canvas reported an internal error (5xx '\n 'status code). Try again later?')\n except CanvasAPIError as err:\n raise click.ClickException(str(err))\n\n if not sections:\n raise click.ClickException('No sections, so no students! Bailing out')\n\n section_chosen = None\n\n # Find a section matching criteria (either id or substring of section name)\n while True:\n error_message = None\n\n # If this is not their first attempt, print an error describing\n # why we're prompting again\n if section is not None:\n if section == 'all':\n break\n\n # First, try to find a match by id\n try:\n section_id = int(section)\n id_matches = [s for s in sections if section_id == s.id]\n\n if id_matches:\n # Assume a section id is unique\n section_chosen, = id_matches\n break\n except ValueError:\n # Not an integer\n pass\n\n # Now, try to find a match by name\n name_matches = [s for s in sections if section in s.name.lower()]\n\n if len(name_matches) == 1:\n section_chosen, = name_matches\n break\n elif len(name_matches) > 1:\n error_message = 'More than one section matches. Try again? ' \\\n '(Canvas is an extremely good website and ' \\\n 'allows duplicate section names, so you may ' \\\n 'have to supply an id.)'\n else:\n error_message = 'No sections match. Try again?'\n\n click.echo('List of sections:')\n for s in sections:\n click.echo(str(s))\n\n # Print the error message _after_ the list of sections. Even if\n # there are tons of sections, we still want the user to see the\n # error message.\n if error_message:\n click.echo(error_message)\n\n section = click.prompt('Choose a section (name or id)',\n default='all', type=lambda s: s.lower())\n\n assignment_id = state.get_assignment().canvas_assignment_id\n\n # If they specified \"all\", use all submissions in the course,\n # otherwise use just those from one section\n if section_chosen is None:\n submissions = api.list_submissions(course_id, assignment_id)\n else:\n submissions = api.list_section_submissions(section_chosen.id,\n assignment_id)\n\n click.echo('Downloading submissions from Canvas...')\n # Need to iterate over the list of submissions so that click can\n # call len(iterator) to know how to progress the progress bar\n with click.progressbar(list(submissions)) as bar:\n for canvas_submission in bar:\n student_name = canvas_submission.user.sortable_name\n base_dir = os.path.join(state.submission_dir, student_name)\n # Remove submission if it already exists\n shutil.rmtree(base_dir, ignore_errors=True)\n\n files_dir = os.path.join(base_dir, SUBMISSION_FILES_DIRECTORY)\n mkdir_p(files_dir)\n canvas_submission.download(files_dir)\n flatten(files_dir)\n\n # Create initial meta.json in submission dir\n submission = Submission(student_name, state.get_assignment(),\n base_dir, graded=False,\n id=canvas_submission.user_id,\n submission_time=canvas_submission.time())\n submission.initialize_metadata()",
"def section_id(self) -> str:\n\n return self[\"id\"]",
"def _section_index(self, chapter_index, title):\r\n\r\n # This is a hideous CSS selector that means:\r\n # Get the links containing the section titles in `chapter_index`.\r\n # The link text is the section title.\r\n section_css = 'div.chapters>section:nth-of-type({0}) div.sections div h3 a'.format(chapter_index)\r\n section_titles = self.q(css=section_css).map(lambda el: el.text.lower().strip()).results\r\n\r\n # The section titles also contain \"n of m possible points\" on the second line\r\n # We have to remove this to find the right title\r\n section_titles = [t.split('\\n')[0] for t in section_titles]\r\n\r\n # Some links are blank, so remove them\r\n section_titles = [t for t in section_titles if t]\r\n\r\n try:\r\n # CSS indices are 1-indexed, so add one to the list index\r\n return section_titles.index(title.lower()) + 1\r\n except ValueError:\r\n self.warning(\"Could not find section '{0}'\".format(title))\r\n return None",
"def testSectionCount(self):\n\n self.sectionCount(3640)",
"def fancy_section_type(section):\n return {\n 'LEC': 'Lecture',\n 'LAB': 'Laboratory',\n 'TUT': 'Tutorial',\n 'PRA': 'Practicum',\n 'COR': 'Correspondence',\n 'SEM': 'Seminar',\n 'ONL': 'Online',\n }.get(section, section)",
"def find_lost_section(amdpar_xml):\n reg_text = amdpar_xml.getparent()\n reg_text_siblings = [s for s in reg_text.itersiblings()\n if s.tag == 'REGTEXT']\n if len(reg_text_siblings) > 0:\n candidate_reg_text = reg_text_siblings[0]\n amdpars = [a for a in candidate_reg_text if a.tag == 'AMDPAR']\n if len(amdpars) == 0:\n # Only do this if there are not AMDPARS\n for c in candidate_reg_text:\n if c.tag == 'SECTION':\n return c",
"def get_section_tuple(c_dict, section_name=''):\n\n subsections = [get_section_tuple(c_dict[ss], ss) for ss in c_dict.sections]\n section_tuple = (section_name, subsections, c_dict.scalars)\n return section_tuple",
"def find_section_containing(self, addr):\n for s in self.sections:\n if s.contains_addr(addr - self.rebase_addr):\n return s\n\n return None",
"def setup_test_set_get_section_grader_ajax(self):\r\n self.populate_course()\r\n sequential_usage_key = self.course.id.make_usage_key(\"sequential\", None)\r\n sections = get_modulestore(self.course.id).get_items(sequential_usage_key)\r\n # see if test makes sense\r\n self.assertGreater(len(sections), 0, \"No sections found\")\r\n section = sections[0] # just take the first one\r\n return reverse_usage_url('xblock_handler', section.location)",
"def section_by_address(self, address: int) -> Optional['Elf.Section']:\n # Iterate in reverse to give priority to sections with nonzero addresses\n for section in sorted(self.sections, reverse=True):\n if address in section.range():\n return section\n\n return None",
"def bus_section_id(self) -> int:\n return self.dss_obj.BUSI(5, 0)"
]
| [
"0.61764073",
"0.6048128",
"0.5957858",
"0.58769643",
"0.5857279",
"0.5694491",
"0.5605851",
"0.55351967",
"0.5389725",
"0.5279265",
"0.52681446",
"0.5125589",
"0.5080668",
"0.5071636",
"0.5045724",
"0.5045724",
"0.5005661",
"0.49288523",
"0.4920502",
"0.4882837",
"0.48555204",
"0.48440847",
"0.48285463",
"0.48046103",
"0.4790382",
"0.47822085",
"0.47591904",
"0.47170982",
"0.47129145",
"0.47124717"
]
| 0.87867403 | 0 |
Return True if and only if mode is a valid mode, and False otherwise. >>> is_valid_mode('T') True >>> is_valid_mode('S') False | def is_valid_mode(mode: str) -> bool:
return mode in (TEST, EASY, HARD) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isMode(mode, check):\n if mode==\"default\" or mode==\"all\":\n return True\n \n if mode.__contains__(check):\n return True\n\n if check.__contains__(\"_\"):\n check_modes = check.split(\"_\")\n for check_mode in check_modes:\n if not isMode(mode, check_mode):\n return False\n return True\n\n return False",
"def _check_mode_valid(mode):\n if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and\n mode != model_fn.ModeKeys.EVAL):\n raise ValueError(\"mode=%s unrecognized.\" % str(mode))",
"def _checkMode(mode):\n\n if not isinstance(mode, str):\n raise TypeError('The {0} should be a string. Given: {1!r}'.format(\"mode\", mode))\n\n if mode not in [MODE_RTU, MODE_ASCII]:\n raise ValueError(\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))",
"def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")",
"def is_valid_copy_number_mode(self, mode: str) -> bool:\n copy_number_type_mode = mode.strip().lower()\n return copy_number_type_mode in self.valid_copy_number_modes",
"def _check_mode(mode, encoding, newline):\n if \"t\" in mode:\n if \"b\" in mode:\n raise ValueError(\"Invalid mode: %r\" % (mode,))\n else:\n if encoding is not None:\n raise ValueError(\"Argument 'encoding' not supported in binary mode\")\n if newline is not None:\n raise ValueError(\"Argument 'newline' not supported in binary mode\")",
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"def in_easy_mode(mode: str) -> bool:\n return mode == EASY",
"def in_test_mode(mode: str) -> bool:\n return mode == TEST",
"def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False",
"def __wlst_mode_matches(self, attr_mode_string):\n return attr_mode_string == 'both' or attr_mode_string == WlstModes.from_value(self._wlst_mode).lower()",
"def is_valid(self):\n try:\n self.validate()\n return True\n except (TypeError, ValueError) as e:\n return False",
"def check_config_mode(self):\n return False",
"def is_valid_case_type(case_type):\n return bool(_case_type_regex.match(case_type or ''))",
"def validate_mode(mode, operator_tag, is_sha_digest):\n version_supports_restricted = check_if_tag_supports_restricted(operator_tag, is_sha_digest)\n if mode == MODE_RESTRICTED and not version_supports_restricted:\n raise ValueError(\"{} is not supported for this version, please use {}\".format(MODE_RESTRICTED, MODE_ALL))",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def is_valid(self):\n if self.flow_id is None:\n return False\n elif self.protocol is None:\n return False\n elif self.dst_addr is None:\n return False\n elif self.dst_port is None:\n return False\n elif self.pattern is None:\n return False\n else:\n return True",
"def isvalid(self):\n validName = not StringExtension.is_none_or_white_space(self._name)\n validValue = not StringExtension.is_none_or_white_space(self._value)\n if validName and validValue:\n return True\n return False",
"def is_valid_dup_del_mode(self, mode: str) -> bool:\n hgvs_dup_del_mode = mode.strip().lower()\n return hgvs_dup_del_mode in self.valid_dup_del_modes",
"def is_valid(self):\n if self.answer_type == 'F':\n return bool(self.text)\n\n if not self.answers.count():\n return False\n if not self.answers.filter(correct=True).count():\n return False\n return True",
"def validate(self, mode): # pragma: no cover\n pass",
"def check_flag(self):\n return self._flag is 0 or self._flag is 16",
"def check_enable_mode(self, check_string='#'):\n return True",
"def _match_current_modes(command, current_mode, modes):\n if current_mode in modes:\n return True\n #\n # if the modes is enable, this works everywhere\n #\n if 'login' in modes:\n return True\n #\n # if the modes is login, and the mode is anything but login,\n # then this is true\n #\n if 'enable' in modes and current_mode != 'login':\n return True\n for mode in modes:\n if mode.endswith('*') and current_mode.startswith(mode[:-1]):\n return True\n if command.get('command-type') == 'config-submode':\n for mode in modes:\n if current_mode.startswith(mode):\n return True\n \n return False",
"def _exact_mode_match(current_mode, command_modes):\n if not type(command_modes) == list:\n command_modes = [command_modes]\n for mode in command_modes:\n if mode == current_mode:\n return True\n if mode.endswith('*') and mode[:-1] == current_mode:\n return True\n return False",
"def _verify_ccd_operation_mode(self, ccd_operation_mode):\n em_mode = ccd_operation_mode['em_mode']\n em_gain = ccd_operation_mode['em_gain']\n hss = ccd_operation_mode['hss']\n preamp = ccd_operation_mode['preamp']\n binn = ccd_operation_mode['binn']\n t_exp = ccd_operation_mode['t_exp']\n ccd_temp = ccd_operation_mode['ccd_temp']\n\n dic_keywords_list = [\n 'binn', 'ccd_temp', 'em_gain', 'em_mode', 'hss', 'preamp', 't_exp']\n\n for key in ccd_operation_mode.keys():\n if key not in dic_keywords_list:\n raise ValueError(\n f'The name provided is not a CCD parameter: {key}')\n\n if list(ccd_operation_mode.keys()).sort() != dic_keywords_list.sort():\n raise ValueError(\n 'There is a missing parameter of the CCD operation mode')\n\n if em_mode not in [0, 1]:\n raise ValueError(\n f'Invalid value for the EM mode: {em_mode}')\n if em_mode == 0:\n if em_gain != 1:\n raise ValueError(\n 'The EM Gain must be 1 for the Conventional'\n + f' Mode: {em_gain}')\n else:\n if em_gain not in [float, int]:\n raise ValueError(\n f'The EM gain must be a number: {em_gain}')\n elif em_gain < 2 or em_gain > 300:\n raise ValueError(\n f'EM gain out of range [2, 300]: {em_gain}')\n\n if preamp not in [1, 2]:\n raise ValueError(\n f'Invalid value for the pre-amplification: {preamp}')\n\n if hss not in [0.1, 1, 10, 20, 30]:\n raise ValueError(\n f'Invalid value for the Readout rate: {hss}')\n\n if binn not in [1, 2]:\n raise ValueError(\n f'Invalid value for the binning: {bin}')\n\n if type(t_exp) not in [float, int]:\n raise ValueError(\n f'The exposure time must be a number: {t_exp}')\n elif ccd_operation_mode['t_exp'] < 1e-5:\n raise ValueError(\n f'Invalid value for the exposure time: {t_exp}')\n\n if type(ccd_temp) not in [float, int]:\n raise ValueError(\n f'The CCD temperature must be a number: {ccd_temp}')\n if ccd_temp < -80 or ccd_temp > 20:\n raise ValueError(\n f'CCD temperature out of range [-80, 20]: {ccd_temp}')",
"def isValid(self):\n return self.isOutOfDate() == False \\\n and self.isQCValid() == True \\\n and self.getDisposeUntilNextCalibrationTest() == False \\\n and self.isValidationInProgress() == False \\\n and self.isCalibrationInProgress() == False",
"def check_permission(perm_mode, flags=stat.S_IWOTH):\n return bool(perm_mode & flags)",
"def validGameSettings(self):\n if not isinstance(self.view, GView):\n return False\n if not isinstance(self.input, GInput):\n return False\n validStates = [STATE_INACTIVE, STATE_NEWWAVE, STATE_ACTIVE,\n STATE_PAUSED, STATE_CONTINUE, STATE_COMPLETE]\n if not self.getState() in validStates:\n return False\n if not self.getWave() is None or isinstance(self.getWave(), Wave):\n return False\n if not self.getText() is None or isinstance(self.getText(), GLabel):\n return False\n return True",
"def valid(self):\n try:\n if self.getPret() > 0 and self.getAn() > 0 and self.validProgram(self.getProgram()):\n return True\n except:\n return False\n return False"
]
| [
"0.68416965",
"0.6592954",
"0.65321696",
"0.64740825",
"0.6327304",
"0.63070184",
"0.6277338",
"0.5980799",
"0.5936964",
"0.5894869",
"0.58809364",
"0.5868526",
"0.5779383",
"0.57723993",
"0.5761688",
"0.57445145",
"0.57384235",
"0.57228845",
"0.5719184",
"0.5700653",
"0.5692263",
"0.5611517",
"0.559629",
"0.5594046",
"0.5592097",
"0.5591895",
"0.55883366",
"0.5587057",
"0.55829185",
"0.5580736"
]
| 0.8136188 | 0 |
Return True if and only if mode indicates the game is in test mode, and False otherwise. >>> in_test_mode('T') True >>> in_test_mode('E') False | def in_test_mode(mode: str) -> bool:
return mode == TEST | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)",
"def in_test_mode(self, repo):\n return config.get('test_mode', repo=repo)",
"def test_test_mode(self):\n\n # Ensure that we default to test mode off\n self.assertFalse(self.driver._test_mode)\n\n exception = False\n try:\n self.driver.set_test_mode(False)\n self.driver.test_force_state(state=1)\n\n except TestModeException:\n exception = True\n\n except Exception:\n # ignore other exceptions\n pass\n\n self.assertTrue(exception)\n\n # Now set test mode and try to run again.\n exception = False\n try:\n self.driver.set_test_mode(True)\n self.assertTrue(self.driver._test_mode)\n self.driver.test_force_state(state=1)\n except TestModeException:\n exception = True\n\n except Exception:\n # ignore other exceptions\n pass\n\n self.assertFalse(exception)",
"def isMode(mode, check):\n if mode==\"default\" or mode==\"all\":\n return True\n \n if mode.__contains__(check):\n return True\n\n if check.__contains__(\"_\"):\n check_modes = check.split(\"_\")\n for check_mode in check_modes:\n if not isMode(mode, check_mode):\n return False\n return True\n\n return False",
"def set_test_mode(self, on_off):\n if type(on_off) != bool:\n print(\"test mode must be a bool\")\n return\n self.test_mode = on_off",
"def test_case(self) -> bool:\n return pulumi.get(self, \"test_case\")",
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"def test_is_mode_upsellable(self, mode, is_upsellable):\n CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)\n if mode == CourseMode.CREDIT_MODE:\n CourseModeFactory.create(mode_slug=CourseMode.VERIFIED, course_id=self.course.id)\n enrollment = CourseEnrollmentFactory(\n is_active=True,\n mode=mode,\n course_id=self.course.id,\n user=self.user\n )\n assert is_mode_upsellable(self.user, enrollment) is is_upsellable",
"def _match_current_modes(command, current_mode, modes):\n if current_mode in modes:\n return True\n #\n # if the modes is enable, this works everywhere\n #\n if 'login' in modes:\n return True\n #\n # if the modes is login, and the mode is anything but login,\n # then this is true\n #\n if 'enable' in modes and current_mode != 'login':\n return True\n for mode in modes:\n if mode.endswith('*') and current_mode.startswith(mode[:-1]):\n return True\n if command.get('command-type') == 'config-submode':\n for mode in modes:\n if current_mode.startswith(mode):\n return True\n \n return False",
"def is_test(self):\r\n return self.has_label('tests')",
"def test_modes(self):\n step = self.run_step('S01-first.py')\n self.assertTrue(step.success)\n self.assertTrue(step.local.is_testing)\n self.assertFalse(step.local.is_interactive)\n self.assertFalse(step.local.is_single_run)",
"def set_test_mode(self):\n self._test_mode = True\n self._wins = 0\n self._draws = 0\n self._count = 0\n self._losses = 0",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def get_running_mode(enduse, mode_constrained, enduse_space_heating):\n if mode_constrained:\n return True\n elif not mode_constrained and enduse in enduse_space_heating:\n return False\n elif not mode_constrained and enduse not in enduse_space_heating:\n # All other not constrained enduses where technologies are defined\n # are run in 'constrained' mode (e.g. lighting)\n return True",
"def in_easy_mode(mode: str) -> bool:\n return mode == EASY",
"def game_check(self, mode, row=None, col=None, is_set=True,\r\n show_fail=True):\r\n self.add_play_move(PlayMove.GAME_CHECK, mode=mode, row=row, is_set=is_set, show_fail=show_fail)\r\n if mode == \"h\" or mode == \"v\":\r\n part = self.get_part(type=\"edge\", sub_type=mode, row=row, col=col)\r\n if part is None:\r\n raise SelectError(f\"game_check: no edge({mode}) found at row={row} col={col}\") \r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for line({mode}) at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n elif mode == \"sq\":\r\n part = self.get_part(type=\"region\", row=row, col=col)\r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for square at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n else:\r\n raise SelectFail(f\"Unrecognized game_check mode({mode}\")\r\n \r\n return True",
"def check_enable_mode(self, check_string='#'):\n return True",
"def match(self):\n return 'test' in self.name",
"def is_on(self):\n if self._switch_type == \"record_motion\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_MOTION\n elif self._switch_type == \"record_always\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_ALWAYS\n elif self._switch_type == \"record_smart\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_SMARTDETECT\n elif self._switch_type == \"ir_mode\":\n return self._camera_data[\"ir_mode\"] == self._ir_on_cmd\n elif self._switch_type == \"hdr_mode\":\n return self._camera_data[\"hdr_mode\"] is True\n elif self._switch_type == \"high_fps\":\n return self._camera_data[\"video_mode\"] == TYPE_HIGH_FPS_ON\n else:\n return self._camera_data[\"status_light\"] == \"True\"",
"def check_config_mode(self):\n return False",
"def string_to_bool(test_string):\n\treturn bool(test_string in [\"True\", \"true\", \"Yes\", \"yes\", \"Y\", \"y\"])",
"def has_test(args):\n return (args.test_set or args.test_source or args.test_dataset or\n args.test_stdin or args.test_datasets)",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def is_on(self) -> bool:\n return self.coordinator.data.get_metric(METRIC_KEY_MODE) == MODE_ON",
"def is_custom_mode_enabled(self):\n return os.environ.get('SNYK_CUSTOM_MODE', 'false').lower() in ('1', 'yes', 'true')",
"def test_support_MODES(self):\n self._testIntOrDefaultFeature(\"MODES\")",
"def eco_mode_enabled(self) -> bool:\n return self._device_info[\"EcoMode\"] == \"on\"",
"def is_check_mode_enabled(self):\n return self.in_check_mode",
"def is_config_mode(self):\n\n return self._connection.get_prompt().strip().startswith('(')",
"def _is_boot_mode_uefi(self):\n boot_mode = self.get_current_boot_mode()\n if boot_mode == 'UEFI':\n return True\n else:\n return False"
]
| [
"0.72444856",
"0.6852615",
"0.65411115",
"0.6474507",
"0.6274849",
"0.621066",
"0.6163532",
"0.5953138",
"0.5897571",
"0.58896554",
"0.5868408",
"0.5852036",
"0.5784398",
"0.57723427",
"0.5732893",
"0.5709262",
"0.5681045",
"0.56652296",
"0.5612993",
"0.5609369",
"0.5585378",
"0.5577496",
"0.5558917",
"0.55471694",
"0.5518509",
"0.5512932",
"0.54903746",
"0.54631114",
"0.5456147",
"0.5435768"
]
| 0.82374626 | 0 |
Return True if and only if mode indicates the game is in easy mode, and False otherwise. >>> in_easy_mode('E') True >>> in_easy_mode('H') False | def in_easy_mode(mode: str) -> bool:
return mode == EASY | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)",
"def eco_mode_enabled(self) -> bool:\n return self._device_info[\"EcoMode\"] == \"on\"",
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"def check_enable_mode(self, check_string='#'):\n return True",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def is_on(self):\n if self._power_state == HYSEN_POWERON :\n return True\n else:\n return False",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def isMode(mode, check):\n if mode==\"default\" or mode==\"all\":\n return True\n \n if mode.__contains__(check):\n return True\n\n if check.__contains__(\"_\"):\n check_modes = check.split(\"_\")\n for check_mode in check_modes:\n if not isMode(mode, check_mode):\n return False\n return True\n\n return False",
"def is_on(self) -> bool:\n return self.coordinator.data.get_metric(METRIC_KEY_MODE) == MODE_ON",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def is_on(self):\n if self._switch_type == \"record_motion\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_MOTION\n elif self._switch_type == \"record_always\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_ALWAYS\n elif self._switch_type == \"record_smart\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_SMARTDETECT\n elif self._switch_type == \"ir_mode\":\n return self._camera_data[\"ir_mode\"] == self._ir_on_cmd\n elif self._switch_type == \"hdr_mode\":\n return self._camera_data[\"hdr_mode\"] is True\n elif self._switch_type == \"high_fps\":\n return self._camera_data[\"video_mode\"] == TYPE_HIGH_FPS_ON\n else:\n return self._camera_data[\"status_light\"] == \"True\"",
"def is_check_mode_enabled(self):\n return self.in_check_mode",
"def is_essential(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_essential\")",
"def is_essential(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_essential\")",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def get_running_mode(enduse, mode_constrained, enduse_space_heating):\n if mode_constrained:\n return True\n elif not mode_constrained and enduse in enduse_space_heating:\n return False\n elif not mode_constrained and enduse not in enduse_space_heating:\n # All other not constrained enduses where technologies are defined\n # are run in 'constrained' mode (e.g. lighting)\n return True",
"def in_test_mode(mode: str) -> bool:\n return mode == TEST",
"def eeprom_enabled():\n\n return (_read_device_state() & _get_addr_for_bit(_eeprom_bit)) != 0",
"def is_on(self):\n return self._program.get(\"enabled\") is True",
"def is_on(self):\n camera = self.coordinator.data[self._camera_id]\n if self._switch_type == \"record_motion\":\n enabled = True if camera[\"recording_mode\"] == TYPE_RECORD_MOTION else False\n elif self._switch_type == \"record_always\":\n enabled = True if camera[\"recording_mode\"] == TYPE_RECORD_ALLWAYS else False\n else:\n enabled = True if camera[\"ir_mode\"] == self._ir_on_cmd else False\n return enabled",
"def is_away_mode_on(self):\n return self._away",
"def high_current_mode_bool(self):\n return self._high_current_mode_bool",
"def isOpen(self):\n what = self.checkapf(\"WHATSOPN\").read()\n if \"DomeShutter\" in what or \"MirrorCover\" in what or \"Vents\" in what:\n return True, what\n else:\n return False, ''",
"def _is_device_active(self):\n return self.power_mode == STATE_ON",
"def is_food(self) -> bool:\n return self in (self.off, self.off_pro)",
"def antenny_is_safemode(self):\n return self.safe_mode",
"def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False",
"async def test_set_away_mode_on(opp):\n await common.async_set_away_mode(opp, True, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"away_mode\") == \"on\"",
"def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True",
"def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True"
]
| [
"0.6544077",
"0.6287514",
"0.5981719",
"0.59426105",
"0.5845831",
"0.5844486",
"0.580687",
"0.57966757",
"0.56680804",
"0.55833864",
"0.55572945",
"0.5481888",
"0.5474338",
"0.5474338",
"0.5473657",
"0.54406446",
"0.5424466",
"0.5370847",
"0.5350441",
"0.5343598",
"0.5343355",
"0.5330223",
"0.5305922",
"0.5299378",
"0.52727515",
"0.5228254",
"0.5221652",
"0.5207089",
"0.5202725",
"0.5202725"
]
| 0.79923415 | 0 |
Return the new game state after performing the game move specified by move on the section of state correspoding to section_num. >>> make_move('ATCDOGFOXEMU', 1, 'R') 'CATDOGFOXEMU' >>> make_move('CATDOGFOXUME', 4, 'C') The section is incorrect 'CATDOGFOXUME' | def make_move(state: str, section_num: int, move: str) -> str:
if move == wf.CHECK:
check_result = wf.check_section(state, section_num)
if check_result:
print('The section is correct')
else:
print('The section is incorrect')
else:
state = wf.change_state(state, section_num, move)
return state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state",
"def make_move(self, move: Any) -> \"StonehengeState\":\n new_state = StonehengeState(self.p1_turn, self.length,\n self.letters[:], self.claim[:])\n state = new_state\n if new_state.length == 1:\n state = self.move_length_1(move, new_state)\n if new_state.length == 2:\n state = self.move_length_2(move, new_state)\n if new_state.length == 3:\n state = self.move_length_3(move, new_state)\n if new_state.length == 4:\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\",\n \"C\", \"F\", \"E\", \"I\", \"P\", \"Q\"]:\n state = self.move_length_4(move, new_state)\n else:\n state = self.move_length_41(move, new_state)\n if new_state.length == 5:\n if move in [\"A\", \"B\", \"U\", \"O\", \"T\", \"Y\",\n \"C\", \"J\", \"E\", \"N\", \"V\", \"X\"]:\n state = self.move_length_5(move, new_state)\n elif move in [\"F\", \"I\", \"W\"]:\n state = self.move_length_51(move, new_state)\n else:\n state = self.move_length_52(move, new_state)\n return state",
"def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun",
"def makeMove(self, move):\n\t\ttry:\n\t\t\tif (self.board[int(move) - 1] is Piece.BLANK):\n\t\t\t\tself.board[int(move) - 1] = self.current\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\texcept:\n\t\t\treturn 0",
"def make_move(self, move: Tuple[int, int]) -> MoveError:\n\n # Make sure our move is going to be valid\n if self.is_winner():\n return MoveError.GAME_WON\n\n elif move[0] >= self._board_size or move[0] < 0 or move[1] >= self._board_size or move[1] < 0:\n return MoveError.OUT_OF_RANGE\n\n elif self._board[move[1]][move[0]] != self.NEUTRAL_PLAYER:\n return MoveError.TAKEN\n\n # If we make it to here, then it is valid to make the move\n self._board[move[1]][move[0]] = self._players[self._current_player]\n self._number_of_moves = self._number_of_moves + 1\n self._last_move = move\n\n self._check_for_winner()\n\n # Only change who the player is if we didn't get a winner,\n # otherwise the final board's color will be wrong\n if not self.is_winner():\n self._current_player = (self._current_player + 1) % len(self._players)\n\n return MoveError.OKAY",
"def api_make_move(self, move_input):\n return self.board.attempt_move(move_input)",
"def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move",
"def make_move(self, move: int) -> None:\n if move not in self._valid_moves:\n raise ValueError(f'Move \"{move}\" is not valid')\n\n self._update_board(move)\n\n self._win_state = self._check_winner()\n self._is_red_active = not self._is_red_active\n self.move_number += 1",
"def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state",
"def move(self, movement):\n index = self.state.index(0)\n\n new_state = self.state.copy()\n\n if movement == 'up':\n new_state[index], new_state[index - 3] = new_state[index - 3], new_state[index]\n elif movement == 'down':\n new_state[index], new_state[index + 3] = new_state[index + 3], new_state[index]\n elif movement == 'left':\n new_state[index], new_state[index - 1] = new_state[index - 1], new_state[index]\n else:\n # movement == 'right'\n new_state[index], new_state[index + 1] = new_state[index + 1], new_state[index]\n \n return new_state",
"def make_move(self, move, player):\n if not self.test_valid_move( move):\n return False\n self.game_state[move[0]][move[1]] = player",
"def make_move(self, board: Board) -> int:\n\n move = input()\n move = int(move)\n\n while move not in board.get_valid_moves():\n print(\"That is not a valid move\")\n move = input()\n move = int(move)\n\n return move",
"def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)",
"def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state",
"def move_length_41(self, move, new_state: \"StonehengeState\"):\n # Then consider the internal1 move\n new1_state = new_state\n if move in [\"D\", \"K\", \"M\"]:\n for i in [[\"D\", 2, 4, 4, 1, 6, 10, 14, 1, 0, 7, 12, 17, 14],\n [\"K\", 5, 15, 12, 1, 3, 6, 14, 1, 9, 11, 12, 13, 8],\n [\"M\", 8, 16, 5, 9, 10, 11, 13, 8, 0, 3, 7, 17, 14]]:\n new1_state = self.loop2(move, new_state, i)\n # Finally consider the internal2 move\n internal2 = [[\"G\", 2, 11, 16, 13, 5, 7, 8, 6, 1, 3, 10, 14, 1],\n [\"H\", 4, 11, 15, 3, 5, 6, 8, 6, 0, 3, 12, 17, 14],\n [\"L\", 2, 6, 16, 13, 4, 7, 15, 3, 9, 10, 12, 13, 8]]\n if move in [\"G\", \"H\", \"L\"]:\n for i in internal2:\n new1_state = self.loop3(move, new_state, i)\n return StonehengeState(not self.p1_turn, new1_state.length,\n new1_state.letters, new1_state.claim)",
"def makeMove(self, board, move):\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\n\t\tthis_move = TicTacToeHelper.rotateMove(move, rotation)\n\n\t\tself.__state_history.append((this_state, this_move))",
"def make_move(self, move_to_play, color_to_move, return_capture=False):\r\n captures = 0\r\n if move_to_play == 'PASS':\r\n board_copy = Board(self.state, self.previous_state, self.to_move)\r\n if self.to_move == 1:\r\n board_copy.to_move = 2\r\n else:\r\n board_copy.to_move = 1\r\n if return_capture:\r\n return board_copy, captures\r\n else:\r\n return board_copy\r\n\r\n current_state = np.array(self.state)\r\n ptemp_state = np.array(current_state)\r\n\r\n for p in ORTHOGONAL_POSITIONS[move_to_play]:\r\n if self.board[p[0]][p[1]].chain_liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += len(self.chains[(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color)])\r\n current_state = self.remove_chain(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color,\r\n current_state)\r\n\r\n elif self.board[p[0]][p[1]].liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += 1\r\n current_state[p[0]][p[1]] = 0\r\n\r\n current_state[move_to_play[0]][move_to_play[1]] = color_to_move\r\n if color_to_move == 1:\r\n temp_board = Board(current_state, ptemp_state, 2)\r\n else:\r\n temp_board = Board(current_state, ptemp_state, 1)\r\n if return_capture:\r\n return temp_board, captures\r\n else:\r\n return temp_board",
"def get_move(moves):\n pass",
"def get_move(state):\n entry = game_states[get_values(state)]\n options = list()\n\n for move in entry:\n move_result = entry[move]\n if move_result == 'Y':\n return move\n elif move_result == 'N':\n continue\n options.extend([move]*move_result)\n return choice(options)",
"def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out",
"def _get_move_result(self, unlocked_before_move : bool, err = None):\n if err:\n return Moveresult.INVALID\n elif self.current_turn.entity in self.game_state.get_completed_characters():\n return Moveresult.EXIT\n elif self.game_state.is_character_expelled(self.current_turn.entity):\n return Moveresult.EJECT\n elif self.game_state.is_current_level_unlocked() and not unlocked_before_move:\n return Moveresult.KEY\n else:\n return Moveresult.OK",
"def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())",
"def add_move(self, room_code: str, new_move: Move) -> GameInfo:\n game = self.read_game(room_code)\n if not game.started:\n raise GameNotStarted(\"Cannot make a move in a game that hasn't started\")\n\n if game.challenge is not None:\n raise InvalidMove(f\"Game {room_code!r} has an open challenge\")\n\n if game.turn_player_name != new_move.player_name:\n msg = \"Turn player is {!r} but {!r} tried to move\"\n raise WrongPlayer(msg.format(game.turn_player_name, new_move.player_name))\n\n if new_move.player_name not in [player.name for player in game.players]:\n raise InvalidMove(\"Must join a game to play a move\")\n\n if new_move.position in [move.position for move in game.moves]:\n raise InvalidMove(f\"There is already a move on {new_move.position.dict()}\")\n\n if len(new_move.letter) != 1:\n raise InvalidMove(\"Moves can only be one letter\")\n\n # TODO: validate move position and value\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set moves=list_append(moves, :m)\"),\n ExpressionAttributeValues={\n \":m\": [new_move.dict()],\n },\n ConditionExpression=Attr(\"moves\").eq(game.dict()[\"moves\"]),\n )\n\n self._advance_turn(game)\n\n return self.read_game(room_code)",
"def movePiece(cls, layout, start, end, move):\n start = int(start)\n end = int(end)\n if not Checkers.validPiece(start, layout, move):\n return (None, None)\n\n legal_moves = cls.getLegalMoves(start, layout)\n if 'moves' in legal_moves and end in legal_moves[\"moves\"]:\n piece = layout[start]\n new_layout = layout[0:start] + \" \" + layout[start + 1:]\n # kings!\n if end < 4 or end > 27:\n piece = piece.upper()\n new_layout = new_layout[0:end] + piece + new_layout[end + 1:]\n return (move + 1, new_layout)\n return (None, None)",
"def _makeAMove(self, prev_move, board: str) -> int:\n # If not overridden, make a random valid move.\n move = choice(validMoves(board))\n return move",
"def _makeAMove(self, prev_move, board: str) -> int:\n\n (myWins, otherWins, _, _) = self.winsBlocksForks(board)\n move = choice([self.findEmptyCell(board, myWin) for myWin in myWins] if myWins else\n [self.findEmptyCell(board, otherWin) for otherWin in otherWins] if otherWins else\n list(self.otherMove(board, emptyCellsCount(board)))\n )\n return move",
"def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")",
"def move(self):\r\n my_move = self.last_moves[\"my_move\"]\r\n return (my_move != \"\" and moves[(moves.index(my_move)+1) % 3] or\r\n random.choice(moves))",
"def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None",
"def forecast_move(self, move):\n if move not in get_legal_moves(self):\n raise RuntimeError(\"Attempted forecast of illegal move\")\n newGameState = deepcopy(self)\n newGameState[move[0]][move[1]] = 1\n newGameState.player_locations[whose_turn] = move\n newGameState.whose_turn ^= 1\n return newGameState"
]
| [
"0.70182574",
"0.6673007",
"0.6612107",
"0.65497804",
"0.6517106",
"0.647525",
"0.63786256",
"0.63121843",
"0.6278452",
"0.6199623",
"0.61955255",
"0.6136059",
"0.6130135",
"0.60747117",
"0.60397965",
"0.5908862",
"0.59013885",
"0.58989275",
"0.5887443",
"0.5865713",
"0.58464986",
"0.5842901",
"0.5810723",
"0.57916486",
"0.5788478",
"0.5768236",
"0.57650834",
"0.5745483",
"0.5718028",
"0.5712711"
]
| 0.8437671 | 0 |
Return a valid game mode entered by the user. | def get_mode() -> str:
mode = input('Enter the mode to play [(T)est, (E)asy, or (H)ard]: ')
while not is_valid_mode(mode):
print('Invalid mode!')
mode = input('Enter the mode to play [(T)est, (E)asy, or (H)ard]: ')
return mode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def game_mode(self):\n return self._get(\"game_mode\")",
"def get_player_mode(mode=None):\n if mode == \"1\":\n print(\"You've chosen Solo Mode! Can you beat a computer?\")\n return mode\n elif mode == \"2\":\n print(\"You've chosen Multiplayer Mode! Can you beat a human?\")\n return mode\n else:\n if mode is not None:\n print(\"Unrecognized input. Please enter 1 or 2\\n\")\n mode = input(\"1 or 2 Players? \")\n return get_player_mode(mode)",
"def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)",
"def getVerificationMode():\n\n\tmode = 0\n\tprint \"Select a mode of verification:\"\n\tprint \"1. No Verification\"\n\tprint \"2. Information Theoretic Verification\"\n\tprint \"3. MAC Verification\"\n\n\twhile mode not in range(1, 4):\n\t\tmodeStr = raw_input(\"[1-3]: \")\n\t\ttry:\n\t\t\tmode = int(modeStr)\n\t\t\tif mode not in range(1, 4):\n\t\t\t\traise ValueError()\n\t\texcept:\n\t\t\tprint \"Invalid input: integer in range [1-3] expected.\"\n\t\t\tmode = 0\n\n\treturn mode",
"def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[pulumi.Input[Union[str, 'Mode']]]:\n return pulumi.get(self, \"mode\")",
"def choose_mode():\n print('Do you want to play in terminal/debug mode (\"t\") or in visual mode (\"v\")?')\n while True:\n user_input = input()\n if user_input.lower() == \"t\":\n print('You chose for terminal mode, the game will start now')\n return False\n elif user_input.lower() == \"v\":\n print('You chose for visual mode, the game will start in a new window')\n return True\n else:\n print(f'Your input: {user_input}, is not recognised, please try')",
"def chooseGamemode(self):\n\n # Set the gamemode when user clicks a radio button\n self.GAME_MODE = self.gamemode_var.get()",
"def get_encryption_mode():\r\n\r\n msg = 'Do you want to encrypt ({0}) or decrypt ({1})? '.format(\r\n cipher_functions.ENCRYPT, cipher_functions.DECRYPT)\r\n mode = input(msg)\r\n while not (mode == cipher_functions.ENCRYPT or\r\n mode == cipher_functions.DECRYPT):\r\n print('Invalid mode.')\r\n mode = input(msg)\r\n return mode",
"def parse_gamemode(self, gamemode: str):\n\n gamemode = gamemode.strip()\n\n # for users who input 'gem-grab' or 'gem_grab'\n gamemode = gamemode.replace(\"-\", \" \")\n gamemode = gamemode.replace(\"_\", \" \")\n\n if gamemode.lower() == \"showdown\":\n raise AmbiguityError(\"Please select one between Solo and Duo Showdown.\")\n\n possible_names = {\n \"Gem Grab\": [\"gem grab\", \"gemgrab\", \"gg\", \"gem\"],\n \"Brawl Ball\": [\"brawl ball\", \"brawlball\", \"bb\", \"bball\", \"ball\"],\n \"Solo Showdown\": [\n \"solo showdown\", \"ssd\", \"solo sd\",\n \"soloshowdown\", \"solo\", \"s sd\"\n ],\n \"Duo Showdown\": [\n \"duo showdown\", \"dsd\", \"duo sd\", \"duoshowdown\", \"duo\", \"d sd\"\n ],\n \"Bounty\": [\"bounty\", \"bonty\", \"bunty\"],\n \"Heist\": [\"heist\", \"heis\"],\n \"Lone Star\": [\"lone star\", \"lonestar\", \"ls\", \"lone\"],\n \"Takedown\": [\"takedown\", \"take down\", \"td\"],\n \"Robo Rumble\": [\n \"robo rumble\", \"rr\", \"roborumble\", \"robo\", \"rumble\"\n ],\n \"Big Game\": [\"big game\", \"biggame\", \"bg\", \"big\"],\n \"Boss Fight\": [\"boss fight\", \"bossfight\", \"bf\", \"boss\"]\n }\n\n for gmtype in possible_names:\n modes = possible_names[gmtype]\n if gamemode.lower() in modes:\n return gmtype\n else:\n return None",
"def mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"mode\")",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_mode():\n\tprint(\"Please choose the file type that you want to normalize ('band'/'phband'/'dos'): \", end='')\n\twhile True:\n\t\tmode = input()\n\t\tif mode == \"b\" or mode == \"band\" : return \"band\"\n\t\telif mode == \"pb\" or mode == \"phband\": return \"phband\"\n\t\telif mode == \"d\" or mode == \"dos\" : return \"dos\"\n\t\telif mode == \"phd\" or mode == \"phdos\" :\n\t\t\tprint(\"phdos-files don't need to be normalized; if you wanna change the unit, plz try other programs.\"); exit(1)\n\t\telse: print(\"Plz enter 'band'/'b', 'phband'/'pb', or 'dos'/'d': \", end=\"\")",
"def get_mode(self):\r\n return self.mode",
"def get_mode_name(self, i):\n for mode in self.modes:\n if mode['id'] == i:\n return mode['name']\n return 'Unknown Game Mode'",
"def getmode(self):\n return self.mode",
"def get_mode(guild_id: int):\n key = _mode_key(guild_id)\n if key not in db:\n return fixtures.chat\n return db[key]",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def get_app_mode(self):\n\t\treturn call_sdk_function('PrlApi_GetAppMode')",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def _get_mode():\n return context.get_context('mode')",
"def ask_for_mode(self):\n current_session = inquirer.prompt(\n app_mode(self),\n theme=load_theme_from_dict(self.theme)\n )\n\n if current_session['mode'] == 'quit':\n sys.exit(0)\n\n self.session['mode'] = current_session['mode']",
"def mode(self):\n return self._data.get('mode', None)",
"def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")",
"def mode(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"mode\")"
]
| [
"0.75565565",
"0.7202764",
"0.6821942",
"0.66530854",
"0.6651077",
"0.6651077",
"0.6563576",
"0.65586215",
"0.6554351",
"0.6524634",
"0.65119785",
"0.64491296",
"0.63630396",
"0.6348841",
"0.6338388",
"0.6233118",
"0.6232582",
"0.6226205",
"0.62008864",
"0.620042",
"0.6192885",
"0.61912155",
"0.61319584",
"0.6130999",
"0.6130999",
"0.6092834",
"0.6083213",
"0.6056685",
"0.60165924",
"0.59832954"
]
| 0.77662545 | 0 |
Return a valid section number entered by the user. | def get_section_number() -> int:
section_num = input('Enter a section number (1 - 4): ')
while not (section_num.isdigit() and wf.is_valid_section(int(section_num))):
print('Invalid section number!')
section_num = input('Enter a section number (1 - 4): ')
return int(section_num) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def section(self, idx: int) -> int:\n if self.sections >= (idx + 1):\n return int(RE_DIGIT.match(self.string.split(\".\")[idx]).group(1))\n return 0",
"def get_section_hint(state: str) -> int:\n section_nums = [i + 1 for i in range(len(state) // wf.SECTION_LENGTH)]\n random.shuffle(section_nums)\n for section_num in section_nums:\n if not wf.check_section(state, section_num):\n return section_num\n return 0 # should never get here",
"def get_section(line: str) -> str:\n if len(line) < 2:\n raise Exception(\"Error: Section line can't be shorter than 2\")\n return line[1:len(line) - 1]",
"def get_index_from_section(section):\n return section.rsplit(\"(\", 1)[1].rstrip(\")\")",
"def get_selected_sections(sections):\n num_sections = len(sections)\n number = int(input('Enter Your Choice: '))\n while number > num_sections + 1:\n _print('Enter a valid Number between 1 and %d' % (num_sections + 1))\n number = int(input('Enter Your Choice: '))\n\n if number == num_sections + 1:\n return sections\n return [sections[number - 1]]",
"def check_section_exists(section_number, report=None):\n if report is None:\n report = MAIN_REPORT\n if not report['source']['success']:\n return False\n found = int((len(report['source']['sections']) - 1) / 2)\n if section_number > found:\n report.attach('Syntax error', category='Syntax', tool='Source',\n group=report['source']['section'],\n mistake=(\"Incorrect number of sections in your file. \"\n \"Expected {count}, but only found {found}\"\n ).format(count=section_number, found=found))",
"def _invalid_section_error(self, section_name):\n msg = \"'{}' is not a subsection for the '{}' section.\".format(section_name, self._SECTION_NAME)\n raise ValueError(msg)",
"def section(self):\n return SECTION_NAME_TO_SECTION[self.section_name]",
"def getSectionIndex(self) -> int:\n ...",
"def _resolve_section_id_from_context(self, cr, uid, context=None):\n if context is None:\n context = {}\n if type(context.get('default_section_id')) in (int, long):\n return context.get('default_section_id')\n if isinstance(context.get('default_section_id'), basestring):\n section_name = context['default_section_id']\n section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=section_name, context=context)\n if len(section_ids) == 1:\n return int(section_ids[0][0])\n return None",
"def bus_section_id(self) -> int:\n return self.dss_obj.BUSI(5, 0)",
"def find_section_id( self, tool_panel_section_id ):\n if not tool_panel_section_id:\n tool_panel_section_id = ''\n else:\n if tool_panel_section_id not in self._tool_panel:\n # Hack introduced without comment in a29d54619813d5da992b897557162a360b8d610c-\n # not sure why it is needed.\n fixed_tool_panel_section_id = 'section_%s' % tool_panel_section_id\n if fixed_tool_panel_section_id in self._tool_panel:\n tool_panel_section_id = fixed_tool_panel_section_id\n else:\n tool_panel_section_id = ''\n return tool_panel_section_id",
"def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None",
"def section_id(self) -> str:\n\n return self[\"id\"]",
"def _int_input_in_range(self, print_out, range_):\n try:\n i = int(input(print_out))\n assert range_[0] <= i <= range_[1]\n return i\n except AssertionError:\n print('Please, enter a vaild number')\n return None\n except ValueError:\n print('Please, enter a number not a string')\n return None",
"def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")",
"def get_section_id(section_service, import_id):\n section_id = None\n if import_id in section_cache:\n section_id = section_cache[import_id]\n else:\n filters = { 'import_id': import_id, }\n results = section_service.get_sections(filters)\n if len(results) > 0:\n # cache the results\n section_cache[import_id] = results[0].id\n section_id = results[0].id\n return section_id",
"def makesection(section):\n s = []\n if section is None:\n return s\n try:\n for i in section.split(':'):\n s.append(int(i))\n except Exception as e:\n msg = 'Not able to convet section to list because %s' % e\n raise SpecError(msg)\n return s",
"def section(data):\n if len(data['index']) == 2 and data['index'][1][0].isdigit():\n element = {}\n element['is_section'] = True\n element['section_id'] = '-'.join(data['index'])\n if u\"§§ \" == data['title'][:3]:\n element['is_section_span'] = True\n else:\n element['is_section_span'] = False\n match = SECTION_TITLE_REGEX.match(data['title'])\n element['label'] = match.group(1)\n element['sub_label'] = match.group(2)\n return element",
"def stage_num():\r\n first = 1\r\n last = 420\r\n\r\n while True:\r\n s_num = input(\"Enter stage number (1-420). No input will yield default 001. \")\r\n if s_num == \"\":\r\n s_num = \"001\"\r\n return s_num\r\n else:\r\n try:\r\n s_num = int(s_num)\r\n except ValueError:\r\n print(\"Invalid stage number! Try again.\")\r\n else:\r\n if s_num < first or s_num > last:\r\n print(\"Invalid stage number! Try again.\")\r\n else:\r\n s_num = \"{:03d}\".format(s_num)\r\n return s_num",
"def getHenhouseDisplayMenuChoice ():\r\n while True :\r\n try :\r\n choice = int(input('Select an option: '))\r\n if 0 <= choice <= 2 :\r\n break \r\n else :\r\n print('Please enter a valid option')\r\n except ValueError :\r\n print('Please enter a valid option')\r\n return(choice)",
"def find_section_state(line, current_section, section_order, content, highlight_content):\n for section, pattern in SEC_PAT_DICT.items():\n if pattern.match(line):\n section_order.append(section)\n content[section] = []\n highlight_content[section] = []\n return section, 1\n\n if current_section is None:\n raise InvalidDataError(\"Could not identify section from line: {}\".format(line))\n else:\n return current_section, 1",
"def get_selected_course(courses):\n num_of_courses = len(courses)\n\n c_number = None\n while True:\n c_number = int(input('Enter Course Number: '))\n\n if c_number not in range(1, num_of_courses+1):\n _print('Enter a valid number between 1 and ', num_of_courses)\n continue\n elif courses[c_number - 1].state != 'Started':\n _print('The course has not started!')\n continue\n else:\n break\n\n selected_course = courses[c_number - 1]\n return selected_course",
"def clean_course_id(self):\r\n\r\n cleaned_id = self.cleaned_data[\"course_id\"]\r\n try:\r\n course_key = CourseKey.from_string(cleaned_id)\r\n except InvalidKeyError:\r\n try:\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(cleaned_id)\r\n except InvalidKeyError:\r\n msg = 'COURSE NOT FOUND'\r\n msg += u' --- Entered course id was: \"{0}\". '.format(cleaned_id)\r\n msg += 'Please recheck that you have supplied a valid course id.'\r\n raise forms.ValidationError(msg)\r\n\r\n if not modulestore().has_course(course_key):\r\n msg = 'COURSE NOT FOUND'\r\n msg += u' --- Entered course id was: \"{0}\". '.format(course_key.to_deprecated_string())\r\n msg += 'Please recheck that you have supplied a valid course id.'\r\n raise forms.ValidationError(msg)\r\n\r\n return course_key",
"def fancy_section_type(section):\n return {\n 'LEC': 'Lecture',\n 'LAB': 'Laboratory',\n 'TUT': 'Tutorial',\n 'PRA': 'Practicum',\n 'COR': 'Correspondence',\n 'SEM': 'Seminar',\n 'ONL': 'Online',\n }.get(section, section)",
"def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1",
"def get_section(self, section_name):\n section_name = JSONSchema.format_section_name(section_name).lower()\n try:\n return self._sections[section_name]\n except KeyError:\n raise AquaError('No section \"{0}\"'.format(section_name))",
"def get_patient_nr(segment):\n try:\n national_register = str(segment[19])\n except IndexError:\n nr_list = segment[2:5]\n national_register = [nr for nr in nr_list if str(nr) is not \"\"].pop()[0]\n national_register = str(national_register).split(\"^\")[0]\n return national_register",
"def get_valid_emission_heading(self, heading):\n if heading or int(float(heading)) >= 0:\n try:\n heading = int(float(heading))\n if s.MIN_HEADING <= heading <= s.MAX_HEADING:\n return heading\n except:\n pass\n \n message = 'This vehicle heading is not valid. Try something between [%s, %s].' % (s.MIN_HEADING, s.MAX_HEADING)\n raise InvalidUsage(message)",
"def ssn(self, x=None):\n section = self.pick_section(self._ssn, x)\n if self.force_mono and section.ndim > 1:\n return section[0]\n return section"
]
| [
"0.6522393",
"0.6294414",
"0.5988498",
"0.59530413",
"0.57162154",
"0.56322765",
"0.5559978",
"0.55530655",
"0.5531887",
"0.5508567",
"0.54969805",
"0.54601294",
"0.5433692",
"0.5413692",
"0.53559995",
"0.5241563",
"0.52240914",
"0.5203008",
"0.515979",
"0.5142805",
"0.51392704",
"0.5133834",
"0.5131228",
"0.51297045",
"0.51057476",
"0.510403",
"0.5088353",
"0.5079873",
"0.5074262",
"0.5064382"
]
| 0.8810455 | 0 |
Return a valid move entered by the user. | def get_move() -> str:
msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '
move = input(msg)
while not wf.is_valid_move(move):
print('Invalid move!')
move = input(msg)
return move | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")",
"def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? --> \" % name)\n\treturn move",
"def get_next_move(self):\n return int(input('Enter your move: '))",
"def make_move(self, board: Board) -> int:\n\n move = input()\n move = int(move)\n\n while move not in board.get_valid_moves():\n print(\"That is not a valid move\")\n move = input()\n move = int(move)\n\n return move",
"def get_move_from_user(self):\n user_input = input(\"Move: \")\n if user_input == 'undo':\n return user_input\n try:\n move_list = user_input.split(\" \")\n move_list[1] = int(move_list[1])\n except:\n move_list = ['XX', 0, 'XX']\n return move_list",
"def move_valid(move):\n return True",
"def valid_move(self, player, move):\n return (True)",
"def fix_move(self, invalid_move: QMove):\n\n # TODO: reduce time_per_game second by second\n ERROR_MSG = f\"INVALID_MOVE {invalid_move.to_string()}\"\n\n if self.is_ai and self.proc is not None:\n self.proc.stdin.write(str.encode(ERROR_MSG + '\\n'))\n self.proc.stdin.flush()\n new_move = QMove(os.read(self.proc.stdout.fileno(), 100))\n else:\n new_move = QMove(\n input(\"Move was invalid, enter a valid move:\\n\\t>> \"))\n\n return new_move",
"def get_user_move(self):\n while True:\n user_input = input(\"Enter the coordinates: > \")\n try:\n col, row = map(int, user_input.split())\n if col not in [1, 2, 3] or row not in [1, 2, 3]:\n raise CoordinateError\n idx = self.board_coords[(col, row)]\n if self.game_board[idx] != ' ':\n raise CellOccupyError\n return idx\n except ValueError:\n print(\"You should enter numbers!\")\n except CoordinateError:\n print(\"Coordinates should be from 1 to 3!\")\n except CellOccupyError:\n print('This cell is occupied! Choose another one!')",
"def askMove(self,posibleMoves):\n print(\"Where will you move?\")\n while True:\n pos = raw_input(\"Type Colum and Row 'CR' Ex:a1 for first column/row: \")\n if len(pos) == 2:\n c = ord(pos[0])-97\n r = int(pos[1])-1\n move = c+r*8\n if move in posibleMoves:\n return move\n print(\"Invalid move, try again\")\n return",
"def get_next_move(current_player: str, board_values: dict[str, str]) -> str:\n valid_input = False\n while not valid_input:\n # take care of any excess whitespace around the input and converts to lowercase\n raw_input = input(c.NEXT_TURN_MESSAGE.format(player=current_player))\n\n validation_result = get_validation_result(raw_input, board_values)\n\n if not validation_result.is_valid:\n print(validation_result.error_message)\n continue\n\n return validation_result.cleaned_input",
"def getMove(self):\n while True:\n try:\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(init) != 2) or (init[0] not in range(1, self.grid.width+1)) or (init[1] not in range(1, self.grid.height+1)):\n print 'Initial position is not valid.'\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n\n while True:\n try:\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(dest) != 2) or (dest[0] not in range(1, self.grid.width+1)) or (dest[1] not in range(1, self.grid.height+1)):\n print 'Destination position is not valid.'\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n\n return (init, dest)",
"def choose_move(self, board):\n if self.opp == Player.HUMAN:\n time.sleep(4)\n if self.type == Player.HUMAN:\n move = input(\"Please enter your move:\")\n while not board.legalMove(self, move):\n print(move, \"is not valid\")\n move = input(\"Please enter your move\")\n return move\n elif self.type == Player.RANDOM:\n move = choice(board.legalMoves(self))\n return move\n elif self.type == Player.MINIMAX:\n val, move = self.minimax_move(board, self.depth * 2,\n Player.MAX_PLAYER)\n board.last_move = move\n return move\n elif self.type == Player.ABPRUNE:\n val, move = self.alpha_beta_move(board, self.depth * 2,\n float('-inf'), float('inf'),\n Player.MAX_PLAYER)\n return move\n elif self.type == Player.CUSTOM:\n move = self.agent.getAction(board)\n self.agent.update_current_state(board, move)\n return move\n elif self.type == Player.MIX:\n return self.mixed_move(board)\n\n else:\n print(\"Unknown player type\")\n return -1",
"def getFirstMove(self):\n while True:\n try:\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n break\n except ValueError:\n print(\"Input is not a integer.\")\n\n while move not in [(1, 1), (self.grid.width/2, self.grid.height/2), \\\n (self.grid.width/2+1, self.grid.height/2+1), (self.grid.width, self.grid.height)]:\n print 'First move is not valid.'\n move = tuple(int(str.strip()) for str in raw_input('Choose your first move: ').split(','))\n return move",
"def GetMove(self, board):\n move = None\n while True:\n move = input(\"Enter coordinates as XY (e.g. 21): \")\n if board[Game.GetIndexFromCoords(*move)] == \" \":\n return Game.GetIndexFromCoords(*move)\n else:\n print(\"Space occupied.\")",
"def human(gstate: TicTacToe, *args):\n return input_with_validation(\"Please enter move.\", list(gstate.next_moves.keys()))",
"def askformove(b):\n while True:\n print(b)\n userInput = input(\"enter your move \")\n try:\n userInput= int(userInput)\n assert(userInput <= b.width )\n assert(b.allowsMove(userInput))\n except (ValueError,AssertionError):\n print(\"enter a diff move\")\n continue\n return userInput",
"def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True",
"def select_move(self, game, player) -> int:\n print()\n print(f\"Player {player} turn\")\n game.print_player_perspective(player)\n\n move = -1\n while(move==-1):\n entered_move = input (\"Enter move: \")\n\n if(int(entered_move) in game.possible_moves(player)):\n move = int(entered_move)\n else:\n print(\"Entered an invalid move\")\n\n print()\n return move",
"def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")",
"def interactive_strategy(game: Any) -> Any:\r\n move = input(\"Enter a move: \")\r\n return game.str_to_move(move)",
"def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)",
"def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)",
"def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)",
"def human_move(self):\n move = -1\n while move < 1 or move > self.BOARD.COLUMNS:\n try:\n move = input(\"{}: Choose a column>>> \".format(self.NAME))\n\n for i in self.QUIT:\n if str(move) == i:\n return None\n\n move = int(move)\n\n except KeyboardInterrupt:\n exit(0)\n except ValueError:\n pass\n if self.PIECE_COUNT <= 0:\n # cannot do anything\n self.STATE == Spectator.State.INACTIVE\n return None\n else:\n return move",
"def interactive_strategy(game: Game) -> str:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)",
"def is_valid(self, move):\r\n return move > 10 and move < 89",
"def human_move(board,player):\r\n \r\n s = input(\"Please input a legal move in a format of \\\"current_position-landing_position\\\", if the move is cantering or plain. In case of a capturing move, follow \\\"current_position-landing_position-enemy piece\\\": \")\r\n move = s.split('-')\r\n legal = legal_moves(board,player)\r\n execution(move,legal,board,player)",
"def move(self):\n move = input(\"Pick one weapon - rock, scissors, paper: \").lower()\n while move not in moves:\n \"\"\"Prints out a message to try again\n when something is mistyped or a weapon that is not\n in the moves variable is typed. Will keep repeating\n until a validated move is played.\"\"\"\n move = input(\"You can only use rock, scissors, paper: \").lower()\n return move",
"def get_next_move(board, player):\n print(\"Player '{}' please enter your next move:\".format(player))\n while True:\n move = input(\"> \")\n if '1' <= move <= '9':\n move = int(move)\n if move-1 in valid_moves(board):\n break\n print(\"That is not a valid move, please try again...\")\n return move"
]
| [
"0.790794",
"0.7603971",
"0.75918245",
"0.7441765",
"0.7396418",
"0.72964424",
"0.72695065",
"0.72305745",
"0.720543",
"0.7186338",
"0.70309246",
"0.70161897",
"0.70109475",
"0.6986676",
"0.6979692",
"0.69704807",
"0.69599783",
"0.6916412",
"0.69141704",
"0.6911527",
"0.6881968",
"0.6873117",
"0.6873117",
"0.6873117",
"0.68595",
"0.68568486",
"0.6854852",
"0.6832709",
"0.681738",
"0.6801363"
]
| 0.81562686 | 0 |
Return 1 if a hint was given, and 0 if not. Prompt the user to answer whether they would like a hint of type hint_type if and only if mode indicates the game is in easy mode. If yes, generate the hint on how to rearrange state (only using section_num if hint_type corresponds to a move hint) and print the hint. | def get_hints(state: str, mode: str, hint_type: str, section_num: int = 0) -> int:
if in_easy_mode(mode):
if hint_type == SECTION_HINT:
hint = input('Enter Y if you want a section hint: ')
if hint == 'Y':
print('Your section hint is: ' + str(get_section_hint(state)))
return 1
elif hint_type == MOVE_HINT:
hint = input('Enter Y if you want a move hint: ')
if hint == 'Y':
print('Your move hint is: ' + wf.get_move_hint(state, section_num))
return 1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_hint(self):\r\n # Sample variables x and y in the range [-10, 10]\r\n sample_dict = {'x': (-10, 10), 'y': (-10, 10)}\r\n\r\n # Give a hint if the user leaves off the coefficient\r\n # or leaves out x\r\n hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'),\r\n ('2*y', 'missing_x', 'Try including the variable x')]\r\n\r\n # The expected solution is numerically equivalent to x+2y\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=0.01,\r\n answer=\"x+2*y\",\r\n hints=hints)\r\n\r\n # Expect to receive a hint if we add an extra y\r\n input_dict = {'1_2_1': \"x + 2*y + y\"}\r\n correct_map = problem.grade_answers(input_dict)\r\n self.assertEquals(correct_map.get_hint('1_2_1'),\r\n 'Check the coefficient of y')\r\n\r\n # Expect to receive a hint if we leave out x\r\n input_dict = {'1_2_1': \"2*y\"}\r\n correct_map = problem.grade_answers(input_dict)\r\n self.assertEquals(correct_map.get_hint('1_2_1'),\r\n 'Try including the variable x')",
"def take_turn(state, hint):\n \"\"\" display the current state and the labels for choosing a move\"\"\"\n\n print(state) # print the game board\n print(color_magenta(hint))\n print(\"\") #add a space # print the numbers that correspond to all moves in the game board\n print(color_green(\"Your current score is: \"), color_green(str(state.score1)))\n print(color_green(\"AI's current score is: \"), color_green(str(state.check_finished_boxes() - state.score1))) # record the scores of the player and AI at the moment\n print(\"\") #add a space\n\n move = input(color_yellow(\"Please enter a number to connect: \"))\n\n \"\"\"prompt again for a move until it's a valid input and corresponds to an empty space in the board\"\"\"\n while not move.isnumeric() or not (0 <= int(move) <= 48) or (int(move) % 2 == 0) or state.board[int(move) // 7][int(move) % 7] != \"\":\n move = input(color_yellow(\"Please enter a valid connection: \"))\n number = move\n if number in hint:\n index = hint.find(number)\n if len(number) == 1:\n hint = hint[0:index] + \" \" + hint[index + 1:] # Make the moves player already made disappear\n else:\n hint = hint[0:index] + \" \" + hint[index + 2:]\n\n state.make_move(move)\n return hint",
"def check_hint_condition(self, hxml_set, student_answers):\r\n pass",
"async def hint(self, ctx):\n level = await self.get_player_level(ctx.author)\n\n if len(self.enigmas[level][\"hints\"]) == 0:\n await ctx.send(\"No hints available for this level, sorry!\")\n else:\n await ctx.send(random.choice(self.enigmas[level][\"hints\"]))",
"def hint(self):\n\t\tif not self.game:\n\t\t\treturn\n\t\tzeros = [(x,y) for x in range(self.width) for y in range(self.height)\n\t\t if self.game.marks[x][y] == CLOSED and\n\t\t not self.game.mines[x][y] and\n\t\t self.game.count_neighbor_mines(x, y) == 0]\n\t\tif zeros:\n\t\t\tx, y = random.choice(zeros)\n\t\t\tself.game.reveal(x, y, True)\n\t\t\tself.draw_field()",
"def test_gethint_1hint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'problem_name': '25.0'}\r\n out = mock_module.get_hint(json_in)\r\n self.assertTrue('Really popular hint' in out['hints'])\r\n # Also make sure that the input gets added to user_submissions,\r\n # and that the hint is logged in previous_answers.\r\n self.assertTrue('25.0' in mock_module.user_submissions)\r\n self.assertTrue(['25.0', ['1']] in mock_module.previous_answers)",
"def useHint(self):\n self.hints = self.hints - 1",
"def get_section_hint(state: str) -> int:\n section_nums = [i + 1 for i in range(len(state) // wf.SECTION_LENGTH)]\n random.shuffle(section_nums)\n for section_num in section_nums:\n if not wf.check_section(state, section_num):\n return section_num\n return 0 # should never get here",
"def play_game(state: str, mode: str) -> int:\n moves = 0\n if in_test_mode(mode):\n print('Answer: ' + wf.ANSWER)\n \n while state != wf.ANSWER:\n print('Current state: ' + state)\n moves += get_hints(state, mode, SECTION_HINT) \n section_num = get_section_number() \n moves += get_hints(state, mode, MOVE_HINT, section_num)\n move = get_move()\n state = make_move(state, section_num, move)\n moves += 1 \n return moves",
"def show_hint(self, hint=str):\n self.clear_screen()\n print('\\n' * 15)\n self.cool_print(\"ERROR 210.04-TC6: [KEYCODE INCORRECT]\")\n self.cool_print(\"DATA CORRUPTED. ATTEMPTING TO DECRYPT METADATA.\")\n print()\n sleep(.6)\n self.cool_print(f\"[!] METADATA RECOVERED: {hint}\")\n print()\n self.cool_print(\"PRESS ENTER TO REATTEMPT\", newline=False)\n input()",
"def showHint():\n\t#First get the master pod\n\tcurrentMasterPod=masterPod.currentMasterPod\n\t#Get the hint\n\thint=currentMasterPod.hint\n\t#Check if the pod is disabled\n\tif hasattr(currentMasterPod,\"locked\"):\n\t\tcurrentTime = getCurrentTime()\n\t\tif currentMasterPod.locked < currentTime:\n\t\t\tloginAttemptVar.set(hint)\n\n\telse:\n\t\t#Update the control variable\n\t\tloginAttemptVar.set(hint)",
"def rechargeHint(self):\n if self.hints < 8:\n self.hints = self.hints + 1",
"def _create_hint(self):\n for index, letter in enumerate(self._player_guess):\n if self._player_code[index] == letter:\n self._hint += \"x\"\n elif letter in self._player_code:\n self._hint += \"o\"\n else:\n self._hint += \"*\"",
"def fake_get_hint(_):\r\n return {'best_hint': 'This is the best hint.',\r\n 'rand_hint_1': 'A random hint',\r\n 'rand_hint_2': 'Another random hint',\r\n 'answer': '42.5'}",
"def _create_hint(self, code, guess): \n hint = \"\"\n for index, letter in enumerate(guess):\n if code[index] == letter:\n hint += \"x\"\n elif letter in code:\n hint += \"o\"\n else:\n hint += \"*\"\n return hint",
"def answerIsSet(self, input):\n if input in self.word:\n self.guessedRight.append(input)\n self.setWordKnown()\n if \"_\" not in self.wordKnown:\n self.active = False\n return \"Kaua läks, aga asja sai. Arvasid ära, sõna on tõesti \" + self.wordKnown + \".\"\n return \"Täht \" + input + \" on tõesti sõnas sees. Tubli. Veel on vaja arvata \" + str(\n self.wordKnown.count(\"_\")) + \" tähte\\n\" + \"Hetkel proovitud \" + ' '.join(\n self.guessedChars) + \"\\n\" + self.wordKnown\n return \"Kahjuks tähte \" + input + \" sõnas ei ole. Vaja veel \" + str(\n self.wordKnown.count(\"_\")) + \" ära arvata. \\nHetkel proovitud \" + ' '.join(\n self.guessedChars) + \" \\n\" + self.wordKnown",
"def input_problem_answer(_, problem_type, correctness):\r\n assert(correctness in ['correct', 'incorrect'])\r\n assert(problem_type in PROBLEM_DICT)\r\n answer_problem(world.scenario_dict['COURSE'].number, problem_type, correctness)",
"def test_gethint_0hint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'problem_name': '26.0'}\r\n out = mock_module.get_hint(json_in)\r\n print mock_module.previous_answers\r\n self.assertTrue(out is None)\r\n self.assertTrue('26.0' in mock_module.user_submissions)",
"def isHelp():\n return (True)",
"def test_submithint_withpermission_existing(self):\r\n mock_module = CHModuleFactory.create(previous_answers=[['25.0', [1, None, None]]])\r\n json_in = {'answer': '25.0', 'hint': 'This is a new hint.'}\r\n mock_module.submit_hint(json_in)\r\n # Make a hint request.\r\n json_in = {'problem name': '25.0'}\r\n out = mock_module.get_hint(json_in)\r\n self.assertTrue('This is a new hint.' in out['hints'])",
"def test_get_hint_message(self, hints, hint_counter, result):\n self.xblock.hints = hints\n self.xblock.hint_counter = hint_counter\n test_result = self.xblock.get_hint_message()\n self.assertEquals(result, test_result)",
"def test_gethint_manyhints(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'problem_name': '24.0'}\r\n out = mock_module.get_hint(json_in)\r\n self.assertTrue('Best hint' in out['hints'])\r\n self.assertTrue(len(out['hints']) == 3)",
"def test_get_css_hint_display(\n self,\n hints,\n result,\n ):\n self.xblock.hints = hints\n test_result = self.xblock.get_css_hint_button_display()\n self.assertEquals(result, test_result)",
"def determine_marks():\n # ask player 1 if X or O\n valid_answer = False\n while not valid_answer:\n p1_string = input(\"Player 1: Would you like X or O? \")\n p1_mark = p1_string[0].capitalize()\n valid_answer = check_inputs([\"X\", \"O\"], p1_mark)\n if p1_mark == 'X':\n return {\"Player 1\": \"X\", \"Player 2\": \"O\"}\n else:\n return {\"Player 1\": \"O\", \"Player 2\": \"X\"}",
"def check_for_help(opt):\n\n opt_pat=re.compile(r'''-?(?P<opt>\\w*)[+-]?''',re.I)\n help=0\n authorized_opt=tag_to_num.keys()+['version']\n for i in range(0,len(num_to_tag)):\n authorized_opt+=[str(i)]\n for i in range(1,len(opt)):\n if opt_pat.search(opt[i]):\n if opt_pat.search(opt[i]).group('opt').lower() not in authorized_opt:\n try:\n int(opt_pat.search(opt[i]).group('opt').lower())\n except:\n os.system('cat ./Source/MadWeight_File/MWP_template/Readme.txt')\n sys.exit()\n if opt_pat.search(opt[i]).group('opt').lower()=='version':\n print 'MadWeight Version'\n os.system('cat ./Source/MadWeight_File/MW_TemplateVersion.txt')\n sys.exit()",
"def onStartAssistModeToggled(self, checked):\r\n # productive\r\n profprint()\r\n if checked:\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.start(self.addCTLPoints)\r\n self.startAssistModeButton.text = \"Stop Assisted Manual Segmentation\"\r\n else:\r\n self.stop()\r\n self.startAssistModeButton.text = \"Start Assisted Manual Segmentation\"",
"def check_option(update: Update, context: CallbackContext) -> None:\n if not update.effective_chat or not update.effective_user:\n return\n if not isinstance(context.chat_data, dict):\n raise AssertionError\n if update.effective_user.id not in context.chat_data[\n 'question_attempted_by']:\n chosen = int(update.callback_query.data.split('_')[1])\n que: Question = context.chat_data['qlist'][\n context.chat_data['question_number']]\n if context.chat_data['marksheet'].get(update.effective_user.id,\n None) is None:\n context.chat_data['marksheet'][int(\n update.effective_user.id)] = {\n 'name':\n escape_markdown(update.effective_user.full_name),\n 'score': 0\n }\n if que.is_correct(que.get_options()[chosen]):\n context.chat_data['marksheet'][\n update.effective_user.id]['score'] += 1\n context.bot.answer_callback_query(\n callback_query_id=update.callback_query.id,\n text=\"Correct!\",\n show_alert=True)\n context.chat_data['question_attempted_by'].append(\n update.effective_user.id)\n else:\n context.bot.answer_callback_query(\n callback_query_id=update.callback_query.id,\n text=\"Incorrect!, \" +\n f\"the correct answer is: {que.get_correct()}\",\n show_alert=True)\n context.chat_data['question_attempted_by'].append(\n update.effective_user.id)\n else:\n context.bot.answer_callback_query(\n callback_query_id=update.callback_query.id,\n text=\"You can only attempt once!\",\n show_alert=True)",
"def begin_present_are_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the present tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(are_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"are\":\n answer = are_present_quiz(verb, pronoun)\n checker = input(f'Tense: Presente \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False",
"def hintsLeft(self):\n return self.hints > 0",
"def ask_question(index, attributes):\n \n print(\"ask_question, index: \", str(index))\n\n curr_question = quiz.list_fragen[attributes[\"sess_questions\"][index]].get_frage()\n print(\"@ask_question: \", curr_question)\n\n print(\"@ask_question before if \")\n if len(attributes[\"scores\"]) > 1:\n print(\"@ask_question if > 1\")\n text = \"<s>Frage {0} an Spieler {1}:</s> <s>{2}</s>\".format(int(attributes[\"current_round\"]),\\\n attributes[\"current_player\"], curr_question)\n else:\n print(\"@ask_question else\")\n text = \"<s>Frage {0}:</s> <s>{1}</s>\".format(int(attributes[\"current_round\"]),\\\n curr_question)\n \n text = slower_speech(text)\n text += TICK_BEEP_SOUND\n \n print(\"@ask_question before setatts\")\n attributes[\"current_question\"] = curr_question\n print(\"@ask_question before setatts\")\n\n #returns string here excepcionally because response is formed elsewhere\n return text"
]
| [
"0.5843901",
"0.5729622",
"0.57262224",
"0.57114905",
"0.56002575",
"0.5516281",
"0.5414793",
"0.54023033",
"0.53952",
"0.53130263",
"0.5303876",
"0.52066034",
"0.518067",
"0.51021737",
"0.50333303",
"0.4967362",
"0.49285474",
"0.49111894",
"0.48984668",
"0.48977473",
"0.48809403",
"0.4871032",
"0.4869117",
"0.48644793",
"0.48637152",
"0.48556888",
"0.47824672",
"0.47795224",
"0.47747114",
"0.47727972"
]
| 0.83458793 | 0 |
Return the number of moves taken to arrive at the correct answer. Run the main loop in gamemode mode, prompting the user for input and consequently updating state. | def play_game(state: str, mode: str) -> int:
moves = 0
if in_test_mode(mode):
print('Answer: ' + wf.ANSWER)
while state != wf.ANSWER:
print('Current state: ' + state)
moves += get_hints(state, mode, SECTION_HINT)
section_num = get_section_number()
moves += get_hints(state, mode, MOVE_HINT, section_num)
move = get_move()
state = make_move(state, section_num, move)
moves += 1
return moves | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\r\n noMove = 0\r\n while(noMove < 2):\r\n options = self.get_valid_moves()\r\n if len(options) > 0:\r\n res = False\r\n while(not res):\r\n move = self.players[self.turn-1].get_move(self.board.copy(),options.copy())\r\n res = self.update_board(move,_testing=False)\r\n else:\r\n noMove += 1\r\n self.turn = (self.turn * 2 ) % 3 # 1 --> 2 2 --> 1\r\n return self.gameCount()",
"def run():\n game = Game()\n i = 0\n while True:\n print(i, \"\\n\\n\" + str(game))\n i += 1\n actions = game.possible_moves()\n if actions == []:\n return game.score()\n else:\n game_state = replace_none(np.array(game.state))\n action = h_min_max(game_state)[0]\n if action == UP:\n game.up()\n elif action == DOWN:\n game.down()\n elif action== LEFT:\n game.left()\n elif action== RIGHT:\n game.right()\n else:\n print(\"Didn't move\")\n return game",
"def run_game():\n\n #global correct\n correct = False\n\n code = create_code()\n show_instructions()\n\n turns = 0\n while not correct and turns < 12:\n #print(code)\n correct_digits_and_position = take_turn(code)\n turns += 1\n #print(correct_digits_and_position[0])\n correct = check_correctness(turns, correct_digits_and_position[0])\n #print(correct)\n\n show_code(code)",
"def human_opponent(state):\n print(state)\n while True:\n inp = input(\"What is your move? \\n\")\n if inp == 'pass':\n return len(state.valid_actions) - 1\n if inp == 'random':\n return random.randint(0, len(state.valid_actions) - 1)\n\n try:\n pos = [int(x) for x in inp.split()]\n action = pos[0]*state.board_size + pos[1]\n choice = state.valid_actions.index(action)\n return choice\n except:\n print(\"Invalid move {} try again.\".format(inp))",
"def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")",
"def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries",
"def game():\n display.display_game_scenario()\n board = map.make_board()\n player = characters.create_character()\n found_exit = False\n while not found_exit:\n display.print_current_position(board, player)\n direction = get_user_choice()\n if direction == \"quit\":\n print(\"You have either chosen to quit or died either way you failed your quest!\")\n return\n valid_move = validate_move(board, player, direction)\n if valid_move:\n characters.move_character(player, direction)\n found_exit = check_if_exit_is_reached(player)\n if not found_exit:\n if not movement_handler(player):\n print(\"You have either chosen to quit or died either way you failed your quest!\")\n return\n else:\n print(\"You can't go in that direction because it is a wall\")\n display.display_game_ending()",
"def solveOneStep(self):\n ### Student code goes here\n\n if (self.currentState.state == self.victoryCondition):\n self.visited[self.currentState] = True\n win_or_not = (self.currentState.state == self.victoryCondition)\n return win_or_not\n\n game_depth = self.currentState.depth\n continue_game = True\n test_its = 0\n while continue_game:\n test_its += 1\n # too long \n # time test\n if test_its == \"too long\":\n return \"too long\"\n result = self.solveOneStep_helper(game_depth)\n if result:\n victory_satisfied = (self.currentState.state == self.victoryCondition)\n if victory_satisfied:\n result_bool = True\n return result_bool\n else:\n game_depth = game_depth + 1\n else:\n return False",
"def interactive_strategy(score, opponent_score):\r\n print('Current score:', score, 'to', opponent_score)\r\n while True:\r\n response = input('How many dice will you roll? ')\r\n try:\r\n result = int(response)\r\n except ValueError:\r\n print('Please enter a positive number')\r\n continue\r\n if result < 0:\r\n print('Please enter a non-negative number')\r\n else:\r\n return result",
"def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result",
"def game():\n board = create_board(8)\n character = create_character()\n reached_goal = False\n while not reached_goal:\n display_position(board, character)\n direction = input(\"Please enter a key in 'wasd' to move that direction. You cannot move pass the edge of the \"\n \"map.\")\n if validate_move(board, character, direction):\n move_character(direction, character)\n reached_goal = is_win(character, board)\n else:\n print(\"Please select a valid input. Enter a wasd key and do not move past the walls!\")\n display_position(board, character)\n print(\"You win!\")",
"def run_game():\n set_locations()\n while True:\n draw_dungeon()\n valid_moves = get_moves('player')\n print(\"You're currently in room {}.\".format(STATUS['locations']['player']))\n print(\"You are currently {}.\".format(STATUS['weapon']))\n print(\"{}\".format(monster_check()))\n print(\"You can move {} (w/a/s/d)\"\n .format(', '.join(parse_moves(valid_moves))))\n move = input(\"> \").upper()\n while move not in valid_moves:\n clear_screen()\n print(\"There's no door that way... \")\n draw_dungeon()\n move = input(\"> \").upper()\n clear_screen()\n else:\n clear_screen()\n move_player(move)\n weapon_check()",
"def play_game(difficulty):\r\n lives = 3\r\n directions = Direction(difficulty=difficulty)\r\n drawer.bgcolor = Colors.BLUE\r\n drawer.fill_screen()\r\n\r\n # Starting Countdown\r\n drawer.display_countdown(3, 'Starting in ')\r\n\r\n # ---------- Main Program Loop ------------\r\n lost = False\r\n # Each game is 20 rounds long.\r\n turn = 10\r\n while turn > 0:\r\n time.sleep(0.1)\r\n if lost:\r\n if lives > 0:\r\n drawer.display_option('use a life and continue?')\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n\r\n while (input_direction := gamepad.direction_input()) is None:\r\n pygame.event.get()\r\n\r\n if input_direction == 'LEFT':\r\n lives -= 1\r\n lost = False\r\n time.sleep(0.5)\r\n\r\n else:\r\n print('round lost')\r\n return -1\r\n\r\n else:\r\n drawer.bgcolor = Colors.ORANGE\r\n drawer.display_lose()\r\n time.sleep(1)\r\n return -1\r\n\r\n # User did something.\r\n for event in pygame.event.get():\r\n # If user clicked close.\r\n if event.type == pygame.QUIT:\r\n pass\r\n\r\n # Choose a random direction either up right left or down\r\n # target_direction = random.choice(directions)\r\n directions.pick_direction()\r\n\r\n prev_input_direction = None\r\n ball_pos = [drawer.width // 2, drawer.height // 2]\r\n\r\n drawer.bgcolor = Colors.BLUE\r\n\r\n for angle in (a / 10 for a in range(63, -1, -1)):\r\n time.sleep(speed)\r\n\r\n # display the information\r\n drawer.fill_screen()\r\n drawer.display_text(directions.target_direction, Colors.GREY)\r\n\r\n drawer.display_text(f'{turn}', offset_x=320, offset_y=-200)\r\n\r\n # draw the ball in the proper place\r\n drawer.display_ball(ball_pos)\r\n drawer.display_lives(lives)\r\n drawer.display_timer(stop_angle=angle)\r\n drawer.refresh()\r\n\r\n # If the ball reached the end.\r\n if not drawer.ball_in_border(ball_pos):\r\n\r\n # The player chose correct.\r\n if directions.correct_direction(input_direction):\r\n # Leave the for; go on to the next turn.\r\n turn -= 1\r\n break\r\n\r\n # The player chose wrong.\r\n else:\r\n drawer.bgcolor = Colors.RED\r\n drawer.fill_screen()\r\n\r\n drawer.display_text(\"You chose wrong!\")\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n time.sleep(0.3)\r\n # prompt to use a life and play again above\r\n lost = True\r\n break\r\n\r\n # makes it easier to get controller input\r\n pygame.event.get()\r\n # capture the controller input\r\n input_direction = gamepad.direction_input()\r\n\r\n # Initialize the previous input\r\n # We need prev_input_direction otherwise\r\n # input_direction is None most of the time.\r\n # prev_ lets the ball continue to update after\r\n # choosing a direction.\r\n #\r\n # Need to update later to be able to correct a wrong move in time.\r\n # But for now it works good enough.\r\n if prev_input_direction is None:\r\n prev_input_direction = input_direction\r\n else:\r\n input_direction = prev_input_direction\r\n\r\n # get the input\r\n if input_direction is not None:\r\n # update the balls position\r\n if input_direction == 'LEFT':\r\n ball_pos[0] -= ball_speed\r\n\r\n elif input_direction == 'RIGHT':\r\n ball_pos[0] += ball_speed\r\n\r\n elif input_direction == 'UP':\r\n ball_pos[1] -= ball_speed\r\n\r\n else:\r\n ball_pos[1] += ball_speed\r\n\r\n # The ball didn't reach the end.\r\n # The player was too slow and time ran out.\r\n else:\r\n\r\n drawer.bgcolor = Colors.RED\r\n drawer.fill_screen()\r\n drawer.display_text('Out of Time! 
You were too slow.')\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n time.sleep(1)\r\n if lives > 0:\r\n drawer.display_option('use a life and continue?')\r\n drawer.display_lives(lives)\r\n drawer.refresh()\r\n\r\n while (input_direction := gamepad.direction_input()) is None:\r\n pygame.event.get()\r\n\r\n time.sleep(0.5)\r\n\r\n if input_direction == 'LEFT':\r\n lives -= 1\r\n time.sleep(0.5)\r\n continue\r\n\r\n # End the game\r\n drawer.bgcolor = Colors.ORANGE\r\n drawer.display_lose()\r\n time.sleep(1)\r\n return -1\r\n\r\n\r\n # The player completed the round successfully.\r\n else:\r\n drawer.bgcolor = Colors.GREEN\r\n drawer.fill_screen()\r\n drawer.display_text('Congratulations', Colors.WHITE)\r\n drawer.refresh()\r\n time.sleep(2)\r\n return lives",
"def runGame():\n # Game state\n player = [2,4] # initial location of the player\n score = 0 # initial score\n cubes = [[0,0], [3,0], [4,0]] # initial cube locations\n\n print(\"Welcome to cubes! Quit by typing 'quit'\")\n prettyPrint(cubes, player, score)\n\n # Main loop\n while True:\n direction = raw_input(\"Input 'left', 'right', 'stay', or 'quit': \")\n if direction=='quit':\n print(\"You quit! Score was\", score)\n break\n if direction !='left' and direction != 'right' and direction != 'stay':\n continue\n player = updatePlayerLocation(player, direction)\n cubes = updateCubes(cubes)\n score = updateScore(score)\n print(player)\n prettyPrint(cubes, player, score)\n\n if collision(cubes, player):\n print(\"You lose! Score was\", score)\n break",
"def UCTPlayGame():\n #start_time = time.time()\n state = GameState()\n m = UCT(rootstate=state, itermax=750, verbose=False) # play with values for itermax and verbose = True\n print str(m[0][0])+\" \"+str(m[0][1])+\" \"+str(m[1][0])+\" \"+str(m[1][1])\n state.DoMove(m)\n #print state #for user vs bot\n #print(\"--- %s seconds ---\" % (time.time() - start_time))\n \"\"\"if state.GetResult(state.whosemove) == 1.0:\n print \"Player \" + str(1 - state.whosemove) + \" wins!\"\n elif state.GetResult(state.whosemove) == 0.0:\n print \"Player \" + str(state.whosemove) + \" wins!\"\n else:\n print \"Nobody wins!\"\"\"",
"def update_game_state(self):\n # if board is not filled out, returns a valid move message\n for row in self.board:\n if 0 in row:\n return \"Valid input\"\n\n # if board is filled out, verifies if solution is valid and updates game state\n self.game_state = alg.check_solution(self.board)\n return self.game_state",
"def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get the puzzle and its solution\r\n solution = get_game_solution(WORDS_FILE, grid_size)\r\n puzzle = shuffle_puzzle(solution)\r\n\r\n solved = check_win(puzzle, solution)\r\n print_solution_position(solution, puzzle)\r\n\r\n # Continue to loop until the puzzle is solved or the user gives up\r\n while not solved:\r\n player_action = input(DIRECTION_PROMPT)\r\n\r\n # Player move input handler\r\n # Updates the puzzle with the new board layout, if fail alert user\r\n if player_action in move_actions:\r\n move_attempt = move(puzzle, player_action)\r\n if move_attempt:\r\n puzzle = move_attempt\r\n else:\r\n print(INVALID_MOVE_FORMAT.format(player_action))\r\n\r\n # Other inputs handler\r\n elif player_action in other_actions:\r\n if player_action == GIVE_UP:\r\n break\r\n elif player_action == HELP:\r\n print(HELP_MESSAGE)\r\n\r\n # If there is no match for input, alert the user\r\n else:\r\n print(INVALID_MESSAGE)\r\n\r\n print_solution_position(solution, puzzle)\r\n solved = check_win(puzzle, solution)\r\n\r\n # Show message depending if user won or not\r\n if solved:\r\n print(WIN_MESSAGE)\r\n else:\r\n print(GIVE_UP_MESSAGE)\r\n\r\n # Check if the user wishes to play again\r\n play_again = input(PLAY_AGAIN_PROMPT)\r\n if not (play_again.lower() == \"y\" or play_again == \"\"):\r\n playing = False\r\n print(BYE)",
"def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))",
"def the_counting_game(number_of_players=10, total=100):\n # a b c d e f g h i j\n # 1 2 3 4 5 6 7\n # 13 12 11 10 9 8\n # 14\n # 15 16 17 18 19 20 21\n # 27 26 25 24 23 22\n # 28\n # 29\n # print \"total\", total\n player_number = 1 # first player will say the number 1\n dir = 'right' # we start off counting to the right\n num_said = 1 # the number said by the first player\n while num_said < total:\n if dir == 'right':\n print dir\n # if we're at the last player, go back to the first player\n # which is last player minus total number of players minus 1\n if player_number == number_of_players:\n player_number = number_of_players - 1\n print \"p\", player_number, \"said: \", num_said\n else:\n print \"p\", player_number, \"said: \", num_said\n player_number += 1\n # if the next number will be a multiple of 7, time to switch directions\n if (num_said + 1) % 7 == 0:\n print \"this should switch\", dir\n dir = switch_direction(dir)\n print \"this should switch\", dir\n elif dir == 'left':\n print dir\n # if this is the first player, going left means going to the last player\n # which is total number of players\n if player_number == 1:\n player_number += (number_of_players - 1)\n else:\n print \"p\", player_number, \"said: \", num_said\n player_number -= 1\n # if the next number will be a multiple of 7, time to switch directions\n if (num_said + 1) % 7 == 0:\n print \"this should switch\", dir\n dir = switch_direction(dir)\n print \"this should switch\", dir\n num_said += 1\n return \"Player to say the total: \" + str(player_number)",
"def UCTPlayGame(itermax):\r\n print(\"Welcome to Ultimate Tic-Tac-Toe!\")\r\n player = 2 if input(\"Do you want to go first? [Y/N]: \") == \"N\" else 1\r\n\r\n state = GameState()\r\n while state.GetMoves():\r\n currentPlayer = state.NextPlayer()\r\n\r\n print(str(state))\r\n print(\"Moves for player \" + str(currentPlayer) + \": \")\r\n print(np.matrix(state.GetMoves()), \"\\n\")\r\n\r\n if currentPlayer == player:\r\n m = None\r\n while m not in state.GetMoves():\r\n try:\r\n m = int(input(\"Your move: \"))\r\n except ValueError:\r\n continue\r\n # m = random.choice(state.GetMoves())\r\n else:\r\n m = UCT(rootstate=state, itermax=itermax, verbose=False)\r\n print(\"AI played: \" + str(m))\r\n state.DoMove(m)\r\n print(str(state))\r\n\r\n if state.GetResult(state.playerJustMoved) == 1.0:\r\n print(\"Player \" + str(state.playerJustMoved) + \" wins!\")\r\n return state.playerJustMoved\r\n elif state.GetResult(state.playerJustMoved) == 0.0:\r\n print(\"Player \" + str(state.NextPlayer()) + \" wins!\")\r\n return state.NextPlayer()\r\n else:\r\n print(\"Nobody wins!\")\r\n return 0",
"def main():\r\n global user_pick, pickno, total\r\n test_total()\r\n sleep(delay)\r\n print(\"It is your turn!\")\r\n pickno = int(4)\r\n #Repeats the process as many times as we need\r\n while total >= 4:\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain\r\n test_pick()\r\n remain()\r\n cmp_logic()\r\n sleep(delay)\r\n print(\"You should pick \" + str(total))\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n test_remain()\r\n test_pick()\r\n remain()\r\n # Only way that USER WINS!!\r\n if int(total) == 0:\r\n sleep(delay)\r\n print(\"User WINS!\")\r\n exit()",
"def run_game(self):\n self.food.randomize_position(self.grid)\n while True:\n self.clock.tick(8)\n self.handle_keys()\n if not self.snake.move(self.grid):\n if self.end_game_dialog() == 0:\n end_game()\n else:\n self.score = 0\n self.grid = deepcopy(self.base_grid)\n self.snake.initialize_snake_on_grid(self.grid)\n self.food.randomize_position(self.grid)\n\n self.draw_grid()\n\n if self.snake.get_head_position() == self.food.position:\n self.snake.length += 1\n self.score += 2\n self.food.randomize_position(self.grid)\n if self.score > self.maxScore:\n self.maxScore = self.score\n\n self.screen.blit(self.surface, (0, 0))\n score = self.font.render(\"Score {0}\".format(self.score), True, (0, 0, 0))\n self.screen.blit(score, (5, 10))\n pygame.display.update()",
"def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)",
"def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break",
"def ask_move(player: int) -> int:\n\n while True:\n\n try:\n pawn_number = int(input(f\"Player {player}: Choose a piece to move (0-3): \"))\n except ValueError:\n continue\n else:\n if 0 <= pawn_number <= 3:\n break\n\n return pawn_number",
"def main():\n\n board = [[\".\"] * grid_size for i in range(grid_size)]\n ship_row = random_row(board)\n ship_col = random_col(board) - 1\n ships = 0\n turn = 0\n\n print_board(board)\n while turn < total_turns:\n\n guess_col = get_col()\n guess_row = get_row()\n\n print(\"-\" * 35)\n print(\n f\"You entered: {letter_and_index_conversion(guess_col, grid_size)}{guess_row} \\n\"\n )\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row - 1][guess_col - 1] = \"X\"\n print(\"Congratulations Captain! You got a hit!\")\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n ships += 1\n ship_row = random_row(board)\n ship_col = random_col(board)\n if ships == 10:\n print(\"Congratulations Captain! You won!\")\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n else:\n if (\n board[guess_row - 1][guess_col - 1] == \"X\" or\n board[guess_row - 1][guess_col - 1] == \"*\"\n ):\n print(\"You already guessed this one -_-\")\n print(\"-\" * 35)\n else:\n print(\"Your aim is WAY off! \\n\")\n board[guess_row - 1][guess_col - 1] = \"*\"\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n if turn == total_turns:\n print(\"Game Over! You ran out of turns\")\n print(\"-\" * 35)\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n\n print(f\"Turn {turn + 1} of {total_turns}\")\n print(f\"You have {10 - ships} ships left\")",
"def number_moves(game, player):\n return float(len(game.get_legal_moves(player)))",
"def game_loop(games,num_of_players,winning_score):\n updated_score = 0\n game_continue = True\n\n # Assign player list\n games.create_player_list(num_of_players)\n\n # Game Loop\n while game_continue:\n # cycle between players\n for player_position in range (num_of_players):\n print \"\\nPlayer {} Turn\".format(player_position + 1)\n\n # Clear temp score each round\n temp_score = 0\n\n # Turns loop\n while True:\n # User input\n user_input = raw_input(\"Enter (r)ole or (h)old:\")\n\n # roll dice, get value\n if user_input == \"r\":\n # roll dice using class Game.roll with random seed (time)\n roll_result = games.roll(time.time())\n # if roll a 1 end Turns for this player\n if roll_result < 0:\n print \"\\nPlayer {} rolled a 1. End turn.\".format((player_position + 1))\n break\n # continue game logic\n else:\n print \"Player {} rolled a {}\".format((player_position + 1),roll_result)\n # incriment temp score\n temp_score += roll_result\n\n # hold dice and bank temp score\n elif user_input == \"h\":\n print \"Player {} Holds\".format((player_position + 1))\n # assign value to score using class Game\n updated_score = games.update_player_score((player_position),temp_score)\n break\n\n else:\n print \"Whoops! Enter correct selection (r or h)\"\n\n print \"Score List:\",games.return_player_list()\n\n # determine if a winner\n if updated_score >= winning_score:\n print 'Player {} wins!'.format(player_position + 1)\n game_continue = False\n break",
"def guesses():\n tries = 3\n print (\" You may choose your maximum number of tries per question.\"\n \"The default is 3.\")\n player_prompt = \" Please type in your preferred number: \"\n while tries > 0:\n user_choice = raw_input(player_prompt)\n if user_choice.isdigit():\n print \"\\n OK, {} {} allowed per blank. Here we go!\\n\".format(user_choice, how_many(user_choice))\n return int(user_choice)\n tries -= 1\n player_prompt = (\" Silly, that's not a valid number of guesses! {} more {}. \\n\"\n \" Try again: \").format(tries, how_many(tries))\n if tries == 0:\n print \" You defaulted your number of guesses, so 3 it is!\"\n return 3",
"def main():\n board = [\n [' ', ' ', ' '],\n [' ', ' ', ' '],\n [' ', ' ', ' ']\n ]\n counter = 0\n\n while not check_victory(board):\n # This is called the game loop. It keeps the game running until it is finished.\n # On every iteration of the loop we check to see if a player has won.\n\n # Show the board to the player.\n show_board(board)\n\n # Take input to add a new token.\n board = take_input(board, OPTIONS[counter % 2])\n\n counter += 1"
]
| [
"0.70284015",
"0.6404722",
"0.6338472",
"0.62832123",
"0.62282634",
"0.61336094",
"0.5992224",
"0.5973218",
"0.594925",
"0.5926609",
"0.5920745",
"0.591415",
"0.59038174",
"0.58734757",
"0.5852002",
"0.58160967",
"0.5811705",
"0.5810292",
"0.57928646",
"0.5756727",
"0.5756169",
"0.5702213",
"0.5701985",
"0.56887466",
"0.56663656",
"0.5660537",
"0.56394625",
"0.5639016",
"0.5621208",
"0.5615177"
]
| 0.7799572 | 0 |
Run an autosegmentation on an input image. Choose which algorithm to run and pass the path to the Nifti input image OR a directory containing a DICOM series. Optionally, pass in a configuration file for the segmentation algorithm. Output the default configuration for an algorithm using the default flag. | def click_command(algorithm, input_path, config, default, output):
algorithm_config = segmentation_algorithms[algorithm]["default_settings"]
if default:
print(json.dumps(algorithm_config, indent=4))
return
    # If no input_path was set by this point, we need to inform the user
if not input_path:
print("Supply the path the the Nifti Image OR DICOM series to process")
return
print(f"Running {algorithm} segmentation")
if config:
with open(config, "r") as file_obj:
algorithm_config = json.load(file_obj)
read_path = input_path
if os.path.isdir(input_path):
# If it's a directory then read as a DICOM series
read_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(input_path)
image = sitk.ReadImage(read_path)
# Run the algorithm
results = segmentation_algorithms[algorithm]["algorithm"](image, algorithm_config)
    # If results is a tuple, the algorithm is returning other items as well as the segmentation.
# We are only interested in the segmentation here (the first element of the tuple)
if isinstance(results, tuple):
results = results[0]
# Save the output to the output directory
if not output:
output = "."
if not os.path.exists(output):
os.makedirs(output, exist_ok=True)
for result in results:
sitk.WriteImage(results[result], os.path.join(output, f"{result}.nii.gz")) | {
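
The click decorators that expose this function on the command line are not part of this record. What follows is a minimal sketch, under stated assumptions, of how that wiring might look: the option and argument names simply mirror the function signature, and segmentation_algorithms is a hypothetical stand-in with the shape the body expects (name -> {"algorithm": callable, "default_settings": dict}). The real package may register its algorithms and decorate click_command directly instead.

# A sketch of the assumed CLI wiring -- not the original decorators.
import click

# Hypothetical registry so the sketch is self-contained; the real package
# provides its own mapping of algorithm name -> callable and default settings.
segmentation_algorithms = {
    "example": {
        "algorithm": lambda image, settings: {"STRUCTURE": image},  # placeholder "segmentation"
        "default_settings": {},
    }
}

@click.command()
@click.argument("input_path", required=False, type=click.Path(exists=True))
@click.option("--algorithm", "-a", default="example", help="Segmentation algorithm to run")
@click.option("--config", "-c", type=click.Path(exists=True), default=None, help="JSON file overriding the default settings")
@click.option("--default", "-d", is_flag=True, help="Print the algorithm's default settings and exit")
@click.option("--output", "-o", type=click.Path(), default=".", help="Directory for the output Nifti files")
def cli(algorithm, input_path, config, default, output):
    click_command(algorithm, input_path, config, default, output)

if __name__ == "__main__":
    cli()

Run as, for instance, python cli.py /path/to/dicom_dir --algorithm example --output results, the placeholder algorithm would write results/STRUCTURE.nii.gz; a real algorithm would produce one Nifti file per segmented structure.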
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def applySegmentationSteps(input_img, input_mode, output_root, save_intermediate=False, overwrite=False):\n\n np_img = loadImage(input_img)\n\n if save_intermediate == False:\n np_dist = getDistanceRaster(input_img, input_mode=input_mode)\n np_ridge = ridgeDetection(np_dist, 'np', method='meijering', black_ridges=False)\n np_blobs = connectedComponentsImage(np_ridge, 'np', output_path=output_root + '_blobs.tif')\n exportBlobs(np_img, np_blobs, 'np', output_root)\n plt.imsave(output_root + 'blobs_cmap.png', np_blobs, cmap='nipy_spectral')\n else:\n np_dist = getDistanceRaster(input_img, input_mode=input_mode, output_path=output_root + '_distance.tif')\n np_ridge = ridgeDetection(np_dist, 'np', method='meijering', black_ridges=False, output_path=output_root + '_ridge.tif')\n np_blobs = connectedComponentsImage(np_ridge, 'np', output_path=output_root + '_blobs.tif')\n exportBlobs(np_img, np_blobs, 'np', output_root)\n plt.imsave(output_root + 'blobs_cmap.png', np_blobs, cmap='nipy_spectral')\n\n if os.path.exists(output_root + 'blobs_tif'):\n return 0\n else:\n return 1",
"def run_afni_segmentation(anatomical_brain, out_dir=None, run=True):\n\n import os\n import glob\n\n import nipype.interfaces.io as nio\n import nipype.pipeline.engine as pe\n\n output = \"anatomical_afni_segmentation_masks\"\n\n workflow = pe.Workflow(name='afni_segmentation_workflow')\n\n if not out_dir:\n out_dir = os.getcwd()\n\n workflow_dir = os.path.join(out_dir, \"workflow_output\", output)\n workflow.base_dir = workflow_dir\n\n resource_pool = {}\n config = {}\n num_cores_per_subject = 1\n\n resource_pool[\"anatomical_brain\"] = anatomical_brain\n\n workflow, resource_pool = \\\n afni_segmentation_workflow(workflow, resource_pool, config)\n\n ds = pe.Node(nio.DataSink(), name='datasink_afni_segmentation')\n ds.inputs.base_directory = workflow_dir\n\n seg_types = [\"gm\", \"wm\", \"csf\"]\n\n for seg in seg_types:\n\n node, out_file = resource_pool[\"anatomical_%s_mask\" % seg]\n\n workflow.connect(node, out_file, ds, 'anatomical_%s_mask' % seg)\n\n if run == True:\n workflow.run(plugin='MultiProc', plugin_args= \\\n {'n_procs': num_cores_per_subject})\n outpath = glob.glob(os.path.join(workflow_dir, \"anatomical_*_mask\", \\\n \"*\"))\n return outpath\n else:\n return workflow, workflow.base_dir",
"def autoSegmentROIs(dataDir, strategy, channel = 0):\n # Load the dataset which is located in the motCorr.sima\n dataset = sima.ImagingDataset.load(dataDir)\n \n # Determine the segmentation approach\n if strategy == 'STICA':\n segmentation_approach = sima.segment.STICA(channel = channel,components=20)\n segmentation_approach.append(sima.segment.SparseROIsFromMasks(min_size=20))\n segmentation_approach.append(sima.segment.MergeOverlapping(threshold=0.85))\n segmentation_approach.append(sima.segment.SmoothROIBoundaries())\n size_filter = sima.segment.ROIFilter(lambda roi: roi.size >= 20 and roi.size <= 50)\n segmentation_approach.append(size_filter)\n \n \n elif strategy == 'BAM':\n segmentation_approach = sima.segment.BasicAffinityMatrix(channel=channel, max_dist=(3,3), \n spatial_decay=(4,4), num_pcs=75, verbose=True)\n \n \n \n \n \n# segmentation_approach.append(sima.segment.MergeOverlapping(threshold=0.5))\n \n \n \n # returns a sima.ROI.ROIList\n outputFile = os.path.join(dataDir,'ROIs')\n rois = dataset.segment(segmentation_approach, 'auto_ROIs')\n rois.save(outputFile)\n print('Saved segmentation results...')\n return outputFile",
"def segmentation(\n img,\n img_path,\n results_folder,\n callback_context,\n crf_theta_slider_value,\n crf_mu_slider_value,\n rf_downsample_value,\n crf_downsample_factor,\n gt_prob,\n mask,#=None,\n n_sigmas,\n multichannel,#=True,\n intensity,#=True,\n edges,#=True,\n texture,#=True,\n sigma_min,#=0.5,\n sigma_max,#=16,\n):\n\n # #standardization using adjusted standard deviation\n img = standardize(img)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image standardized')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n for ni in np.unique(mask[1:]):\n logging.info('examples provided of %i' % (ni))\n\n if len(np.unique(mask)[1:])==1:\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Only one class annotation provided, skipping RF and CRF and coding all pixels %i' % (np.unique(mask)[1:]))\n result2 = np.ones(mask.shape[:2])*np.unique(mask)[1:]\n result2 = result2.astype(np.uint8)\n\n else:\n\n result = do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture, sigma_min,sigma_max, rf_downsample_value)#,SAVE_RF) # n_estimators,rf_file,data_file,\n\n Worig = img.shape[0]\n result = filter_one_hot(result, 2*Worig)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels filtered')\n\n if Worig>512:\n result = filter_one_hot_spatial(result, 2)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels spatially filtered')\n else:\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels not spatially filtered because width < 512 pixels')\n\n result = result.astype('float')\n result[result==0] = np.nan\n result = inpaint_nans(result).astype('uint8')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Spatially filtered values inpainted')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF model applied with sigma range %f : %f' % (sigma_min,sigma_max))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n def tta_crf_int(img, result, k):\n k = int(k)\n result2, n = crf_refine(np.roll(result,k), np.roll(img,k), crf_theta_slider_value, crf_mu_slider_value, crf_downsample_factor, gt_prob)\n result2 = np.roll(result2, -k)\n if k==0:\n w=.1\n else:\n w = 1/np.sqrt(k)\n\n return result2, w,n\n\n num_tta = 5#10\n\n if (psutil.virtual_memory()[0]>10000000000) & (psutil.virtual_memory()[2]<50): #>10GB and <50% utilization\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF parallel test-time augmentation')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n w = Parallel(n_jobs=-2, verbose=0)(delayed(tta_crf_int)(img, result, k) for k in np.linspace(0,int(img.shape[0])/5,num_tta))\n R,W,n = zip(*w)\n else:\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF serial test-time augmentation')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n R = []; W = []; n = []\n for k in np.linspace(0,int(img.shape[0])/5,num_tta):\n r,w,nn = tta_crf_int(img, result, k)\n R.append(r); W.append(w); n.append(nn)\n\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF model applied with %i test-time augmentations' % ( num_tta))\n\n result2 = np.round(np.average(np.dstack(R), axis=-1, 
weights = W)).astype('uint8')\n del R,W\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Weighted average applied to test-time augmented outputs')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF model applied with theta=%f and mu=%f' % ( crf_theta_slider_value, crf_mu_slider_value))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n if ((n==1)):\n result2[result>0] = np.unique(result)\n\n result2 = result2.astype('float')\n result2[result2==0] = np.nan\n result2 = inpaint_nans(result2).astype('uint8')\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Spatially filtered values inpainted')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2",
"def do_segmentation(img_idxs=None, output=True, save_candidates=True, data='train'):\n \n zoom_levels = [1, 0.7, 0.5]\n \n # Load images\n dl = pipeline.DataLoader()\n \n if data == 'train':\n data_imgs = dl.get_original_images(dataset=\"train\")\n elif data == 'test':\n data_imgs = dl.get_original_images(dataset=\"test\")\n elif data == 'final':\n print('Final stage not started yet')\n exit()\n else:\n print('Unknown data set: ' + data)\n exit()\n \n data_x = data_imgs['x']\n data_meta = data_imgs['meta']\n \n if img_idxs is None:\n img_idxs = list(range(len(data_x)))\n\n if len(img_idxs) == 0:\n print('Empty index range given.')\n exit()\n if img_idxs[-1] >= len(data_x):\n print('Invalid index range ending in %d for used data set of size %d' % (img_idxs[-1], len(data_x)))\n exit()\n \n # Prepare output file\n if save_candidates:\n if data == 'train':\n classlist = dl.get_classes()\n out_train_json_objs = {}\n for cls in classlist:\n out_train_json_objs[cls] = []\n else:\n out_json_obj = []\n \n # Prepare performance measurements\n tp_boxes = [0 for _ in zoom_levels]\n tp_compact_boxes = [0 for _ in zoom_levels]\n num_boxes = 0\n tp_fish = [0 for _ in zoom_levels]\n tp_compact_fish = [0 for _ in zoom_levels]\n num_fish = 0\n \n # See how well the centroids match\n #lower = lambda centroid, dim: min(max(centroid[dim] - bbox_size/2.0, 0), img.shape[dim] - bbox_size)\n #upper = lambda centroid, dim: max(bbox_size, min(centroid[dim] + bbox_size/2.0, img.shape[dim]))\n #intersection_centroid = lambda bbox, centroid: max(0, min(upper(centroid, 1), bbox['x']+bbox['width']) - max(lower(centroid, 1), bbox['x'])) * max(0, min(upper(centroid, 0), bbox['y']+bbox['height']) - max(lower(centroid, 0), bbox['y']))\n #matches_centroid = lambda bbox, centroid: intersection_centroid(bbox, centroid) / float(bbox['width']*bbox['height']) >= min_overlap_ratio\n \n clust_bbox_to_dict = lambda cand: {'x': cand[1], 'width': cand[3]-cand[1], 'y': cand[0], 'height': cand[2]-cand[0]}\n intersection_bbox = lambda cand, fish: max(0, min(cand['x']+cand['width'], fish['x']+fish['width']) - max(cand['x'], fish['x'])) * max(0, min(cand['y']+cand['height'], fish['y']+fish['height']) - max(cand['y'], fish['y']))\n containment_ratio = lambda cand, fish: intersection_bbox(cand, fish) / float(fish['width']*fish['height'])\n \n # Prepare histogram matching template\n print('Computing histogram template...')\n if data == 'train':\n template = preprocessing.build_template(data_x, data_meta)\n else:\n hist_template_data_imgs = dl.get_original_images(file_filter=preprocessing.DEFAULT_HIST_MATCH_TEMPLATES)\n template = preprocessing.build_template(hist_template_data_imgs['x'], hist_template_data_imgs['meta'])\n print('Histogram template computed. 
Starting segmentation...')\n \n for idx_idx in range(len(img_idxs)):\n idx = img_idxs[idx_idx]\n \n # Load image\n img = data_x[idx]().astype('uint8')\n if 'bounding_boxes' in data_meta[idx]:\n imgboxes = data_meta[idx]['bounding_boxes']\n else:\n imgboxes = []\n \n # Use histogram matching for night vision images\n nvg = False\n if colour.is_night_vision(img): # night vision\n nvg = True\n img = preprocessing.hist_match(img, template)\n \n # Perform actual segmentation\n regions, centroids, clust_bboxes = colour_segmentation(img, max_clust_size=0.10)\n clust_bboxes = unique([clust_bbox_to_dict(clust) for clust in clust_bboxes], key=lambda cand: (cand['x'], cand['y']))\n \n #num_matching_boxes = sum(any(matches_centroid(bbox, centroid) for bbox in imgboxes) for centroid in centroids)\n #num_found_fish = sum(any(matches_centroid(bbox, centroid) for centroid in centroids) for bbox in imgboxes)\n #num_impossible_here = sum(overlap_ratio * max(bbox['width'], bbox['height']) >= bbox_size for bbox in imgboxes)\n \n num_compact_matching_boxes = [sum(any(containment_ratio(bbox, preprocessing.zoom_box(clust, img.shape, zoom_factor=zoom, output_dict=True)) >= min_containment_ratio for bbox in imgboxes) for clust in clust_bboxes) for zoom in zoom_levels]\n num_compact_found_fish = [sum(any(containment_ratio(bbox, preprocessing.zoom_box(clust, img.shape, zoom_factor=zoom, output_dict=True)) >= min_containment_ratio for clust in clust_bboxes) for bbox in imgboxes) for zoom in zoom_levels]\n num_matching_boxes = [sum(any(containment_ratio(preprocessing.zoom_box(clust, img.shape, zoom_factor=zoom, output_dict=True), bbox) >= min_overlap_ratio for bbox in imgboxes) for clust in clust_bboxes) for zoom in zoom_levels]\n num_found_fish = [sum(any(containment_ratio(preprocessing.zoom_box(clust, img.shape, zoom_factor=zoom, output_dict=True), bbox) >= min_overlap_ratio for clust in clust_bboxes) for bbox in imgboxes) for zoom in zoom_levels]\n \n # Record this information\n #tp_boxes += num_matching_boxes\n num_boxes += len(clust_bboxes)\n #tp_fish += num_found_fish\n num_fish += len(imgboxes)# - num_impossible_here\n #num_impossible += num_impossible_here\n tp_compact_boxes = [a+b for a,b in zip(tp_compact_boxes, num_compact_matching_boxes)]\n tp_compact_fish = [a+b for a,b in zip(tp_compact_fish, num_compact_found_fish)]\n tp_boxes = [a+b for a,b in zip(tp_boxes,num_matching_boxes)]\n tp_fish = [a+b for a,b in zip(tp_fish,num_found_fish)]\n \n if output:\n # Output performance for this image\n if data == 'train':\n #print('Image %d (found %d/%d%s, %d FPs%s)' % (idx, num_found_fish, len(imgboxes)-num_impossible_here, (', %d impossible' % num_impossible_here) if num_impossible_here > 0 else '', len(centroids)-num_matching_boxes, '; NVG' if nvg else ''))\n print('Image %d (compact: found %d/%d %d FPs none; %d/%d %d FPs 70%%; %d/%d %d FPs 50%%%s)' % (idx, num_compact_found_fish[0], len(imgboxes), len(centroids)-num_compact_matching_boxes[0], num_compact_found_fish[1], len(imgboxes), len(centroids)-num_compact_matching_boxes[1], num_compact_found_fish[2], len(imgboxes), len(centroids)-num_compact_matching_boxes[2], '; NVG' if nvg else ''))\n print('Image %d (encompassing: found %d/%d %d FPs none; %d/%d %d FPs 70%%; %d/%d %d FPs 50%%%s)' % (idx, num_found_fish[0], len(imgboxes), len(centroids)-num_matching_boxes[0], num_found_fish[1], len(imgboxes), len(centroids)-num_matching_boxes[1], num_found_fish[2], len(imgboxes), len(centroids)-num_matching_boxes[2], '; NVG' if nvg else ''))\n else:\n print('Image %d (%d 
candidates)' % (idx, len(centroids)))\n \n # Summarise performance up till now\n if idx_idx%50 == 49:\n if data == 'train':\n #box_precision = 100*tp_boxes / float(num_boxes) if num_boxes > 0 else -1\n #fish_recall = 100*tp_fish / float(num_fish) if num_fish > 0 else -1\n #print('Box precision after %d images: %g%% (%d/%d)\\nFish recall after %d images: %g%% (%d/%d%s)\\n' % (idx_idx+1, box_precision, tp_boxes, num_boxes, idx_idx+1, fish_recall, tp_fish, num_fish, (', %d impossible' % num_impossible) if num_impossible > 0 else ''))\n \n box_precision = [100*tp_boxes_i / float(num_boxes) if num_boxes > 0 else -1 for tp_boxes_i in tp_boxes]\n compact_box_precision = [100*tp_boxes_i / float(num_boxes) if num_boxes > 0 else -1 for tp_boxes_i in tp_compact_boxes]\n fish_recall = [100*tp_fish_i / float(num_fish) if num_fish > 0 else -1 for tp_fish_i in tp_fish]\n compact_fish_recall = [100*tp_fish_i / float(num_fish) if num_fish > 0 else -1 for tp_fish_i in tp_compact_fish]\n \n print('Box compact-match precision after %d images: %g%% (%d/%d) none; %g%% (%d/%d) 70%%; %g%% (%d/%d) 50%%\\nFish compact-match recall after %d images: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom' % (idx_idx+1, compact_box_precision[0], tp_compact_boxes[0], num_boxes, compact_box_precision[1], tp_compact_boxes[1], num_boxes, compact_box_precision[2], tp_compact_boxes[2], num_boxes, idx_idx+1, compact_fish_recall[0], tp_compact_fish[0], num_fish, compact_fish_recall[1], tp_compact_fish[1], num_fish, compact_fish_recall[2], tp_compact_fish[2], num_fish))\n print('Box encompassing-match precision after %d images: %g%% (%d/%d) none; %g%% (%d/%d) 70%%; %g%% (%d/%d) 50%%\\nFish encompassing-match recall after %d images: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom\\n' % (idx_idx+1, box_precision[0], tp_boxes[0], num_boxes, box_precision[1], tp_boxes[1], num_boxes, box_precision[2], tp_boxes[2], num_boxes, idx_idx+1, fish_recall[0], tp_fish[0], num_fish, fish_recall[1], tp_fish[1], num_fish, fish_recall[2], tp_fish[2], num_fish))\n else:\n print('%d images segmented (%d candidates in total)' % (idx_idx+1, num_boxes))\n \n if save_candidates:\n img_json_obj = {'filename': data_meta[idx]['filename']}\n #img_json_obj['candidates'] = unique([{'x': lower(centroid, 1), 'y': lower(centroid, 0), 'width': bbox_size, 'height': bbox_size} for centroid in centroids], key=lambda cand: (cand['x'], cand['y']))\n img_json_obj['candidates'] = clust_bboxes\n if data == 'train':\n out_train_json_objs[data_meta[idx]['class']].append(img_json_obj)\n else:\n out_json_obj.append(img_json_obj)\n \n \n if output:\n # Summarise total performance\n if data == 'train':\n #box_precision = 100*tp_boxes / float(num_boxes) if num_boxes > 0 else -1\n #fish_recall = 100*tp_fish / float(num_fish) if num_fish > 0 else -1\n #print('\\n%d images completed!\\nTotal box precision: %g%% (%d/%d)\\nTotal fish recall: %g%% (%d/%d%s)\\n' % (len(img_idxs), box_precision, tp_boxes, num_boxes, fish_recall, tp_fish, num_fish, (', %d impossible' % num_impossible) if num_impossible > 0 else ''))\n \n box_precision = [100*tp_boxes_i / float(num_boxes) if num_boxes > 0 else -1 for tp_boxes_i in tp_boxes]\n compact_box_precision = [100*tp_boxes_i / float(num_boxes) if num_boxes > 0 else -1 for tp_boxes_i in tp_compact_boxes]\n fish_recall = [100*tp_fish_i / float(num_fish) if num_fish > 0 else -1 for tp_fish_i in tp_fish]\n compact_fish_recall = [100*tp_fish_i / float(num_fish) if num_fish > 0 else -1 for tp_fish_i in 
tp_compact_fish]\n \n print('\\n%d images completed!\\nTotal compact box precision: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom\\nTotal compact fish recall: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom' % (idx_idx+1, compact_box_precision[0], tp_compact_boxes[0], num_boxes, compact_box_precision[1], tp_compact_boxes[1], num_boxes, compact_box_precision[2], tp_compact_boxes[2], num_boxes, compact_fish_recall[0], tp_compact_fish[0], num_fish, compact_fish_recall[1], tp_compact_fish[1], num_fish, compact_fish_recall[2], tp_compact_fish[2], num_fish))\n print('Total encompassing box precision: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom\\nTotal encompassing fish recall: %g%% (%d/%d) no zoom; %g%% (%d/%d) 70%% zoom; %g%% (%d/%d) 50%% zoom\\n' % (box_precision[0], tp_boxes[0], num_boxes, box_precision[1], tp_boxes[1], num_boxes, box_precision[2], tp_boxes[2], num_boxes, fish_recall[0], tp_fish[0], num_fish, fish_recall[1], tp_fish[1], num_fish, fish_recall[2], tp_fish[2], num_fish))\n else:\n print('%d images segmented (%d candidates in total)' % (idx_idx+1, num_boxes))\n\n if save_candidates:\n outdir = settings.SEGMENTATION_CANDIDATES_OUTPUT_DIR\n os.makedirs(outdir)\n filename = 'candidates%s.json' % ('' if img_idxs is None else ('_%d-%d' % (min(img_idxs), max(img_idxs))))\n if data == 'train':\n for cls in classlist:\n with open(os.path.join(outdir, cls + '_' + filename), 'w') as outfile:\n json.dump(out_train_json_objs[cls], outfile)\n else:\n with open(os.path.join(outdir, filename), 'w') as outfile:\n json.dump(out_json_obj, outfile)",
"def staining_segmentation():\n\n # Inputs of the function\n parser = argparse.ArgumentParser(description='Segmentation script')\n parser.add_argument('-scheduler', default=False, help='dask scheduler address ex. tcp://192.168.0.4:7003')\n parser.add_argument('-path', help='processing directory')\n parser.add_argument('-processing_file', help='path to the file with the \\\n staning to process')\n parser.add_argument('-segmentation_staining', help='staining to be \\\n segmented')\n \n \n\n args = parser.parse_args()\n \n # Directory to process\n processing_directory = args.path\n\n # File to process\n processing_file = args.processing_file\n\n # staining to segment\n segmentation_staining = args.segmentation_staining\n\n # Dask scheduler address\n scheduler_address = args.scheduler\n \n if scheduler_address:\n # Start dask client on server or cluster\n client=Client(scheduler_address)\n\n else:\n # Start dask client on local machine. It will use all the availabe\n # cores -1\n\n # number of core to use\n ncores = multiprocessing.cpu_count()-1\n cluster = LocalCluster(n_workers=ncores)\n client=Client(cluster)\n\n # Determine the operating system running the code\n os_windows, add_slash = utils.determine_os()\n\n # Check training slash in the processing directory\n processing_directory=utils.check_trailing_slash(processing_directory,os_windows)\n\n\n segmentation_parameters = utils.general_yaml_parser(processing_directory+'Staining_segmentation.config.yaml')\n\n # Chunking parameters\n chunk_size = segmentation_parameters[segmentation_staining]['image_chunking_parameters']['chunk_size']\n percent_padding = segmentation_parameters[segmentation_staining]['image_chunking_parameters']['percent_padding']\n\n # Segmentation parameters\n trimming = segmentation_parameters[segmentation_staining]['segmentation_parameters']['trimming']\n min_object_size = segmentation_parameters[segmentation_staining]['segmentation_parameters']['min_object_size']\n disk_radium_rank_filer = segmentation_parameters[segmentation_staining]['segmentation_parameters']['disk_radium_rank_filer']\n min_distance = segmentation_parameters[segmentation_staining]['segmentation_parameters']['min_distance'] \n threshold_rel = segmentation_parameters[segmentation_staining]['segmentation_parameters']['threshold_rel'] \n\n # Load the image (will be modified after the change to hdf5 input)\n img = io.imread(processing_file)\n\n # Image chunking\n nr_chunks,nc_chunks,Coords_Chunks_list, Coords_Padded_Chunks_list,r_coords_tl_all_padded,\\\n c_coords_tl_all_padded,r_coords_br_all_padded,c_coords_br_all_padded = \\\n object_based_segmentation.image_chunking(img,chunk_size,percent_padding)\n \n \n # Create the chunks idx\n Chunks_idxs_linear=np.arange(len(Coords_Padded_Chunks_list),dtype='int32')\n\n # Distribute the chunks idx and distridute them in an array according to the position\n # in the chunked image\n Chunks_idxs=Chunks_idxs_linear.reshape(nr_chunks,nc_chunks) \n\n # Flatten the array for make it easier the creation of the coords combination\n Chunks_idxs_rows=np.ravel(Chunks_idxs)\n Chunks_idxs_cols=np.ravel(Chunks_idxs,order='F')\n\n # Calculate coords of the overlapping chunks\n Overlapping_chunks_coords=list()\n counter=0\n left_pos=Chunks_idxs_rows[0]\n for el in Chunks_idxs_rows[1:]:\n if counter < nc_chunks-1:\n Coords_left=Coords_Padded_Chunks_list[left_pos]\n Coords_right=Coords_Padded_Chunks_list[el]\n row_tl=Coords_left[0]\n row_br=Coords_left[1]\n col_tl=Coords_right[2]\n col_br=Coords_left[3]\n 
Overlapping_chunks_coords.append((row_tl,row_br,col_tl,col_br))\n left_pos=el\n counter+=1\n else:\n left_pos=el\n counter=0\n \n counter=0\n top_pos=Chunks_idxs_cols[0]\n for el in Chunks_idxs_cols[1:]:\n if counter < nr_chunks-1:\n Coords_top=Coords_Padded_Chunks_list[top_pos]\n Coords_bottom=Coords_Padded_Chunks_list[el]\n \n row_tl=Coords_bottom[0]\n row_br=Coords_top[1]\n col_tl=Coords_top[2]\n col_br=Coords_top[3]\n Overlapping_chunks_coords.append((row_tl,row_br,col_tl,col_br))\n \n counter+=1\n top_pos=el\n else:\n top_pos=el\n counter=0\n\n # Now i use this approach for testing. If the image gets to big to fit in RAM\n # then save the files and load them separately in each node\n chunked_image_seq = list()\n for coords in Coords_Padded_Chunks_list:\n chunked_image_seq.append(img[coords[0]:coords[1],coords[2]:coords[3]])\n\n # Run the segmentation\n futures_processes = client.map(object_based_segmentation.polyT_segmentation,chunked_image_seq,\n min_object_size=min_object_size,\n min_distance=min_distance,\n disk_radium_rank_filer=disk_radium_rank_filer,\n threshold_rel=threshold_rel,\n trimming=trimming)\n\n Objects_list = client.gather(futures_processes)\n\n # Recalculate labels and coords\n\n processed_images_data = dict()\n\n max_starting_label = 0\n total_data_dict = dict()\n\n for idx, objs_chunk in enumerate(Objects_list):\n for label ,cvalues in objs_chunk.items():\n new_label=max_starting_label+1\n coords = Coords_Padded_Chunks_list[idx][0::2]\n total_data_dict[new_label] = cvalues+coords\n max_starting_label = new_label\n\n\n # Calculate all the intersecting objects\n futures_processes = client.map(object_based_segmentation.OverlappingCouples,Overlapping_chunks_coords,\n TotalDataDict = total_data_dict)\n \n\n All_intersecting = client.gather(futures_processes)\n\n\n # Put together the couple with the same label for multiple intersection\n # for the labels of objects where there is intersection between multiple regions\n # Then scatter all of them and calculate intersection\n\n # Combine the results from the parallel processing\n flatten_couple = [el for grp in All_intersecting for el in grp] \n # Remove duplicates\n flatten_couple=list(set(flatten_couple))\n\n # Create a list of the labels (removing the repeats)\n singles=list()\n [singles.append(x) for cpl in flatten_couple for x in cpl]\n singles=list(set(singles))\n\n # Identify the couples containing singles\n Combined_all_singles=list()\n for item in singles:\n Combined_single=list()\n for couple in flatten_couple:\n if item in couple:\n Combined_single.append(couple)\n Combined_all_singles.append(Combined_single)\n\n if Combined_all_singles:\n # Combine all the intersecting labeles\n start=Combined_all_singles[0]\n ComparisonList=Combined_all_singles[1:].copy()\n #merged=start.copy()\n merged=list()\n SavedCombinations=list()\n tmp_list=ComparisonList.copy()\n KeepGoing=True\n Loop=0\n while KeepGoing:\n Loop+=1\n\n\n for idx,el in enumerate(ComparisonList):\n\n if set(start).intersection(set(el)):\n #merged=list(set(merged)|set(el))\n [merged.append(x) for x in el]\n tmp_list = [e for e in tmp_list if e != el]\n\n\n intersection=list(set.intersection(set(merged),set(start))) \n if intersection:\n merged=list(set.union(set(merged),set(start)))\n #merged=list(set(merged))\n start=merged.copy()\n merged=list()\n ComparisonList=tmp_list.copy()\n #tmp_list.append(merged)\n else:\n SavedCombinations.append(start)\n start=tmp_list[0]\n tmp_list=tmp_list[1:]\n ComparisonList=tmp_list.copy()\n\n\n if len(tmp_list)<1:\n 
[SavedCombinations.append(x) for x in tmp_list]\n KeepGoing =False\n \n # Remove all the duplicated labeled that intersect\n # in this case the labeled are merged. It will be nice to run an extra\n # segmentation on the merged objects\n # If it is too slow this step can be parallelised\n\n SavedLab_list=list()\n CleanedDict=total_data_dict.copy()\n for couple in SavedCombinations:\n SaveLab, RemoveLabs,NewCoords=object_based_segmentation.IntersectionCouples(couple,total_data_dict)\n SavedLab_list.append(SaveLab)\n for lab in RemoveLabs:\n del CleanedDict[lab]\n CleanedDict[SaveLab]=NewCoords\n else:\n CleanedDict=total_data_dict \n\n\n # Calculate all objects properties\n all_objects_list = [(key,coords) for key,coords in CleanedDict.items()]\n\n futures_processes = client.map(object_based_segmentation.obj_properties_calculator,all_objects_list)\n\n all_objects_properties_list = client.gather(futures_processes)\n\n\n # convert the list to a dictionary\n all_objects_properties_dict = { k: v for d in all_objects_properties_list for k, v in d.items() }\n\n # Save all the objects\n segmented_objs_fname = processing_directory + 'segmented_' + segmentation_staining + '_all_objs_properties.pkl'\n pickle.dump(all_objects_properties_dict,open(segmented_objs_fname,'wb'))",
"def afni_segmentation_workflow(workflow, resource_pool, config, name=\"_\"):\n\n import copy\n import nipype.pipeline.engine as pe\n from nipype.interfaces.afni import preprocess\n\n if \"anatomical_brain\" not in resource_pool.keys():\n\n from anatomical_preproc import anatomical_skullstrip_workflow\n old_rp = copy.copy(resource_pool)\n workflow, new_resource_pool = \\\n anatomical_skullstrip_workflow(workflow, resource_pool, config,\n name)\n\n if resource_pool == old_rp:\n return workflow, resource_pool\n\n segment = pe.Node(interface=preprocess.Seg(), name='segmentation%s' % name)\n\n segment.inputs.mask = 'AUTO'\n\n if len(resource_pool[\"anatomical_brain\"]) == 2:\n node, out_file = resource_pool[\"anatomical_brain\"]\n workflow.connect(node, out_file, segment, 'in_file')\n else:\n segment.inputs.in_file = resource_pool[\"anatomical_brain\"]\n\n # output processing\n AFNItoNIFTI = pe.Node(interface=preprocess.AFNItoNIFTI(),\n name=\"segment_AFNItoNIFTI%s\" % name)\n\n AFNItoNIFTI.inputs.out_file = \"classes.nii.gz\"\n\n workflow.connect(segment, 'out_file', AFNItoNIFTI, 'in_file')\n\n # break out each of the three tissue types into\n # three separate NIFTI files\n extract_CSF = pe.Node(interface=preprocess.Calc(),\n name='extract_CSF_mask%s' % name)\n extract_CSF.inputs.expr = \"within(a,1,1)\"\n extract_CSF.inputs.out_file = \"anatomical_csf_mask.nii.gz\"\n\n extract_GM = pe.Node(interface=preprocess.Calc(),\n name='extract_GM_mask%s' % name)\n extract_GM.inputs.expr = \"within(a,2,2)\"\n extract_GM.inputs.out_file = \"anatomical_gm_mask.nii.gz\"\n\n extract_WM = pe.Node(interface=preprocess.Calc(),\n name='extract_WM_mask%s' % name)\n extract_WM.inputs.expr = \"within(a,3,3)\"\n extract_WM.inputs.out_file = \"anatomical_wm_mask.nii.gz\"\n\n workflow.connect(AFNItoNIFTI, 'out_file', extract_CSF, 'in_file_a')\n workflow.connect(AFNItoNIFTI, 'out_file', extract_GM, 'in_file_a')\n workflow.connect(AFNItoNIFTI, 'out_file', extract_WM, 'in_file_a')\n\n resource_pool[\"anatomical_csf_mask\"] = (extract_CSF, 'out_file')\n resource_pool[\"anatomical_gm_mask\"] = (extract_GM, 'out_file')\n resource_pool[\"anatomical_wm_mask\"] = (extract_WM, 'out_file')\n\n return workflow, resource_pool",
"def segment(image, algorithm: str, **kwargs):\n # try to unwrap the method using string name\n try:\n segment_image = SEGMENTATION[algorithm]\n except KeyError:\n raise ValueError('{} is not a valid segmentation algorithm')\n # if the image is in [0, 255], convert the image to [0, 1]\n if str(image.dtype) == 'uint8':\n image = img_as_float(image)\n # if the algorithm is watershed, apply sobel and grayscale first\n if algorithm == 'watershed':\n image = sobel(rgb2gray(image))\n # apply the segmentation algorithm with given key word arguments\n segments = segment_image(image, **kwargs)\n boundaries = mark_boundaries(image, segments, (1, 1, 1)).astype('uint8')\n\n return segments, boundaries",
"def run_visualization(filepath, MODEL, outputFilePath):\n try:\n # print(\"Trying to open : \" + sys.argv[1])\n \t# f = open(sys.argv[1])\n \tjpeg_str = open(filepath, \"rb\").read()\n \torignal_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image. Please check file: ' + filepath)\n return\n\n print('running deeplab on image %s...' % filepath)\n resized_im, seg_map = MODEL.run(orignal_im)\n\n # vis_segmentation(resized_im, seg_map)\n drawSegment(resized_im, seg_map, outputFilePath)",
"def do_segmentation_eval(args, segmentation_file_list):\n task_output_path = args.task_output_path\n os.makedirs(task_output_path, exist_ok=True)\n setting_folder_path = args.setting_folder_path\n file_txt_path = ''\n if args.file_txt_path:\n file_txt_path = args.file_txt_path\n fname_txt_path = os.path.join(os.path.split(file_txt_path)[0],\"file_name_list.txt\")\n fname_list = read_txt_into_list(fname_txt_path) if os.path.isfile(fname_txt_path) else None\n else:\n print(segmentation_file_list)\n fname_list = [[f.split('/')[-1].split('.')[0] for f in segmentation_file_list[0]]]*2\n dm, tsm = init_test_env(setting_folder_path, task_output_path, segmentation_file_list, fname_list)\n tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id\n model_path= args.model_path\n if model_path is not None:\n assert os.path.isfile(model_path), \"the model {} not exist\".format_map(model_path)\n tsm.task_par['tsk_set']['model_path'] = model_path\n force_test_setting(dm, tsm, task_output_path)\n\n dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None\n tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')\n run_one_task(tsm_json_path, dm_json_path)",
"def segment_from_command_line(args):\n\n input_file = BedTool(args.input)\n # Segment the input file\n return segment(input_file, args.method, p0=args.p0, prior=args.prior)",
"def main(args):\n # Generate detectron2 config from command line arguments.\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n\n # The configuration file should not contain any datasets. They are configured\n # from command line arguments instead.\n if len(cfg.DATASETS.TRAIN) > 0 or len(cfg.DATASETS.TEST) > 0:\n logging.error(\"Please set DATASETS.TRAIN = () and DATASETS.TEST = ().\")\n sys.exit(1)\n cfg.DATASETS.TRAIN = (TRAIN_SPLIT_NAME, )\n cfg.DATASETS.TEST = (VALID_SPLIT_NAME, )\n\n cfg.freeze()\n default_setup(cfg, args)\n\n # Register synthetic sign datasets.\n if args.image_width is not None or args.image_height is not None:\n if args.image_width is None or args.image_height is None:\n logging.error(\n \"Please specify both, image-width and image-height (or none).\")\n sys.exit(1)\n image_shape = args.image_height, args.image_width\n else:\n image_shape = None\n\n register_synthetic_signs(args.train_csv,\n args.label_map,\n cfg,\n name=TRAIN_SPLIT_NAME,\n image_shape=image_shape)\n if args.valid_csv is not None:\n register_synthetic_signs(args.valid_csv,\n args.label_map,\n cfg,\n name=VALID_SPLIT_NAME,\n image_shape=image_shape)\n\n # Run training or evaluation.\n if args.eval_only:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume)\n res = Trainer.test(cfg, model)\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n return trainer.train()",
"def run_visualization(filepath):\n try:\n \t#print(\"Trying to open : \" + sys.argv[1])\n \t# f = open(sys.argv[1])\n \tjpeg_str = open(filepath, \"rb\").read()\n \torignal_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image. Please check file: ' + filepath)\n return\n\n #print('running deeplab on image %s...' % filepath)\n resized_im, seg_map = MODEL.run(orignal_im)\n\n # vis_segmentation(resized_im, seg_map)\n drawSegment(resized_im, seg_map)",
"def run_visualization(SAMPLE_IMAGE):\n original_im = Image.open(SAMPLE_IMAGE)\n global seg_map\n seg_map = MODEL.run(original_im)\n vis_segmentation(original_im, seg_map)",
"def Segmentation(WorkingDirectory, ListTrainingDataFile, ListImageName, modelname, noiseReduction, numberOfClasses, classesNamesList, ROI, ListAreaNames, fusionClassesY_N, maskY_N, imageY_N, InfoY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, ReferencePicture):\n ### Create the folder where the output will be saved \n if maskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/Masks'): \n os.mkdir(WorkingDirectory+'/Masks')\n if imageY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/MaskedImages'): \n os.mkdir(WorkingDirectory+'/MaskedImages')\n if NFMaskY_N=='Y':\n if not os.path.exists(WorkingDirectory+'/NonFilteredMasks'): \n os.mkdir(WorkingDirectory+'/NonFilteredMasks')\n\n \n ### Import and format the training data from the training data files.\n trainDataTab=np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0]])\n for file in ListTrainingDataFile: \n f=open(file,\"r\",newline='') \n TrainData = list(csv.reader(f))\n f.close()\n TrainData.remove(['Class', 'Image', 'x','y','B','G','R','H','S','V','L','a','b'])\n TrainData=np.asarray(TrainData) \n trainDataTab=np.concatenate((trainDataTab, TrainData), axis=0)\n trainDataTab=np.delete(trainDataTab, (0), axis=0)\n if len(ListTrainingDataFile)>1: # if the user choose more than one file, a new file is saved combining all the selected files.\n np.savetxt(WorkingDirectory+'/trainData_'+str(numberOfClasses)+'classes.csv', trainDataTab, delimiter=\",\",header='Class,Image,x,y,B,G,R,H,S,V,L,a,b', comments='',fmt='%s')\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n trainDataTab=np.delete(trainDataTab,1, 1)\n\n ### Format the list of ROI \n if ROI!='Whole pictures':\n ROI=ast.literal_eval(ROI)\n\n \n ### Train the model \n model=TrainModel(trainDataTab, modelname,classesNamesList) \n\n \n \n ### Get the size of the reference picture with a 1 pixel difference to avoid any resizing issue\n FirstImage=cv2.imread(ReferencePicture)\n ShapeFirstImage=np.shape(FirstImage)\n a=ShapeFirstImage[0]\n b=ShapeFirstImage[1]\n c=ShapeFirstImage[2]\n ShapeFirstImage2=(a+1,b,c)\n ShapeFirstImage3=(a+1,b+1,c)\n ShapeFirstImage4=(a+1,b-1,c)\n ShapeFirstImage5=(a,b,c)\n ShapeFirstImage6=(a,b+1,c)\n ShapeFirstImage7=(a,b-1,c) \n ShapeFirstImage8=(a-1,b,c)\n ShapeFirstImage9=(a-1,b+1,c)\n ShapeFirstImage10=(a-1,b-1,c) \n\n ### List initialization \n ListImageWrongSize=[]\n ListRunningTimes=[]\n ListTestDataTimes=[]\n ListApplyModelTimes=[]\n ListSaveOutputTimes=[]\n \n if BiggestBlobY_N=='Y':\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage', 'Aspect Ratio','Extent','Solidity', 'Equivalent Diameter', 'Main axe', 'Secondary axe']]) \n else:\n ListAirs=np.array([['Area/Plant','Image Name','Surface','Coverage']]) \n \n ### Main loop on the image list.\n for i in ListImageName:\n start_time = time.monotonic() \n TestImageBGR=cv2.imread(i) \n ImageName=i.split('/')\n ImageName=ImageName[-1] \n ImageName=ImageName.split('.')\n ImageName=ImageName[0] \n ######################################THESE THREE LINES CAN BE USED TO ADD a TIME FILTER ( only keep the pictures between certain hours)\n# hour=float(ImageName[8:10]) #get the time the picture was taken from the name of the file\n hour=float(10)\n if 8<hour<16: # apply a time condition \n ######################################\n if ROI!='Whole pictures':\n if np.shape(TestImageBGR)==ShapeFirstImage or np.shape(TestImageBGR)==ShapeFirstImage2 or np.shape(TestImageBGR)==ShapeFirstImage3 or np.shape(TestImageBGR)==ShapeFirstImage4 or np.shape(TestImageBGR)==ShapeFirstImage5 or 
np.shape(TestImageBGR)==ShapeFirstImage6 or np.shape(TestImageBGR)==ShapeFirstImage7 or np.shape(TestImageBGR)==ShapeFirstImage8 or np.shape(TestImageBGR)==ShapeFirstImage9 or np.shape(TestImageBGR)==ShapeFirstImage10 : # Test the size of the picture\n for j in range(len(ROI)): \n #Crop the picture for each ROI\n x1,y1,x2,y2=ROI[j]\n if x1>x2:\n a=x1\n x1=x2\n x2=a\n if y1>y2:\n a=y1\n y1=y2\n y2=a \n croppedImagej=TestImageBGR[y1:y2,x1:x2] \n \n NameArea=ListAreaNames[j] \n #Initialize the output names\n OutputMaskName=''\n OutputimageName=''\n OutputNFMaskName=''\n \n #Create the output names and folders\n if maskY_N=='Y': \n croppedMaskDirectoryArea=WorkingDirectory+'/Masks/'+NameArea \n if not os.path.exists(croppedMaskDirectoryArea): \n os.mkdir(croppedMaskDirectoryArea)\n OutputMaskName=croppedMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_mask.png'\n \n if imageY_N=='Y': \n croppedMaskedImagesDirectoryArea=WorkingDirectory+'/MaskedImages/'+NameArea \n if not os.path.exists(croppedMaskedImagesDirectoryArea): \n os.mkdir(croppedMaskedImagesDirectoryArea) \n OutputimageName=croppedMaskedImagesDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_maskedImage.png'\n \n if NFMaskY_N=='Y':\n croppedNonFilteredMaskDirectoryArea=WorkingDirectory+'/NonFilteredMasks/'+NameArea \n if not os.path.exists(croppedNonFilteredMaskDirectoryArea): \n os.mkdir(croppedNonFilteredMaskDirectoryArea) \n OutputNFMaskName=croppedNonFilteredMaskDirectoryArea+'/'+ImageName+'_crop_'+NameArea+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, croppedImagej, ImageName, NameArea, noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' '+str(NameArea)+' Done!') \n else: #if the picture is not the right size \n ListImageWrongSize.append(i) \n print(str(ImageName)+' Wrong size')\n \n else: #if the user wants to use the whole pictures\n #Create the output names\n OutputMaskName=WorkingDirectory+'/Masks/'+ImageName+'_mask.png'\n OutputimageName=WorkingDirectory+'/MaskedImages/'+ImageName+'_maskedImage.png'\n OutputNFMaskName=WorkingDirectory+'/NonFilteredMasks/'+ImageName+'_NFMask.png'\n \n # Segment the image with the function ApplyModelAndSaveOutput\n ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes=ApplyModelAndSaveOutput(model, modelname, TestImageBGR, ImageName, '', noiseReduction, numberOfClasses, classesNamesList, fusionClassesY_N, maskY_N, InfoY_N, imageY_N, NFMaskY_N, BiggestBlobY_N, chosenArea, OutputMaskName, OutputimageName, OutputNFMaskName, ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes)\n \n \n print(str(ImageName)+' Done!')\n \n end_time = time.monotonic()\n RunningTime=timedelta(seconds=end_time - start_time)\n sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)\n \n if i==ListImageName[0]: # get an estimation of the running time after the first picture is done\n print('Running time for 1 image =', RunningTime)\n print('Total running time estimation =', RunningTime*len(ListImageName))\n ListRunningTimes.append(sec) \n \n \n else: # usefull only if you apply a time filter \n ListImageWrongSize.append(i) \n print(str(ImageName)+' Wrong time')\n \n # Save the info 
file \n if len(ListAirs)>1:\n np.savetxt(WorkingDirectory+'/'+'InformationFile.csv', ListAirs, delimiter=\",\", comments='', fmt='%s') \n \n return ListImageWrongSize,ListRunningTimes, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes",
"def main(filepath, maskpath):\n analytics.result = {}\n img_mask = nib.load(maskpath).get_fdata()\n print(\"loading\\n\", flush=True)\n # segmentation\n print(\"loading segmentation...\\n\", flush=True)\n seg = nib.load(filepath).get_fdata()\n # post processing\n print(\"applying some post processing...\\n\", flush=True)\n seg = apply_mask(seg, img_mask)\n seg_2d = binarize(seg, img_mask)\n print(\"End of slice processing\\n\", flush=True) \n distance_map, skel = analytics.distance(seg_2d)\n print(\"distance\\n\", flush=True)\n dist_per_label , skel= analytics.label_value(distance_map, skel)\n print(\"label_value\\n\", flush=True) \n analytics.get_analytics(seg, img_mask, dist_per_label, skel, verbose=True)\n print(\"got analytics\\n\", flush=True)",
"def _cmd_diagram(args):\n if not args.filename and not args.segment:\n raise ValueError(\n \"Must specify a filename as an argument or with \"\n \"the '-s' option, or both. You did neither.\"\n )\n\n cnarr = read_cna(args.filename) if args.filename else None\n segarr = read_cna(args.segment) if args.segment else None\n if args.adjust_xy:\n is_sample_female = verify_sample_sex(\n cnarr or segarr, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n if cnarr:\n cnarr = cnarr.shift_xx(args.male_reference, is_sample_female)\n if segarr:\n segarr = segarr.shift_xx(args.male_reference, is_sample_female)\n outfname = diagram.create_diagram(\n cnarr,\n segarr,\n args.threshold,\n args.min_probes,\n args.output,\n args.chromosome,\n args.title,\n args.show_labels,\n )\n logging.info(\"Wrote %s\", outfname)",
"def det2seg(cann, output_dir):\n\n if os.path.isdir(output_dir) is False:\n os.makedirs(output_dir, exist_ok=True)\n\n imids = cann.getImgIds()\n cats = cann.loadCats(cann.getCatIds())\n\n cat_colours = {0: (0, 0, 0)}\n\n # Set seed for palette colour\n np.random.seed(121)\n\n # Create category colourmap\n for c in cats:\n cat_colours[c['id']] = (np.random.randint(0,256), np.random.randint(0,256), np.random.randint(0,256))\n\n colour_map = np.array(list(cat_colours.values()))\n if colour_map.shape != (len(cats) + 1, 3):\n raise AssertionError(\"Incorrect shape of color map array\")\n\n for imid in tqdm(imids):\n img = cann.loadImgs(imid)\n if len(img) > 1:\n raise AssertionError(\"Multiple images with same id\")\n h, w = img[0]['height'], img[0]['width']\n name = img[0]['file_name']\n if name[-4:] != \".png\":\n name = name[:-4] + \".png\"\n im = np.zeros((h, w), dtype=np.uint8)\n annids = cann.getAnnIds(imgIds=[imid])\n if not annids:\n # No annotations\n res = Image.fromarray(im)\n res.save(os.path.join(output_dir, '{}'.format(name)))\n else:\n anns = cann.loadAnns(annids)\n for ann in anns:\n poly = ann['segmentation'][0]\n cat = ann['category_id']\n img = Image.new('L', (w, h))\n if len(poly) >= 6:\n ImageDraw.Draw(img).polygon(poly, fill=cat)\n else:\n continue\n mask = np.array(img)\n im = np.maximum(im, mask)\n res = Image.fromarray(im)\n res.putpalette(colour_map.astype(np.uint8))\n res.save(os.path.join(output_dir, '{}'.format(name)))",
"def preproc(indat, Cntd, smooth=True, reftrim='', outpath=None, mode='nac'):\n opth = Path(indat).parent if outpath is None else Path(outpath)\n\n if mode == 'nac':\n outdir = opth / mode.upper()\n elif mode[:3] == 'qnt':\n outdir = opth / mode.upper()\n else:\n raise ValueError('unrecognised mode')\n\n imio.create_dir(outdir)\n\n if isinstance(indat, (str, PurePath)) and Path(indat).is_dir():\n # CONVERT TO NIfTI\n if not imio.dcmdir(indat):\n raise IOError('the provided folder does not contain DICOM files')\n for f in chain(outdir.glob('*.nii*'), outdir.glob('*.json')):\n # remove previous files\n os.remove(f)\n run([dcm2niix.bin, '-i', 'y', '-v', 'n', '-o', outdir, '-f', '%f_%s', str(indat)])\n fnii = list(outdir.glob('*offline3D*.nii*'))\n if len(fnii) == 1:\n fnii = fnii[0]\n else:\n raise ValueError('Confusing or missing NIfTI output')\n elif isinstance(indat, (str, PurePath)) and Path(indat).is_file() and hasext(\n indat, ('nii', 'nii.gz')):\n fnii = Path(indat)\n else:\n raise ValueError('the input NIfTI file or DICOM folder do not exist')\n\n # > Gaussian smooth image data if needed\n if smooth:\n if Cntd['fwhm_' + mode[:3]] > 0:\n smostr = '_smo-' + str(Cntd['fwhm_' + mode]).replace('.', '-') + 'mm'\n fnii = prc.imsmooth(\n fnii, fwhm=Cntd['fwhm_' + mode[:3]], fout=outdir /\n (mode.upper() + '_' + fnii.name.split('.nii')[0] + smostr + '.nii.gz'),\n output='file')\n\n Cntd['f' + mode] = fnii\n\n # > trim and upsample the PET\n imup = prc.imtrimup(\n fnii,\n refim=reftrim,\n scale=Cntd['sclt'],\n int_order=Cntd['interp'],\n fmax=0.1, # controls how much trimming there is\n fcomment_pfx=fnii.name.split('.nii')[0] + '__',\n store_img=True)\n\n Cntd[f'f{mode}up'] = Path(imup['fim'])\n\n return Cntd",
"def get_segmented_image(image_path):\n\n # Setup Caffe Segnet\n sys.path.append('/usr/local/lib/python2.7/site-packages')\n caffe_root = '/opt/caffe-segnet/'\n sys.path.insert(0, caffe_root + 'python')\n import caffe\n\n model = 'static/nn_files/segnet_model_driving_webdemo.prototxt'\n weights = 'static/nn_files/segnet_weights_driving_webdemo.caffemodel'\n colours = 'static/nn_files/camvid12.png'\n\n net = caffe.Net(model,weights, caffe.TEST)\n caffe.set_mode_cpu()\n\n input_shape = net.blobs['data'].data.shape\n output_shape = net.blobs['argmax'].data.shape\n label_colours = cv2.imread(colours).astype(np.uint8)\n\n resized_images = slice_and_resize(image_path)\n\n images = [ cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) for img in resized_images ]\n\n\n\n def segment_image(image):\n input_image = image.transpose((2,0,1))\n input_image = image.transpose((2,0,1))\n input_image = np.asarray([input_image])\n\n out = net.forward_all(data=input_image)\n\n segmentation_ind = np.squeeze(net.blobs['argmax'].data)\n segmentation_ind_3ch = np.resize(segmentation_ind, (3, input_shape[2], input_shape[3]))\n segmentation_ind_3ch = segmentation_ind_3ch.transpose(1,2,0).astype(np.uint8)\n segmentation_rgb = np.zeros(segmentation_ind_3ch.shape, dtype=np.uint8)\n\n cv2.LUT(segmentation_ind_3ch, label_colours, segmentation_rgb)\n\n return segmentation_rgb\n\n segmented_images = map(segment_image, images)\n\n # 5. Create a single full image from the segmented parts\n segmented_full_image = join_images_horizontally(segmented_images)\n\n folder = \"static/images/segmented\"\n os.system(\"rm %s/*.png\" % (folder))\n\n name = next(tempfile._get_candidate_names())\n segment_path = \"%s/%s_resized.png\" % (folder, name)\n segmented_full_image.save(segment_path)\n return segment_path",
"def singleHierarchy(config_path, model_path, image_path, cuda, crf, sizeThresh=1/9, nIterations=10, doPlot=True):\r\n\r\n # Setup\r\n CONFIG = OmegaConf.load(config_path)\r\n device = get_device(cuda)\r\n torch.set_grad_enabled(False)\r\n\r\n classes = get_classtable(CONFIG)\r\n postprocessor = setup_postprocessor(CONFIG) if crf else None\r\n\r\n model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)\r\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)\r\n model.load_state_dict(state_dict)\r\n model.eval()\r\n model.to(device)\r\n print(\"Model:\", CONFIG.MODEL.NAME)\r\n\r\n # Inference\r\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\r\n image, raw_image = preprocessing(image, device, CONFIG)\r\n # labelmap = inference(model, image, raw_image, postprocessor)\r\n labelmapList = inferenceHierarchy(model, image, raw_image, postprocessor, sizeThresh, nIterations)\r\n if doPlot:\r\n for labelmap in labelmapList:\r\n labels = np.unique(labelmap)\r\n \r\n # Show result for each class\r\n rows = np.floor(np.sqrt(len(labels) + 1))\r\n cols = np.ceil((len(labels) + 1) / rows)\r\n \r\n plt.figure(figsize=(10, 10))\r\n ax = plt.subplot(rows, cols, 1)\r\n ax.set_title(\"Input image\")\r\n ax.imshow(raw_image[:, :, ::-1])\r\n ax.axis(\"off\")\r\n \r\n for i, label in enumerate(labels):\r\n mask = labelmap == label\r\n ax = plt.subplot(rows, cols, i + 2)\r\n ax.set_title(classes[label])\r\n ax.imshow(raw_image[..., ::-1])\r\n ax.imshow(mask.astype(np.float32), alpha=0.5)\r\n ax.axis(\"off\")\r\n \r\n plt.tight_layout()\r\n plt.show()",
"def process_command_line():\n # Add the command line arguments.\n parser = argparse.ArgumentParser(description=\"ilastik autocontext\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # General arguments.\n parser.add_argument(\"--ilastik\", type=str, required=True,\n help=\"path to the file run_ilastik.sh\")\n parser.add_argument(\"--predict_file\", action=\"store_true\",\n help=\"add this flag if ilastik supports the --predict_file option\")\n parser.add_argument(\"-c\", \"--cache\", type=str, default=\"cache\",\n help=\"name of the cache folder\")\n parser.add_argument(\"--compression\", default=\"lzf\", type=str, choices=[\"lzf\", \"gzip\", \"szip\", \"None\"],\n help=\"compression filter for the hdf5 files\")\n parser.add_argument(\"--clear_cache\", action=\"store_true\",\n help=\"clear the cache folder without asking\")\n parser.add_argument(\"--keep_cache\", action=\"store_true\",\n help=\"keep the cache folder without asking\")\n\n # Training arguments.\n parser.add_argument(\"--train\", type=str,\n help=\"path to the ilastik project that will be used for training\")\n parser.add_argument(\"-o\", \"--outfile\", type=str, default=\"\",\n help=\"output file\")\n parser.add_argument(\"-n\", \"--nloops\", type=int, default=3,\n help=\"number of autocontext loop iterations\")\n parser.add_argument(\"-d\", \"--labeldataset\", type=int, default=-1,\n help=\"id of dataset in the ilp file that contains the labels (-1: use all datasets)\")\n parser.add_argument(\"--seed\", type=int, default=None,\n help=\"the random seed\")\n parser.add_argument(\"--weights\", type=float, nargs=\"*\", default=[],\n help=\"amount of labels that are used in each round\")\n\n # Batch prediction arguments.\n parser.add_argument(\"--batch_predict\", type=str,\n help=\"path of the cache folder of a previously trained autocontext that will be used for batch \"\n \"prediction\")\n parser.add_argument(\"--files\", type=str, nargs=\"+\",\n help=\"the files for the batch prediction\")\n parser.add_argument(\"--no_overwrite\", action=\"store_true\",\n help=\"create one _probs file for each autocontext iteration in the batch prediction\")\n\n # Do the parsing.\n args, ilastik_args = parser.parse_known_args()\n\n # Expand the filenames.\n args.ilastik = os.path.expanduser(args.ilastik)\n args.cache = os.path.expanduser(args.cache)\n if args.train is not None:\n args.train = os.path.expanduser(args.train)\n args.outfile = os.path.expanduser(args.outfile)\n if args.batch_predict is not None:\n args.batch_predict = os.path.expanduser(args.batch_predict)\n\n # Check if ilastik is an executable.\n if not os.path.isfile(args.ilastik) or not os.access(args.ilastik, os.X_OK):\n raise Exception(\"%s is not an executable file.\" % args.ilastik)\n\n # Check that only one of the options --clear_cache, --keep_cache was set.\n if args.clear_cache and args.keep_cache:\n raise Exception(\"--clear_cache and --keep_cache must not be combined.\")\n\n # Check for conflicts between training and batch prediction arguments.\n if args.train is None and args.batch_predict is None:\n raise Exception(\"One of the arguments --train or --batch_predict must be given.\")\n if args.train is not None and args.batch_predict is not None:\n raise Exception(\"--train and --batch_predict must not be combined.\")\n\n # Check if the training arguments are valid.\n if args.train:\n if len(ilastik_args) > 0:\n raise Exception(\"The training does not accept unknown arguments: %s\" % ilastik_args)\n if args.files is not None:\n raise 
Exception(\"--train cannot be used for batch prediction.\")\n if not os.path.isfile(args.train):\n raise Exception(\"%s is not a file.\" % args.train)\n if len(args.outfile) == 0:\n file_path, file_ext = os.path.splitext(args.train)\n args.outfile = file_path + \"_out\" + file_ext\n if args.labeldataset < -1:\n raise Exception(\"Wrong id of label dataset: %d\" % args.d)\n if args.compression == \"None\":\n args.compression = None\n if len(args.weights) == 0:\n args.weights = None\n if args.weights is not None and len(args.weights) != args.nloops:\n raise Exception(\"Number of weights must be equal to number of autocontext iterations.\")\n\n # Check if the batch prediction arguments are valid.\n if args.batch_predict:\n if os.path.normpath(os.path.abspath(args.batch_predict)) == os.path.normpath(os.path.abspath(args.cache)):\n raise Exception(\"The --batch_predict and --cache directories must be different.\")\n if args.files is None:\n raise Exception(\"Tried to use batch prediction without --files.\")\n if not os.path.isdir(args.batch_predict):\n raise Exception(\"%s is not a directory.\" % args.batch_predict)\n\n # Expand filenames that include *.\n expanded_files = [os.path.expanduser(f) for f in args.files]\n args.files = []\n for filename in expanded_files:\n if \"*\" in filename:\n if \".h5/\" in filename or \".hdf5/\" in filename:\n if \".h5/\" in filename:\n i = filename.index(\".h5\")\n filename_path = filename[:i+3]\n filename_key = filename[i+4:]\n else:\n i = filename.index(\".hdf5\")\n filename_path = filename[:i+5]\n filename_key = filename[i+6:]\n to_append = glob.glob(filename_path)\n to_append = [f + \"/\" + filename_key for f in to_append]\n args.files += to_append\n else:\n args.files += glob.glob(filename)\n else:\n args.files.append(filename)\n\n # Remove the --headless, --project and --output_internal_path arguments.\n ilastik_parser = argparse.ArgumentParser()\n ilastik_parser.add_argument(\"--headless\", action=\"store_true\")\n ilastik_parser.add_argument(\"--project\", type=str)\n ilastik_args = ilastik_parser.parse_known_args(ilastik_args)[1]\n\n return args, ilastik_args",
"def run_config(self, device, command, *argv, **kwarg):\n ############# Implement me ################\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n params = kwarg[\"params\"]\n if not params or not params[0]:\n return 0, \"Need to specify config file name\"\n param = params[0]\n fname = param[\"config_file_name\"]\n name = os.path.basename(fname)\n if command == \"load_config\":\n files = IxnetworkIxiaClientImpl.session.GetFileList()\n found = False\n for f in files[\"files\"]:\n if f[\"name\"] == name:\n found = True\n break\n if not found:\n out = IxnetworkIxiaClientImpl.session.UploadFile(fname, name)\n out = IxnetworkIxiaClientImpl.ixnet.LoadConfig(Files(name))\n # get the traffic items back\n IxnetworkIxiaClientImpl.tis = IxnetworkIxiaClientImpl.ixnet.Traffic.TrafficItem.find()\n elif command == \"save_config\":\n out = IxnetworkIxiaClientImpl.ixnet.SaveConfig(Files(name))\n out += IxnetworkIxiaClientImpl.session.DownloadFile(name, fname)\n return 0, out",
"def create_seg(ctx,\n t1_dicom_file,\n aseg_image_file,\n aseg_dicom_seg_output,\n seg_metadata):\n ctx = utils.check_docker_and_license(ctx)\n\n # make sure any tilde in path names are resolved\n t1_dicom_file = os.path.expanduser(t1_dicom_file)\n aseg_image_file = os.path.expanduser(aseg_image_file)\n aseg_dicom_seg_output = os.path.expanduser(aseg_dicom_seg_output)\n seg_metadata = os.path.expanduser(seg_metadata)\n\n docker_user_string = utils.get_docker_user(aseg_image_file)\n\n with tempfile.TemporaryDirectory() as seg_temp_dir:\n print('[fs2dicom] Running create-seg with {temp_dir} as tempdir\\n'.format(temp_dir=seg_temp_dir))\n resampled_aseg = os.path.join(seg_temp_dir,\n 'aseg_native_space.nii.gz')\n t1_dicom_dir = utils.abs_dirname(t1_dicom_file)\n docker_user_string = utils.get_docker_user(aseg_image_file)\n\n resample_aseg_cmd = seg.get_resample_aseg_cmd(aseg_image_file,\n t1_dicom_file,\n resampled_aseg)\n generate_dicom_seg_cmd = seg.get_generate_dicom_seg_cmd(resampled_aseg,\n seg_metadata,\n t1_dicom_file,\n aseg_dicom_seg_output)\n\n fs_commands = [resample_aseg_cmd]\n if ctx.obj['freesurfer_type'] == 'docker':\n \"\"\"\n Inputs (ro):\n t1_dicom_dir\n aseg_image_file\n Output directories (rw):\n seg_temp_dir\n \"\"\"\n volumes_dict = {t1_dicom_dir: {'bind': t1_dicom_dir,\n 'mode': 'ro'},\n aseg_image_file: {'bind': aseg_image_file,\n 'mode': 'ro'},\n seg_temp_dir: {'bind': seg_temp_dir,\n 'mode': 'rw'}}\n\n environment_dict = {'FS_KEY': utils.base64_convert(ctx.obj['fs_license_key'])}\n\n utils.run_docker_commands(docker_image=ctx.obj['freesurfer_docker_image'],\n commands=fs_commands,\n volumes=volumes_dict,\n user='root', # corticometrics/fs6-base needs to be root\n environment=environment_dict,\n working_dir=seg_temp_dir)\n else:\n utils.run_local_commands(fs_commands)\n\n dcmqi_commands = [generate_dicom_seg_cmd]\n if ctx.obj['dcmqi_type'] == 'docker':\n \"\"\"\n Inputs (ro):\n resampled_aseg\n seg_metadata\n t1_dicom_dir\n Output directories (rw):\n aseg_dicom_seg_output directrory (output dir)\n \"\"\"\n output_dir = utils.abs_dirname(aseg_dicom_seg_output)\n volumes_dict = {resampled_aseg: {'bind': resampled_aseg,\n 'mode': 'ro'},\n seg_metadata: {'bind': seg_metadata,\n 'mode': 'ro'},\n t1_dicom_dir: {'bind': t1_dicom_dir,\n 'mode': 'ro'},\n output_dir: {'bind': output_dir,\n 'mode': 'rw'}}\n\n utils.run_docker_commands(docker_image=ctx.obj['dcmqi_docker_image'],\n commands=dcmqi_commands,\n volumes=volumes_dict,\n user=docker_user_string,\n working_dir=output_dir)\n else:\n utils.run_local_commands(dcmqi_commands)",
"def _cmd_segment(args):\n cnarr = read_cna(args.filename)\n variants = load_het_snps(\n args.vcf,\n args.sample_id,\n args.normal_id,\n args.min_variant_depth,\n args.zygosity_freq,\n )\n results = segmentation.do_segmentation(\n cnarr,\n args.method,\n args.diploid_parx_genome,\n args.threshold,\n variants=variants,\n skip_low=args.drop_low_coverage,\n skip_outliers=args.drop_outliers,\n save_dataframe=bool(args.dataframe),\n rscript_path=args.rscript_path,\n processes=args.processes,\n smooth_cbs=args.smooth_cbs,\n )\n\n if args.dataframe:\n segments, dframe = results\n with open(args.dataframe, \"w\") as handle:\n handle.write(dframe)\n logging.info(\"Wrote %s\", args.dataframe)\n else:\n segments = results\n tabio.write(segments, args.output or segments.sample_id + \".cns\")",
"def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)",
"def init_algorithm(config, id_algo, id_discdds, discdds):\n # instance the algorithm\n set_current_config(config)\n algo = config.algos.instance(id_algo) \n # initialize the algorithm with the dynamics\n # TODO: add computation time\n #t0 = time.clock()\n algo.set_name_for_log(id_algo)\n algo.init(id_discdds, discdds) \n #init_time = time.clock() - t0\n return algo",
"def cli(avg, config, default, last_nyears, use_month_dim, refinfo, smode, storeconfig, years, indir, outname, verbose):\n \n # example:\n #./lgt_createinput.py processed output --dems=srtm1/*.zip --masks=srtm1_shp_mask --extent -76 -56 -66 -16\n\n if verbose:\n logging.getLogger(__name__).setLevel(logging.DEBUG)\n else:\n logging.getLogger(__name__).setLevel(logging.INFO)\n\n if years != []:\n try:\n years=[int(x) for x in years.split('-')]\n except:\n log.error(\"Years require format 1990-1999\")\n exit(1)\n \n if refinfo != None: \n if ',' in refinfo:\n f,v=[x.strip() for x in refinfo.split(',')[0:2]]\n else:\n f=refinfo.strip()\n v=None\n refinfo=(f,v)\n else:\n refinfo=(None,None)\n \n \n if smode and (use_month_dim == False):\n log.warn(\"Site mode requires the use of a monthly dim (-m). Proceeding.\")\n use_month_dim = True\n \n if avg and (use_month_dim == False):\n log.warn(\"Average mode requires the use of a monthly dim (-m). Proceeding.\")\n use_month_dim = True\n \n # the setup dictionary to convert into a bunch obj\n config_data=dict(AVG=avg,\n CONFIG=config,\n DEFAULT=default,\n LAST_NYEARS=last_nyears,\n USE_MONTH_DIM=use_month_dim,\n REFINFO=refinfo,\n SMODE=smode,\n STORECONFIG=storeconfig,\n YEARS=years,\n INDIR=indir,\n OUTNAME=outname)\n \n # TODO: change logging level based on verbose flag\n cfg = Bunch(config_data)\n\n\n if cfg.STORECONFIG and (cfg.CONFIG is None):\n log.critical(\"Option -S requires that you pass a file with -c.\")\n exit(1)\n\n if cfg.YEARS != [] and cfg.LAST_NYEARS != -1:\n log.critical(\"Use either option -y or Option -l.\")\n exit(1)\n\n main(cfg)",
"def main(folder, outputfile):\n parser = argument_parser()\n args = parser.parse_args()\n\n show_all = args.show_all\n verbose = args.verbose\n\n random.seed(args.rng_seed)\n\n args.files = folder\n print args.files\n\n try:\n image = Image.open(args.files[0])\n except IOError, msg:\n print >> sys.stderr, msg\n return 1\n if image.mode == 'P':\n image = image.convert('RGB')\n \n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n\n if not show_all:\n def nothing(a, b):\n pass\n do_something = nothing\n elif args.saving:\n do_something = Imsave(\"saved/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]) + \"/\").save\n else:\n import im_debug\n do_something = im_debug.show\n\n if verbose:\n import time\n class Logger:\n def __init__(self):\n self.t = 0\n\n def __call__(self, m):\n t_n = time.time()\n if self.t > 0:\n print >> sys.stderr, \"\\t\" + str(t_n - self.t)\n print >> sys.stderr, m\n self.t = t_n\n logger = Logger()\n\n else:\n def logger(m):\n pass\n \n if args.manual_mode:\n import manual\n try:\n lines = manual.find_lines(image)\n except manual.UserQuitError:\n #TODO ask user to try again\n return 1\n else:\n if args.l_cache:\n filename = (\"saved/cache/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]))\n cache_dir = \"/\".join(filename.split('/')[:-1])\n if os.path.exists(filename):\n lines, l1, l2, bounds, hough = pickle.load(open(filename))\n print >> sys.stderr, \"using cached results\"\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n d_file = open(filename, 'wb')\n pickle.dump((lines, l1, l2, bounds, hough), d_file)\n d_file.close()\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n\n grid, lines = gridf.find(lines, image.size, l1, l2, bounds, hough,\n show_all, do_something, logger)\n if show_all:\n im_g = image.copy()\n draw = ImageDraw.Draw(im_g)\n for l in grid[0] + grid[1]:\n draw.line(l, fill=(64, 255, 64), width=1)\n do_something(im_g, \"grid\", name=\"grid\")\n\n intersections = intrsc.b_intersects(image, lines, show_all, do_something, logger)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n\n logger(\"finished\")\n\n # TODO! refactor this mess:\n if len(args.files) == 1:\n\n if args.sgf_output:\n print board.asSGFsetPos()\n else:\n print board\n \n else:\n game = output.Game(19, board) #TODO size parameter\n #for f in args.files[1:]:\n for i, f in enumerate(args.files):\n try:\n image = Image.open(f)\n except IOError, msg:\n print >> sys.stderr, msg\n continue\n if verbose:\n print >> sys.stderr, \"Opening\", f\n if image.mode == 'P':\n image = image.convert('RGB')\n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n if args.sgf_output:\n game.addMove(board)\n else:\n with open(outputfile + str(i) + \".txt\", \"w\") as f:\n f.write(str(board))\n\n if args.sgf_output:\n print game.asSGF()\n\n return 0",
"def segmentation_input_fn(image_files, gt_files=None,shuffle=False, seed=None):\n # Converting to TF dataset:\n image_files = tf.constant(image_files)\n data_dict = {'image': image_files}\n if gt_files is not None:\n gt_files = tf.constant(gt_files)\n data_dict['segmentation_mask'] = gt_files\n \n dataset = tf.data.Dataset.from_tensor_slices(data_dict)\n if shuffle:\n dataset = dataset.shuffle(buffer_size=1000, seed=seed)\n dataset = dataset.prefetch(1)\n\n # Batching + adding parsing operation:\n parse_fn = functools.partial(parse_function,)\n dataset = dataset.map(parse_fn, num_parallel_calls=4)\n \n return dataset"
]
| [
"0.6112436",
"0.6015282",
"0.5750639",
"0.56479937",
"0.5598787",
"0.5504381",
"0.5437937",
"0.5428975",
"0.54104406",
"0.53636897",
"0.5304661",
"0.52893245",
"0.5221492",
"0.5198504",
"0.5196194",
"0.5154485",
"0.5152543",
"0.51421636",
"0.51167274",
"0.5087006",
"0.50604945",
"0.50472033",
"0.5045322",
"0.50424486",
"0.5040691",
"0.50283617",
"0.5017042",
"0.50108534",
"0.499896",
"0.49725953"
]
| 0.73100406 | 0 |
Squares the numbers in num_list | def squared_nums(num_list):
new_list = [] # initialize a new list
for num in num_list: # iterate through the list
squared_num = pow(num, 2) # raises to the power of 2
new_list.append(squared_num) # appends the result to the new list
return new_list # returns the new list
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def square_nums(number_list):",
"def squared(num_list):\n new_list=[]\n for num in num_list:\n sq_num=pow(num,2)\n new_list.append(sq_num)\n return new_list",
"def lsquare_of_sums(inlist):\r\n s = sum(inlist)\r\n return float(s)*s",
"def raizCuadrada(listNum):\n\n return [math.sqrt(n) for n in listNum]",
"def double_nums(num_list):",
"def _root_sum_of_squares(list):\n return sum((el ** 2 for el in list)) ** (0.5)",
"def sortedSquares(nums: List[int]) -> List[int]:\n return sorted([n*n for n in nums])",
"def multiplication_total_of(num_list):",
"def get_squares(n):\n\n return sum([i * i for i in range(n)])",
"def square_numbers_1(nums):\n result = []\n for i in nums:\n result.append(i*i)\n return result",
"def square(numbers):\n\n # Needs only one argument\n newlist = []\n for num in numbers:\n newlist.append(num*num)\n return newlist",
"def question_23(list_num: float) -> float:\n return sum(list_num) / len(list_num)",
"def sumOfSquares(num):\n sum = 0\n for i in range(1, num + 1):\n sum += i ** 2\n return sum",
"def lss(inlist):\r\n ss = 0\r\n for item in inlist:\r\n ss = ss + item*item\r\n return ss",
"def list_squared(start, stop):\n result = []\n\n for num in range(start, stop):\n divisors = set(chain.from_iterable((\n [i, num/i] for i in range(1, int(math.sqrt(num)) + 1)\n if num % i == 0\n )))\n divisor_squares = [x*x for x in divisors]\n divisor_squares_sum = sum(divisor_squares)\n if math.sqrt(divisor_squares_sum).is_integer():\n result.append([num, divisor_squares_sum])\n\n return result",
"def my_squares(iters):\n out = [i ** 2 for i in range(iters)]\n return out",
"def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)",
"def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)",
"def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)",
"def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)",
"def seq_sqrt(xs):\n num_list = []\n for xs_split in xs:\n print(xs)\n xs_num = int(xs_split)\n print(xs_num)\n xs_squrt = math.sqrt(xs_num)\n print(xs_squrt)\n num_list.append(xs_squrt)\n return num_list",
"def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result",
"def sum_of_squares(v):\n return sum(v_i * v_i for v_i in v)",
"def sumSquares(aList):\r\n if isinstance(aList, list):\r\n total=0\r\n for value in aList:\r\n if(isinstance(value, int) or isinstance(value, float)) and abs(value)%3==0:\r\n total+=value**2\r\n return total\r\n else:\r\n return 'error'",
"def square(n):\n\n result = [num * num for num in range(n)]\n\n return result[1:]",
"def my_squares(iters):\n out = []\n for i in range(iters):\n out.append(i ** 2)\n return out",
"def square_or_square_root(numbers):\n result = []\n for element in numbers:\n root = element ** 0.5\n if root.is_integer():\n result.append(int(root))\n else:\n result.append(int(element * element))\n return result",
"def squareOfSum(num):\n return sum(range(1, num + 1)) ** 2",
"def sum_squared(variable_list):\n return sum([el * el for el in variable_list])",
"def lessthan_5(num_list):"
]
| [
"0.8848842",
"0.78166455",
"0.7215354",
"0.7150204",
"0.70824355",
"0.6997519",
"0.69374",
"0.69096386",
"0.688807",
"0.677243",
"0.6726922",
"0.67053944",
"0.66111624",
"0.66106373",
"0.6603098",
"0.65683067",
"0.64936507",
"0.64936507",
"0.64936507",
"0.64936507",
"0.64783996",
"0.6456685",
"0.6428368",
"0.6417195",
"0.63856333",
"0.63561624",
"0.6347805",
"0.63354576",
"0.63085335",
"0.6259744"
]
| 0.79106903 | 1 |
Removes strings in title_list that have numbers and are not title case. title_list = list of strings | def check_title(title_list):
    new_list = []  # initialize a new list
    for title in title_list:  # iterate through the list, keeping only title-case strings
        if title.istitle():  # check whether the string is title case
            new_list.append(title)  # if it is, append it to the new list
    return new_list  # return the new, filtered list
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_title(title_list):\n for w_index in range(len(title_list)):\n title_list[w_index] = title_list[w_index].replace('_', ' ')\n return [word for word in title_list if word.istitle()]",
"def keep_lowercase(str_list):",
"def fix_title(title):\n words = re.findall('[A-Z][^A-Z]*', title[0])\n final_str = \"\"\n for word in words:\n final_str += word + \" \"\n return final_str.strip()",
"def remove_tokens_with_letters_and_numbers(text_list):\n for i in range(len(text_list)):\n t = text_list[i]\n if any(c.isdigit() for c in t) and any(c.isalpha() for c in t):\n text_list[i] = REPLACEMENT_STRING\n return text_list",
"def _remove_title_from_name(titles: tuple, text: str) -> str:\n for title in titles:\n if f'{title}.' in text:\n return text.replace(f'{title}.', empty_string).replace(' ', space).strip()\n elif title in text:\n return text.replace(title, empty_string).replace(' ', space).strip()\n return text",
"def process_title(self, title, stpwds, stemmer):\n # Convert article title to lowercase and tokenize it\n title = title.lower().split(\" \")\n # Remove the stopwords\n title = [token for token in title if token not in stpwds]\n return [stemmer.stem(token) for token in title]",
"def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned",
"def dedup_and_title_case_names(names):\n names1 =[]\n for n in names:\n if n.title() not in names1:\n names1.append(n.title())\n return names1\n # return [n.title() for n in names if n.title() not in names1]\n pass",
"def clean_title(self):\n # split into tokens by white space\n tokens = self.title.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n self.title = tokens",
"def clean_title(data):\n def dealer_title(series):\n series = series.lower()\n key_words = ['iphone x', 'iphonex']\n if series.find(key_words[0]) == -1 and series.find(key_words[-1]) == -1:\n return -1\n return 1\n\n def dealer_price(series):\n if series == 'Sold!':\n return -1\n return 1\n\n assert isinstance(data, pd.DataFrame)\n info_tit = data['Title'].apply(dealer_title)\n info_tit = info_tit[info_tit == -1].index.tolist()\n data = data.drop(axis=0, index=info_tit, inplace=False)\n\n info_pri = data['Price'].apply(dealer_price)\n info_pri = info_pri[info_pri == -1].index.tolist()\n data = data.drop(axis=0, index=info_pri, inplace=False)\n\n data = data.reset_index(drop=True)\n return data",
"def convert_post_title(title):\n post_title = []\n for c in title.lower():\n if not c.isalnum():\n c = c.replace(c, '-')\n post_title.append(c)\n return ''.join(post_title)",
"def clean_title(title):\n title = re.sub(\"\\n\", \"\", title) # Remove newlines\n title = ' '.join(title.split()) # Turn multiple whitespaces into a single one\n title = title.lower() # Make everything lowercase\n return title",
"def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title",
"def transformation_minuscules(liste_car: List[str]) -> List[str]:\n for i in range(len(liste_car)):\n if liste_car[i].islower():\n liste_car[i] = liste_car[i].upper()\n return liste_car",
"def transformation_minuscules(liste_car: List[str]) -> List[str]:\n for i in range(len(liste_car)):\n if liste_car[i].islower():\n liste_car[i] = liste_car[i].upper()\n return liste_car",
"def remove_special_characters(string_list):",
"def filter_and_sort_number_strings():\n# fill it out\n result = []\n for s in STRING_LIST:\n if (s.isnumeric()):\n result.append(s)\n return sorted(result)",
"def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names",
"def normalise_title(title):\n normalised = title.lower()\n if normalised.startswith('the '):\n normalised = normalised[4:]\n normalised = re.sub('[^a-z ]', '', normalised)\n normalised = re.sub(' +', ' ', normalised)\n normalised = normalised.replace(' the ', ' ')\n return normalised",
"def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())",
"def clean_field_title(title):\r\n return ''.join((c if c in ALLOWED_CHARS else '_') for c in title.lower())",
"def getSortTitle(dictList):\n\ttitle = dictList['title'].lower().strip()\n\tfirstword = title.split(\" \",1)[0]\n\tif firstword in ['a', 'an', 'the']:\n\t\ttitle = title.split(firstword, 1)[-1]\n\treturn title.strip()",
"def replace_numbers(words):\n p = inflect.engine()\n remove_numbers = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n remove_numbers.append(new_word)\n else:\n remove_numbers.append(word)\n return remove_numbers",
"def clean_title(title):\n tokens = clean_stopwords(title)\n tokens = lemmatize_tokens(tokens)\n return tokens",
"def unify_field_title(title: str):\n\n # Convert to lowercase\n prep_title = title.lower()\n # Now remove all these unneeded ugly symbols\n prep_title = re.sub('[.,:;\\|/\\{\\}\\(\\)\\[\\]\\'\\\"\\+]','', prep_title)\n # Replace multiple whitespaces with one\n prep_title = re.sub(' +', ' ', prep_title)\n\n # The real title is now the replacement of dashes with whitespace\n real_title = prep_title.title()\n # The unified title is the removal of the dashes and whitespace\n unified_title = re.sub('[ \\-]','', prep_title)\n trimmed_unified_title = unified_title[:150]\n return trimmed_unified_title, real_title",
"def remove_words_digits(text):\n return \" \".join([word for word in str(text).split() if not any(c.isdigit() for c in word) ])",
"def parse_title(self) -> list:\n scanning = False # start of a title is found, this may be the second of later part of that.\n ret = [] # to return\n temp = [] # deal with mutiple line titles.\n for page in self.pdf.pages:\n text = page.extract_text()\n # it's possible that a blank page exists which will let text be None.\n if text is None:\n continue\n lines = text.split('\\n')\n\n for line in lines:\n if self.__is_part_of_title(line):\n # middle part of a title\n if scanning:\n temp.append(line)\n # find a new title\n else:\n scanning = True\n temp = [line]\n else:\n # just find an entire title\n if scanning:\n scanning = False\n ret.append(\"\".join(temp))\n # remove wrong titles ( maybe trigger words occur at other part of the document )\n for title in ret:\n if self.title_keyword not in title:\n ret.remove(title)\n return ret",
"def remove_numbers(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n text = re.sub(r'\\b\\d+\\b',' ',text)\n text = re.sub(r'\\s+',' ',text)\n \n new_docs.append(text)\n\n return pd.Series(new_docs)",
"def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title",
"def normalize_title(title = \"\", words = [], **kwargs):\n if title:\n words = nltk.word_tokenize(title)\n\n head = words[0][0].upper() + words[0][1:]\n tail = words[1:]\n\n doc = [[w for w in nltk.word_tokenize(sent)]\n for sent in nltk.sent_tokenize(kwargs[\"doc\"])]\n\n normed_words = [head]\n\n for w in tail:\n if not w[0].isalpha(): # if no alpha, return intact\n normed_words.append(w)\n elif appear_only_in_title(w, words, doc) or all_appear_lower_not_at_sentence_beginning(w, words, doc):\n normed_words.append(lower(w))\n elif appear_only_at_sentence_beginning(w, words, doc):\n normed_words.append(w)\n elif appear_cap_in_sentence_middle(w, words, doc):\n normed_words.append(cap(w))\n else:\n raise ValueError(\"Unexpected case `%s` in `%r`\" %(w, words))\n return normed_words"
]
| [
"0.7345439",
"0.6368069",
"0.6325404",
"0.62862766",
"0.62849927",
"0.6161935",
"0.61029637",
"0.6091569",
"0.6063799",
"0.60401946",
"0.6009834",
"0.5965418",
"0.5922428",
"0.59212744",
"0.59212744",
"0.58840847",
"0.5883111",
"0.5882404",
"0.58639055",
"0.58502173",
"0.58484066",
"0.5838447",
"0.58271664",
"0.5760279",
"0.57503986",
"0.57133234",
"0.5699874",
"0.56921005",
"0.5684398",
"0.5680615"
]
| 0.65617573 | 1 |
Increases inventory of each item in dictionary by 10 | def restock_inventory(inventory):
    new_dictionary = {}  # create a new empty dictionary (not used below)
    for key, value in inventory.items():  # iterate over the inventory's key/value pairs
        inventory[key] = value + 10  # increase each item's count (e.g. pencils) by 10
    return inventory  # return the updated dictionary
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restock_inventory(inventory):\n for k in inventory.keys():\n inventory[k] = inventory[k] + 10\n return inventory",
"def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1",
"def getitem(self):\n self.inventory += 1",
"def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity",
"def add_to_inv(self, item):\n for obj in self.inv:\n if obj.name == item.name:\n self.inv[obj] += 1\n break\n else:\n self.inv[item] = 1",
"def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()",
"def add_item(self,itm,qty=1):\n inv = self.get_inventory()\n s = str(itm)\n inv[s] = inv.get(s, 0) + qty\n self.put_inventory(inv)",
"def report_update():\r\n resources[\"water\"] = resources[\"water\"] - MENU[order][\"ingredients\"][\"water\"]\r\n resources[\"milk\"] = resources[\"milk\"] - MENU[order][\"ingredients\"][\"milk\"]\r\n resources[\"coffee\"] = resources[\"coffee\"] - MENU[order][\"ingredients\"][\"coffee\"]\r\n resources[\"money\"] = resources[\"money\"] + total",
"def replenish(self, amount: int):\n self._inventory += amount",
"def knapsack(items, capacity):\r\n pass",
"def displayInventory(bag):\n print(\"Inventory:\")\n item_total = 0\n for k, v in bag.items():\n print(str(v) + ' ' + str(k))\n item_total += v\n print(\"Total number of items: \" + str(item_total))\n print('\\n')",
"def increase_character_health(character: dict):\r\n if character['HP'] == 10:\r\n return\r\n else:\r\n character['HP'] += 1\r\n return",
"def _increment_quantity(self, units):\n self.quantity += units",
"def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1",
"def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()",
"def update_inventory(item, quantity, price, replacement_cost):\n trans = '{}, {}, {}, {} \\n'.format(quantity)\n with open('inventory.txt', 'a') as file:\n for quantity in quantities:\n if quantity <= 500:\n file.write(trans)\n return quantity",
"def _buy(self, units=1):\n self.quantity -= units",
"def stocks(self, value):\n self._modified = True\n self.quantity = value + self.reserved",
"def __init__(self, order, warehouse_list):\n self.order=order\n \"\"\" \n a list of object with warehouse name and inventory amounts \n (inventory distribution) for these items\n \"\"\"\n self.warehouse_list=warehouse_list\n \"\"\"total amout of item being ordered in this order\"\"\"\n self.total_order_amount=0\n for amount in order.values():\n self.total_order_amount+=amount",
"def update_values(self):\n # have to reset params to 0 when recalculating\n self.total_weight = self.total_price = self.total_value = self.total_fitness = 0\n for index, value in enumerate(self.item_stats):\n if value == 1:\n self.total_weight += self.items[index].weight\n self.total_price += self.items[index].price\n self.total_value += self.items[index].value\n self.total_fitness += self.items[index].fitness",
"def load_knapsack(things,knapsack_cap):\r\n# iteration using ratio \r\n my_team_number_or_name = \"lwang\" # always return this variable as the first item\r\n \r\n items_to_pack = [] # use this list for the indices of the items you load into the knapsack\r\n load = 0.0 # use this variable to keep track of how much volume is already loaded into the backpack\r\n value = 0.0 # value in knapsack\r\n \r\n item_list = [[k,v,float(v[1])/v[0]] for k,v in things.items()]\r\n j = lambda x:x[2]\r\n item_list=sorted(item_list,key=j,reverse=True)\r\n \r\n item_keys = [item[0] for item in item_list]\r\n \r\n for i in range(len(item_keys)):\r\n if load <= knapsack_cap:\r\n pack_item = item_keys[i]\r\n load += things[pack_item][0]\r\n if load <= knapsack_cap:\r\n items_to_pack.append(pack_item)\r\n #load += things[pack_item][0]\r\n value += things[pack_item][1]\r\n return my_team_number_or_name, items_to_pack",
"def remove_from_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] -= 1\n\t\t\tif self.inventory[item] == 0:\n\t\t\t\tdel self.inventory[item]",
"def increase_dict_values_for_low_enough_keys(threshold, dictionary):\n\n dictionary = dictionary.update((x, y+1) for x, y in dictionary.items() if threshold >= x)\n return dictionary",
"def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0",
"def increase_hp():\n\n global character\n character['HP'] += 1",
"def inventoryAdd(obj):\n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()",
"def update_quantity(item: dict, new_qty):\n qty = item.get('quantity')\n if isinstance(qty, dict):\n item['quantity']['value'] = new_qty\n else:\n item['quantity'] = new_qty",
"def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)",
"def inventory_value(self):\n cost = 0\n for bike in self.inventory:\n cost = cost + bike.total_cost()\n return cost",
"def add_item(self, item: str) -> None:\n try:\n current_max = max(self.stoi.values())\n self.stoi[item] = current_max + 1\n except ValueError:\n self.stoi[item] = 0"
]
| [
"0.7979997",
"0.6748842",
"0.6425087",
"0.6418131",
"0.6270335",
"0.6233188",
"0.6208321",
"0.62063247",
"0.6127558",
"0.6091178",
"0.5963571",
"0.58908564",
"0.57726574",
"0.574582",
"0.57426375",
"0.57053244",
"0.56772745",
"0.5624655",
"0.55953807",
"0.558694",
"0.55852354",
"0.55742884",
"0.5543684",
"0.5534114",
"0.5512333",
"0.5490443",
"0.5475354",
"0.5455608",
"0.54535216",
"0.5435113"
]
| 0.7672343 | 1 |
Removes items that have a value of 0 from a dictionary of inventories | def filter_0_items(inventory):
    new_list = []  # create an empty list to hold the keys to remove
    for key in inventory:  # iterate through the inventory
        if inventory[key] == 0:  # check whether the value is 0; if it is,
            new_list.append(key)  # add the key to the new list
    for keys in new_list:  # iterate through new_list
        del inventory[keys]  # remove each zero-valued key from the inventory
    return inventory | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_0_items(inventory):\n for k,v in inventory.copy().items():\n if inventory[k] == 0:\n inventory.pop(k)\n return inventory",
"def clean_up_map(self):\n self.items = [i for i in self.items if i.quantity != 0]",
"def remove_outlier(dict_object, keys):\n for key in keys:\n dict_object.pop(key, 0)",
"def remove_outlier(keys):\n for key in keys:\n data_dict.pop(key, 0)",
"def remove_outlier(dict_object, keys):\r\n for key in keys:\r\n dict_object.pop(key, 0)",
"def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]",
"def filter(self, dict):\n for (pos, hashKey) in enumerate(self._sequence):\n for (key, value) in dict.items():\n data = self.dictionary[hashKey]\n \n if not (data.has_key(key) and data[key].find(value) == 0):\n del self.dictionary[hashKey]\n self._sequence.pop(pos)",
"def remove_non_integers(the_dict):\n return the_dict",
"def clean(self):\n filtered_items = {}\n for name, ls in self.items.items():\n filtered_ls = []\n for i in ls:\n if i.alive():\n filtered_ls.append(i)\n else:\n self.del_item(i)\n filtered_items[name] = filtered_ls\n self.items = filtered_items",
"def remove_item_from_map(): \n ITEM_LIST[ZERO_BASE_PLYR_POS] = int(len(ITEMTYPES) - 2) # Replaces item with the \"None\" item",
"def _filter_zero(self, meta_data):\n meta_data_new = {}\n for video, tracks in meta_data.items():\n new_tracks = {}\n for trk, frames in tracks.items():\n new_frames = {}\n for frm, bbox in frames.items():\n if not isinstance(bbox, dict):\n if len(bbox) == 4:\n x1, y1, x2, y2 = bbox\n w, h = x2 - x1, y2 - y1\n else:\n w, h = bbox\n if w <= 0 or h <= 0:\n continue\n new_frames[frm] = bbox\n if len(new_frames) > 0:\n new_tracks[trk] = new_frames\n if len(new_tracks) > 0:\n meta_data_new[video] = new_tracks\n return meta_data_new",
"def clean_up_metric_dict(metric_dict, filter_string):\n filtered_dict = {k.replace(filter_string, ''):v\n for (k, v) in metric_dict.iteritems()\n if not (v[0] == v[1] == 0)\n }\n return filtered_dict",
"def drop_item(dictionary):\n drops_i = []\n\n for k, v in dictionary.items():\n quantity = dropper(v['rarity'])\n if quantity:\n drops_i.append(Item(name=k, quantity=quantity, **v))\n\n return drops_i",
"def empty_seats(seats, seat_numbers):\n\n for seat in seat_numbers:\n seats[seat] = None\n\n return seats",
"def remove_empty_list(dictionary):\n\n return {k: v for k, v in dictionary.items() if v != []}",
"def simplify_pauli_weights(pauli):\n\ttemp_dic = pauli.copy() # Make a copy of the dictionary\n\tfor key in pauli.keys(): # Iterate over all the Pauli strings\n\t\tif pauli[key] == 0: # If the weight is equal to 0, erase the measurement\n\t\t\ttemp_dic.pop(key)\n\n\treturn temp_dic",
"def _removeInsufficientTransformer(self, working_stats, params):\n\n for choice, subsets in working_stats.items():\n sufficient_values = [value for value in subsets if value > 0]\n if not sufficient_values:\n del working_stats[choice]\n\n return working_stats",
"def _remove_empty(self, data, many):\n if not many:\n for key in list(data):\n if key == 'versions':\n data.pop(key)\n\n return {\n key: value for key, value in data.items()\n if value is not None\n }\n for item in data:\n for key in list(item):\n if (key == 'versions') or (item[key] is None):\n item.pop(key)\n\n return data",
"def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)",
"def declutter(vector):\n for key in vector:\n clutter_values = [value for value in vector[key] if vector[key][value]<2] # gather everything with a value less than two and save it in a list\n for feature in clutter_values: # remove everything in the clutter values from a dictionary\n vector[key].pop(feature,None)\n return vector",
"def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist",
"def get_missing(dicts):\n for d in dicts:\n for k, v in d.items():\n d[k] = set([1, 2, 3, 4, 5, 6, 7, 8, 9]) - set(v)\n return dicts",
"def delete(list_with_dict, value):\n i = 0\n flag = 0\n for dct in list_with_dict:\n if dct.get('number') != value:\n i += 1\n else:\n flag = 1\n\n list_with_dict.pop(i)\n return flag",
"def replace_dict_value(d, bad_values): \n for key, value in d.copy().items(): \n for n in bad_values: \n if n == value: \n del d[key]",
"def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]",
"def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}",
"def remove_zero_bars(dgm):\r\n inds = dgm[:,0] != dgm[:,1]\r\n return dgm[inds,:]",
"def remove_negative(self, filter_table):\n new_table = []\n for record in filter_table:\n if record[1] < 0.0:\n record[1] = 0.0\n new_table.append(record)\n return new_table",
"def filter_subspace(wavefunc: dict, subspace: list) -> dict:\n filtered_wf = deepcopy(wavefunc)\n\n for key in wavefunc.keys():\n if key not in subspace:\n filtered_wf[key] = 0.0j\n\n return filtered_wf",
"def get_items(self):\n return [item for item in self.items if item.quantity > 0]"
]
| [
"0.8096963",
"0.7588919",
"0.6413768",
"0.63757044",
"0.6367684",
"0.63517725",
"0.62293726",
"0.6169815",
"0.61511654",
"0.6059877",
"0.58848983",
"0.57959706",
"0.5737003",
"0.5694725",
"0.5673215",
"0.55944765",
"0.5577092",
"0.55735654",
"0.5566581",
"0.55288976",
"0.5514361",
"0.55114764",
"0.5509332",
"0.5508432",
"0.5501841",
"0.5488957",
"0.54848534",
"0.5465115",
"0.5417419",
"0.5405988"
]
| 0.8019575 | 1 |
Takes grade values from a dictionary and averages them into a final grade | def average_grades(grades):
    for key, value in grades.items():  # iterate through the dictionary's key/value pairs
        grades[key] = sum(value) / len(value)  # replace the list of grades with its average
    return grades  # return the updated grades dictionary
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def average_grades(grades):\n\n for k in grades.keys():\n new_list = grades[k]\n str_len = len(new_list)\n total = float(sum(new_list) / str_len)\n grades[k] = total\n return grades",
"def average_scores(self, scores, education, count):\n\n for key in scores.keys():\n for k in scores[key].keys():\n scores[key][k] = round(scores[key][k] / count[key][k], 1)\n education[key][k] = round(education[key][k] / count[key][k], 1)\n\n return scores, education",
"def get_averages(self):\t\n\t\t\n\t\taverages = {}\n\t\tfor subject in self.grades.iterkeys():\n\t\t\taverages[subject] = float(sum(self.grades[subject])) / len(self.grades[subject])\n\t\treturn averages",
"def grades_average(grades_input):\n sum_of_grades = grades_sum(grades_input)\n average = sum_of_grades / float(len(grades_input))\n\n return average",
"def letter_grades(adict):\n\n for key in adict:\n\t if adict[key] >= 90:\n\t\t adict[key] = 'A'\n\t elif 80 <= adict[key] < 90:\n\t\t adict[key] = 'B'\n\t elif 70 <= adict[key] < 80:\n\t\t adict[key] = 'C'\n\t elif 60 <= adict[key] < 70:\n\t\t adict[key] = 'D'\n\t else:\n\t\t adict[key] = 'F'",
"def avg_hw_one(students_dict):\n scores = [\n hw['Homework 1']\n for hw in students_dict.values()\n ]\n hw_average = sum(scores) / len(scores)\n return hw_average",
"def average_grade(lst):\r\n res = []\r\n for stdnt in lst:\r\n name, avg = stdnt[0], mean(conv_to_num(stdnt[1:]))\r\n res.append([name, avg])\r\n\r\n\r\n return(res)",
"def find_average(dict_list, key):\n working_sum = 0\n for num in dict_list:\n working_sum += float(num[key])\n return round(working_sum / len(dict_list), 2)",
"def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average",
"def average(grade1, grade2, grade3):\n return (grade1 + grade2 + grade3) / 3",
"def normalize_scores(scores):\n #print(scores)\n keys = scores.keys()\n\n sum = 0.0\n for k in keys:\n #print(\"%06f\\t\" % scores.get(k)),\n sum += scores.get(k)\n\n if sum == 1.0:\n return scores\n\n new_scores = {}\n for k in keys:\n new_scores[k] = scores.get(k)/float(sum)\n\n return new_scores",
"def updateGPA(info):\n grades = []\n\n n = 5 # you can change this depends on how many your subjects are\n x = 1\n print(\"Please enter\", n, \"grades: \")\n\n \n for i in range(0, n): #for every grade that's being inputted in goes into the grades list that contains dictionaries\n print(x, \":\")\n x += 1\n grade = int(input())\n grades.append(grade)\n \n grade = calculateGPA(grades)\n\n for letter, numGrade in grading_sys.items():# this is what turns the average grade to its letter grade equivalent\n if numGrade <= grade:\n info[\"GPA\"] = letter\n break\n return info",
"def summaries(e_dict, m_dict):\n for key, value in m_dict.items():\n e_dict[key].append(np.mean(value))\n return e_dict",
"def average_dictlist(dict_list):\r\n avg=sum(dict_list)/len(dict_list)\r\n return avg",
"def average(values):\n\treturn sum(values)/len(values)",
"def compute_average_separability_score(self) -> Dict:\n avg_sep_score = {}\n for class_pair_key, class_pair_val in self.separability_scores.items():\n avg_sep_score[class_pair_key] = np.mean(np.array([val for _, val in class_pair_val.items()]))\n avg_sep_score['agg_with_risk'] = sum(\n np.array([val for _, val in avg_sep_score.items()]) *\n RISK\n ) \n avg_sep_score['agg'] = sum([val for key, val in avg_sep_score.items() if type(key)==int]) \n return avg_sep_score",
"def get_average_grade_of_students(students):\n total_grade = 0\n for row in students:\n total_grade += int(row[5])\n return total_grade/len(students)",
"def day_009_1():\n student_scores = {\n \"Harry\": 81,\n \"Ron\": 78,\n \"Hermione\": 99,\n \"Draco\": 74,\n \"Neville\": 62,\n }\n\n student_grades = {}\n\n for student in student_scores:\n if student_scores[student] <= 70:\n student_grades[student] = \"Fail\"\n elif student_scores[student] <= 80:\n student_grades[student] = \"Acceptable\"\n elif student_scores[student] <= 90:\n student_grades[student] = \"Exceeds Expectations\"\n else:\n student_grades[student] = \"Outstanding\"\n\n print(student_grades)",
"def get_average(value): # fine\r\n average_assignment = 0\r\n average_exam = 0\r\n student_count = 0\r\n if value == 'Assignment':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_assignment += int(student.assignment)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_assignment/student_count\r\n print('{:.2f}'.format(calc))\r\n elif value == 'Exam':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_exam += int(student.exam)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_exam/student_count\r\n print('{:.2f}'.format(calc))",
"def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")",
"def calculate_avg_score(state_score,state_count):\n\tfor state in state_score.keys():\n\t\tstate_score[state] = 1.*state_score[state]/state_count[state]\n\treturn state_score",
"def average_edge(pollster_edges, pollster_errors):\r\n\r\n a = list(pollster_edges.values()) #List the values of the pollster edges\r\n alen = len(a) #Obtain the length of the list of values above\r\n b = list(pollster_edges.keys()) #List the keys of the pollster edges\r\n pollster_errors = pollster_errors \r\n c = [] #Create an empty list\r\n for i in b: #Iterate through each key\r\n c.append(pollster_to_weight(i, pollster_errors)) #Calcualte the weight of each pollster error\r\n c1 = c[0:alen] #Ignore the pollster errors that dont have a pollster edge\r\n final = weighted_average(a, c1)\r\n #Use the weighted average function to get the weighted average function\r\n #to get the weighted avergae of the Edge's, which are weighted by the errors\r\n \r\n\r\n return final",
"def average(values):\n return sum(values) / len(values)",
"def average(values):\n return sum(values) / len(values)",
"def average_probabilities(probabilities):\n result = sum_probabilities(probabilities)\n occurrences = {}\n for probability in probabilities:\n for key in probability.keys():\n if key not in occurrences.keys():\n occurrences[key] = 0\n occurrences[key] = occurrences[key] + 1\n for key in result.keys():\n result[key] = result[key] / occurrences[key]\n return result",
"def grades_sum(grades_input):\n total = 0\n for score in scores: \n total += score\n\n return total",
"def compute_average_metrics(meters):\n metrics = {m: vs.avg for m, vs in meters.items()}\n metrics = {\n m: v if isinstance(v, float) else v.item()\n for m, v in metrics.items()\n }\n return metrics",
"def adjusted_grade(clicker_points, midterm_grade):\n if not clicker_points:\n return midterm_grade\n avg_clicker_points = sum(clicker_points.values())/len(clicker_points)\n joint_set = set(clicker_points).union(set(midterm_grade))\n clicker_extra_points = {student: 1 if clicker_points.get(student)\n >= avg_clicker_points else 0 for\n student in clicker_points}\n return {name: midterm_grade.get(name, 0) +\n clicker_extra_points.get(name, 0)\n for name in joint_set}",
"def compute_average_metrics(meters):\n metrics = {m: vs.avg for m, vs in meters.items()}\n metrics = {\n m: float(v) if isinstance(v, float) or isinstance(v, int) else v.item()\n for m, v in metrics.items()\n }\n return metrics",
"def average(entry):\n return entry['total time (s)'] / float(entry['correct answers'] + entry['wrong answers'])"
]
| [
"0.7873713",
"0.6943277",
"0.68400115",
"0.6595139",
"0.6455717",
"0.6418605",
"0.6334242",
"0.62684715",
"0.623619",
"0.62083447",
"0.61359245",
"0.61074245",
"0.60298246",
"0.59715956",
"0.5936197",
"0.59098405",
"0.5870836",
"0.58561456",
"0.58551854",
"0.58504546",
"0.5847706",
"0.58297175",
"0.5807506",
"0.5807506",
"0.57997125",
"0.57751876",
"0.5760321",
"0.5721859",
"0.5721261",
"0.57174003"
]
| 0.8427332 | 0 |
Checks if the input word is valid or not. Returns boolean accordingly. Filters out any tokens that are links or stop words. | def is_valid_lemma(self, word):
# Expression for finding links. Return false if one is found
# Expression found at https://stackoverflow.com/questions/27515969/regular-expression-validation-php/27516155
expression = re.compile(r'(http|https|ftp|ftps)\:\/\/[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\/\S*)?')
if expression.search(word):
return False
# Remove anything but alphanumeric, spaces, and '
word = re.sub('[^a-zA-Z\'\d\s]', '', word)
word = word.lower()
for stop_word in stop_words:
# If input word matches a stop word, return false
if stop_word == word:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, word):\n\n return self.valid_word(word)",
"def validate(self, word):\n\n return self.valid_word(word)",
"def filter_tokens(x):\n if x in _STOP_WORDS:\n return False\n if not re.search(r'\\w', x):\n # Does not contain at least one word character\n return False\n return True",
"def _is_real_word(self, token):\n return not (token in self._non_words)",
"def validate_word(self, word, normalize=True):\n return not self._segs(word, include_valid=False, include_invalid=True, normalize=normalize)",
"def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)",
"def validate_word(word: str) -> bool:\n if word:\n url = f'{OXFORD_DICT_BASE_URL}{OXFORD_DICT_ENTRY_URL}/en-us/{word.lower()}'\n headers = {\n 'app_id': settings.OXFORD_APP_ID,\n 'app_key': settings.OXFORD_API_KEY,\n }\n\n logger.info(f'validating {word} against oxford dictionary...')\n response = requests.get(\n url,\n headers=headers,\n )\n\n if response.status_code == status.HTTP_200_OK:\n return True\n else:\n return False\n\n return False",
"def valid_word(self, word):\n if self.graph.is_in(word):\n return word\n return None",
"def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))",
"def word_is_url(word):\n match = URL_REGEX.search(word)\n return True if match is not None else False",
"def filter1(word):\n if not word: return False\n w = word.lower()\n if w in STOPWORDS: return False\n return True",
"def check_word(word):\r\n if word in word_master:\r\n valid = True\r\n else:\r\n valid = False\r\n return valid",
"def valid_word(self, word, pos_tag=None):\n return (\n word not in self.stopwords\n and (self.max_word_len > len(word) > self.min_word_len)\n and (pos_tag==None or self.allowed_pos_tags==None or pos_tag not in self.allowed_pos_tags)\n )",
"def check_word(self, word):\n\n return self.graph.is_in(word)",
"def is_word(self, word):\r\n\r\n return self.data(word) is not None",
"def checkWord(self, word):\n\t\treturn self.root.checkString(u' ' + word);",
"def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary",
"def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True",
"def checkWord(self, word):\n toFill = int(tapeLimit - len(word) / 2)\n tape = [self.blank] * toFill + list(word) + [self.blank] * toFill\n\n state = self.initialState\n pos = toFill\n\n while state != self.acceptState and state != self.rejectState:\n transition = self.transitions[(state, tape[pos])]\n state = transition[0]\n tape[pos] = transition[1]\n\n if transition[2] == '>':\n pos += 1\n elif transition[2] == '<':\n pos -= 1\n\n return state == self.acceptState",
"def is_stop_word(self, word):\n pass",
"def check(self, word: str) -> bool:\n for s in (word, word.lower(), word.capitalize()):\n if s in self.words or s in self.ignored_words:\n return True\n return False",
"def validate(self, word):\n\n # Strip unwanted characters\n clean = re.sub(r\"[^a-zA-Z- ]+\", \"\", word).strip().lower()\n if len(clean) <= 1:\n return None # Word too short\n\n # Generate candidates for possible compound words\n # \"valid\" -> [\"valid\"]\n # \"cul de sac\" -> [\"cul-de-sac\", \"culdesac\"]\n # \"top-hat\" -> [\"top-hat\", \"tophat\"]\n candidates = []\n if \" \" in clean:\n candidates.append(re.sub(r\" +\", \"-\", clean))\n candidates.append(re.sub(r\" +\", \"\", clean))\n else:\n candidates.append(clean)\n if \"-\" in clean:\n candidates.append(re.sub(r\"-+\", \"\", clean))\n for cand in candidates:\n if cand in self.vectors:\n return cand # Return first word that is in model\n return None # Could not find valid word",
"def check_word(word):\n\n return bool(re.match(r'^[a-z]+$', word))",
"def check_for_symbols(word: str) -> bool:\n \n if MENTION_SYMBOL in word or HASH_SYMBOL in word or URL_START in word:\n return False\n return True",
"def verify(self, word):\n if len(word) < 2:\n return (True, word)\n\n if word.lower() in self.replacement_words.keys():\n return (True, self.replacement_words[word.lower()])\n\n if word.lower() in self.word_list:\n return (True, word)\n\n if word.lower() in self.ignored_words:\n return (True, word)\n\n return (False, word)",
"def valid_word(self, word):\n\n word = ''.join(word)\n return next((w for w in self.words if w == word), None)",
"def is_stopword(self, word, language):",
"def is_stop_word(word):\n return word in final_stop_words",
"def is_valid_words(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.words is not None:\n return True\n return False",
"def accepts(self, word: Iterable[str]) -> bool:\n if self._enfa is None:\n self._enfa = self.to_epsilon_nfa()\n return self._enfa.accepts(word)"
]
| [
"0.70614296",
"0.70614296",
"0.67371285",
"0.66866434",
"0.66483504",
"0.6411789",
"0.6401606",
"0.6308335",
"0.63083124",
"0.62643343",
"0.62420446",
"0.6230033",
"0.62088037",
"0.62052774",
"0.6191632",
"0.61900467",
"0.6139754",
"0.61169666",
"0.6110598",
"0.6090266",
"0.60882324",
"0.60845184",
"0.6078498",
"0.6074176",
"0.60615605",
"0.6015706",
"0.6004019",
"0.5996411",
"0.5989345",
"0.5953992"
]
| 0.7445588 | 0 |
Inputs a single string that represents a line of data (tweet, news article, etc). Breaks the string into a list of sentences, and passes each sentence through a lemmatizer and validator. Returns a list of prepared sentences. | def prepare_data(self, data):
    # Break the string into a list of sentences
    in_sentances = tokenize.sent_tokenize(data)
    out_sentances = list()
    for sentance in in_sentances:
        # Turn each word in the sentence into its lemma
        lemmas = [self.lemmatizer.lemmatize(word) for word in sentance.split(" ")]
        # Filter out all words that fail the is_valid_lemma check
        lemmas = [lemma for lemma in lemmas if self.is_valid_lemma(lemma)]
        # Join the words back together and add the sentence to the list
        sentance = ' '.join(lemmas)
        out_sentances.append(sentance)
    return out_sentances
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parser(sent_list): #input: list of sentences",
"def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words",
"def preprocess_lines(movie_line):\n\ttokens = tokenizer.tokenize(movie_line)\n\twords = [word for word in tokens if word not in stopwords_set]\n\tstemmed_terms = [porter_stemmer.stem(word) for word in words]\n\tlemmatized_terms = [wordnet_lemmatizer.lemmatize(word) for word in stemmed_terms]\n\treturn lemmatized_terms",
"def sentences_from_string(this_class, text):\n # Note that the native method below leaks. We work around this\n # by acquiring its pointer in __init__\n sentReps = parser.sentRepsFromString(text)\n return list(map(this_class, sentReps))",
"def tokenize(t):\n tweet_tok = TweetTokenizer(strip_handles=True, reduce_len=True)\n tokens = tweet_tok.tokenize(t)\n wnl = WordNetLemmatizer()\n stems = []\n for item in tokens:\n stems.append(wnl.lemmatize(item))\n return stems",
"def lemmatize_sentences_rus(sentences):\n split = 'fks2hwras1ma39hka766gbk'\n chunk_size = 10000\n\n def handle_chunk(sentences_chunk):\n all_sents = (' ' + split + ' ').join(sentences_chunk)\n all_lemmas = lemmatize_sentence_rus(all_sents).split()\n chunk_res = [[]]\n for lemma in all_lemmas:\n if lemma == split:\n chunk_res.append([])\n else:\n chunk_res[-1].append(lemma)\n\n return chunk_res\n\n res = []\n i = 0\n while i < len(sentences):\n if len(sentences) > chunk_size:\n print(f'Lemmatization: Done for {i} from {len(sentences)} sentences')\n\n i_step = min(chunk_size, len(sentences) - i)\n res.extend(handle_chunk(sentences[i:i + i_step]))\n i += i_step\n\n assert len(res) == len(sentences)\n res = [' '.join(arr) for arr in res]\n return res",
"def lemmatization(tokenized_word_list):\n porter=nltk.stem.PorterStemmer()\n filtered_tokens = [porter.stem(word) for word in tokenized_word_list]\n return filtered_tokens",
"def process_sentence(sentence: str) -> list:\r\n return [process_word(word) for word in sentence.split()][:-1]",
"def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)",
"def tokenize(text):\n # This experiment convinced me to lemmatize only rather than lemmatize and\n # stem. I also got this nifty URL detector there.\n #https://gist.github.com/rajatsharma369007/de1e2024707ad90a73226643c314b118\n\n # initialization\n lemmatizer = WordNetLemmatizer()\n stop = stopwords.words(\"english\")\n\n # Replaced all URLs with 'urlplaceholder'\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'+\\\n '(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n for url in re.findall(url_regex, text):\n text = text.replace(url, \"urlplaceholder\")\n\n # tokenize and lemmatize\n tokens = word_tokenize(text)\n tokens = [lemmatizer.lemmatize(token).lower().strip() for\n token in tokens if token not in stop]\n\n return tokens",
"def split_sentences(text: str) -> List[str]:\n return sent_tokenize(text)",
"def preprocess(self, sentence, vocab_set=None):\n tokens = sentence.split()\n new_tokens = []\n for token in tokens:\n new_tokens += self.__clean(token)\n tokens = new_tokens\n\n tokens = self.__normalize_document(' '.join(tokens))\n\n return tokens",
"def tokenize_sentences(self, sents):\n token_sentence_list = []\n for sentence in sents:\n token_sentence_list.append(self.tokenizer(sentence))\n return token_sentence_list",
"def _stem_words_lancaster(stringSentence):\n # type: (str) -> str\n _words = stringSentence.split(' ')\n _words = list(map(lambda x: LancasterStemmer().stem(x), _words))\n\n return ' '.join(_words)",
"def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens",
"def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words",
"def normalize(string, stemmer, stopwords_set):\n\n # tokenize using punkt data\n dummy_list = nltk.word_tokenize(string)\n\n # remove stopwords\n dummy_list = [word for word in dummy_list if word not in stopwords_set]\n\n # split using special characters as delimiters\n # example \"50,000\" -> [\"50\", \"000\"]\n # example \".\" -> [\"\", \"\"]\n term_list = []\n for word in dummy_list:\n term_list += re.split(r\"[^0-9A-Za-z]\", word)\n \n # stemming using Porter Stemmer\n term_list = [stemmer.stem(word, 0, len(word) - 1) for word in term_list]\n\n # remove empty terms\n term_list = [word for word in term_list if len(word) > 0]\n\n return term_list",
"def tokenize(self, input_string: str) -> List[str]:",
"def tokenize_sents(sents, tokenizer):\n return [tokenizer(sent) for sent in sents]",
"def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines",
"def sent_to_words(self, sentences):\n\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence)))",
"def tokenize_sentences(sentences):\r\n \r\n # Initialize the list of lists of tokenized sentences\r\n tokenized_sentences = []\r\n \r\n for sentence in sentences:\r\n \r\n # Convert to lowercase letters\r\n sentence = sentence.lower()\r\n \r\n # Convert into a list of words\r\n \r\n tokenized = nltk.word_tokenize(sentence)\r\n \r\n # append the list of words to the list of lists\r\n tokenized_sentences.append(tokenized)\r\n \r\n return tokenized_sentences",
"def split_on_sentences(conversations):\n\n sentence_list = []\n\n for conversation in conversations:\n for sentences in conversation:\n token_sen = tokenize.sent_tokenize(sentences)\n for sentence in token_sen:\n if sentence != 'Patient:' and sentence != 'Doctor:':\n sentence_list.append(sentence)\n\n return sentence_list",
"def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized",
"def parse_and_check_reactants(raw_text_line: str) -> List[str]:\n smiles = raw_text_line.strip().replace(' ', '')\n out = []\n for s in smiles.split('.'):\n mol = Chem.MolFromSmiles(s, sanitize=False)\n if mol is None:\n print(smiles)\n raise ValueError\n out.append(s)\n return out",
"def _proc(dat):\n def lemma(text):\n lemmatizer = WordNetLemmatizer()\n w_tokenizer = WhitespaceTokenizer()\n return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]\n\n dat['text_lemmatized'] = dat['clean_comments'].apply(lemma)\n dat['text_lemmatized'] = dat['text_lemmatized'].apply(' '.join)",
"def _process_sentence(cls, stringSentence, vectoriser):\n # type: (str,CountVectorizer) -> spmatrix\n\n _str = cls._strip_remove_non_alpha(stringSentence)\n _str = cls._stem_words_lancaster(_str)\n\n return vectoriser.transform([_str])",
"def sents(path):\n\n data = pd.read_csv( path , sep = \"\\t\", index_col=False, encoding='latin-1', low_memory=False)\n df = DataFrame(data)\n# print(df['Sentiment'])\n labelCount = df.groupby(df['Sentiment']).count()\n #print(labelCount)\n x = df['SentimentText'].str.replace('http\\S+|www.\\S+', '', case=False)\n y = df['Sentiment']\n x = x.str.replace('[^a-zA-Z]', ' ') #\n x_check = [\" \".join([lemmatize(word) for word in sentence.split(\" \")]) for sentence in x]\n stopset = set(stopwords.words('English'))\n x_check = [' '.join(w for w in sentence.split() if w.lower() not in stopset)\n for sentence in x\n ]\n #print(x_check)\n return x_check, y",
"def lemmatize(self, sentence):\n porter_stemmer = PorterStemmer()\n return ' '.join(porter_stemmer.stem(str(w)) for w in sentence.lower().split())",
"def preprocess_tweet(tweet):\n clean_tweet = tp.clean(tweet)\n\n # perform lemmatization\n tokenizer = TweetTokenizer()\n tweet_tokens = tokenizer.tokenize(clean_tweet)\n\n lemmatized_tweet = lemmatize_tweet(tweet_tokens)\n\n # remove stopwords\n preprocessed_tweet = remove_stopwords(lemmatized_tweet)\n return preprocessed_tweet"
]
| [
"0.6374897",
"0.62834436",
"0.6217632",
"0.61962837",
"0.60802025",
"0.59153664",
"0.5852285",
"0.57940227",
"0.5789301",
"0.5753251",
"0.5715902",
"0.5711697",
"0.56845933",
"0.5679322",
"0.5672883",
"0.56566954",
"0.5654694",
"0.5646494",
"0.5638076",
"0.5634727",
"0.5634518",
"0.56309927",
"0.56277657",
"0.5622697",
"0.5619868",
"0.56103563",
"0.56055063",
"0.55905396",
"0.5589033",
"0.5571972"
]
| 0.7173315 | 0 |
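A minimal standalone sketch of the prepare_data pipeline in the row above, for running it outside the class. The NLTK resources (punkt, wordnet, stopwords) and the is_valid_lemma stand-in (alphabetic, longer than two characters, not a stopword) are assumptions; the original validator is defined elsewhere in the module.

```python
# Standalone sketch of the sentence-tokenize -> lemmatize -> filter pipeline.
# Assumes nltk.download() has fetched 'punkt', 'wordnet', 'omw-1.4' and 'stopwords'.
from nltk import tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))

def is_valid_lemma(lemma):
    # Hypothetical stand-in for the module's validator: keep meaningful alphabetic tokens.
    return lemma.isalpha() and len(lemma) > 2 and lemma.lower() not in stop_words

def prepare_data(data):
    out_sentences = []
    for sentence in tokenize.sent_tokenize(data):
        # Lemmatize each whitespace-separated token, then drop tokens that fail the check.
        lemmas = [lemmatizer.lemmatize(word) for word in sentence.split(" ")]
        lemmas = [lemma for lemma in lemmas if is_valid_lemma(lemma)]
        out_sentences.append(" ".join(lemmas))
    return out_sentences

print(prepare_data("The cats were running. They chased two mice."))
```

Splitting on a single space mirrors the row above; swapping in nltk.word_tokenize would handle punctuation attached to words more cleanly.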
Iterates over the input list of sentences. Gets the sentiment of each sentence and returns the average sentiment. | def get_sentiment(self, sentances):
sentiment_total = 0
# Add each sentances combined sentiment to a total tally
for sentance in sentances:
sentiment = self.sentiment_analyzer.polarity_scores(sentance)
sentiment_total += sentiment['compound']
return sentiment_total / len(sentances) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity",
"def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))",
"def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg",
"def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])",
"def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment",
"def question_sentiment_analysis(self):\n sentiments = get_sentiments()\n student_data = self.responses\n question_text = 'In one word'\n\n # Set up data for calculations\n num_scores = 0\n sentiment_sum = 0\n score_list = list()\n\n for response in student_data:\n\n if question_text in response.question.text:\n words = response.response.lower().split()\n\n # Find the sentiment score for each word, and add it to our data\n for word in words:\n # Ignore the word if it's not in the sentiment dictionary\n if word in sentiments:\n sentiment_sum += sentiments[word]\n num_scores += 1\n score_list.append(sentiments[word])\n\n average = sentiment_sum / num_scores\n standard_dev = statistics.stdev(score_list)\n\n return average, standard_dev",
"def sentiment(data_list):\n for x in data_list:\n print(x)\n analysis = TextBlob(x)\n print(analysis.sentiment)",
"def sentiment(sentences: List[str]) -> List[List[float]]:\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n\n nltk.download('vader_lexicon')\n darth = SentimentIntensityAnalyzer()\n collector = []\n for sentence in sentences:\n ss = darth.polarity_scores(sentence)\n temp = []\n for k in ss.values():\n temp.append(k)\n collector.append(temp)\n return collector",
"def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment",
"def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment",
"def aggregate_sentiment(tweets):\r\n\r\n positive = 0\r\n negative = 0\r\n neutral = 0\r\n\r\n for tweet in tweets:\r\n if tweet.sentiment_type == \"positive\":\r\n positive += 1\r\n elif tweet.sentiment_type == \"negative\":\r\n negative += 1\r\n else:\r\n neutral += 1\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], [\"Negative\", negative]]\r\n return result",
"def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average",
"def get_sentiment(self, texts):\n list_sentiments = []\n\n for text in texts: \n sentiments = TextBlob(text)\n for sentence in sentiments.sentences: \n list_sentiments.append((str(sentence), sentence.sentiment.polarity))\n \n list_sentiments = self._sort_sentiments(list_sentiments)\n return list_sentiments",
"def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)",
"def get_sentiment_scores(self, words):\n sentiment_scores = [0, 0]\n if self.allow_negation:\n negated_words = self.get_negated_words(words)\n else:\n negated_words = [(word.lower(), False) for word in words]\n\n for word, negated in negated_words:\n sign = -1 if negated else 1\n if word in self.mapping.keys():\n sentiments = self.mapping[word]\n sentiment_scores[0] += sign * sentiments[0]\n sentiment_scores[1] += sign * sentiments[1]\n\n return sentiment_scores",
"def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def get_sentiment(string_list):\n sentiment = []\n subjectivity = []\n \n for text in string_list:\n blob = textblob.TextBlob(text)\n sentiment.append(blob.sentiment.polarity)\n subjectivity.append(blob.sentiment.subjectivity)\n \n return sentiment, subjectivity",
"def sentiment(self) -> Dict[str, float]:",
"def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average",
"def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list",
"def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score",
"def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)",
"def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def analyze(self, text):\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n \n tokens = tokenizer.tokenize(text)\n \n sentiment = 0\n \n for word in tokens:\n if word in self.__positives:\n sentiment += 1\n elif word in self.__negatives:\n sentiment -= 1\n \n return sentiment",
"def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average",
"def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment",
"def aver_score(datalist):\n scores_per_position = []\n \n for tupl in datalist:\n count = 0\n sum_of_position = 0\n for element in tupl[3]:\n sum_of_position += element\n count +=1\n aver_pos = sum_of_position/ count\n scores_per_position += [aver_pos]\n \n return scores_per_position",
"def analyze_sentiment(test_files_list: list, classification_dict: dict):\n\n # Lexicon words used for sentiment analysis\n pos_lex_words = get_lexicon_words(POS_LEXICON_DIR_PATH)\n neg_lex_words = get_lexicon_words(NEG_LEXICON_DIR_PATH)\n\n classification_scores = []\n true_labels = []\n\n for file in test_files_list:\n \n # Read the file to analyze\n with open(file) as f:\n sentences = f.readlines()\n\n # tokenize the sentences in the file\n tokens = []\n for sentence in sentences:\n tokens += tokenize(sentence) # Do not want to remove duplicate words, so we have more data\n \n # Get number of positive and negative words found in the file\n positive_words, negative_words = get_pos_neg_word_count(tokens, pos_lex_words, neg_lex_words)\n \n # Keep an array of all the scores we have (negative, positive)\n classification_score = [negative_words, positive_words]\n classification_scores.append(classification_score)\n \n # Maintain the true answer (negative, positive)\n true_label = [0, 0]\n if file.split('/')[1] == 'pos': true_label[1] += 1\n else: true_label[0] += 1\n true_labels.append(true_label)\n\n # Print for submitting assignment\n if true_label[0]: #file is actually negative\n classification_dict['neg'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['neg'][file.split('/')[2]] = 'positive'\n else: classification_dict['neg'][file.split('/')[2]] = 'negative'\n else:\n classification_dict['pos'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['pos'][file.split('/')[2]] = 'positive'\n else: classification_dict['pos'][file.split('/')[2]] = 'negative'\n\n \n return np.array(classification_scores), np.array(true_labels)",
"def func(lst):\n tot = 0\n for i in lst:\n tot = tot + i\n avg = tot / len(lst)\n return avg"
]
| [
"0.7829622",
"0.75677633",
"0.7163454",
"0.71054775",
"0.70540977",
"0.68959516",
"0.67398673",
"0.67392325",
"0.6698907",
"0.66943854",
"0.6660001",
"0.6407294",
"0.6405679",
"0.63865983",
"0.6334893",
"0.6324999",
"0.6322835",
"0.6309016",
"0.63056993",
"0.629432",
"0.626196",
"0.62592524",
"0.6218544",
"0.61800116",
"0.616135",
"0.6109872",
"0.5988758",
"0.59718925",
"0.59532726",
"0.5952051"
]
| 0.77835083 | 1 |
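A hedged sketch of the averaging step in the row above, using NLTK's bundled VADER analyzer (the row's self.sentiment_analyzer is assumed to be a SentimentIntensityAnalyzer). Requires nltk.download('vader_lexicon').

```python
from nltk.sentiment.vader import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()

def get_sentiment(sentences):
    if not sentences:
        return 0.0  # guard against dividing by zero on empty input
    # Sum the compound polarity of each sentence, then average.
    total = sum(analyzer.polarity_scores(s)["compound"] for s in sentences)
    return total / len(sentences)

print(get_sentiment(["I love this phone.", "The battery life is terrible."]))
```

Each compound score lies in [-1, 1], so the returned average does too; the empty-list guard is the one behavior added relative to the row above.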
Inputs a string and returns a list of all emoticons that are found. | def get_emoticons_value(self, line):
emoticons = list()
# Finds any substring which represents an emote
# Expression found at https://stackoverflow.com/questions/28783420/cannot-compile-8-digit-unicode-regex-ranges-in-python-2-7-re
emoticons.extend(re.findall(u'[\U00010000-\U0010ffff]', line, flags=re.UNICODE))
return emoticons | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_emojis(text):\n emojis = []\n for emoji in emot.emoji(text):\n emojis.append(emoji['value'])\n text = text.replace(emoji['value'], '')\n\n return text, emojis",
"def get_emojis(self):\n return self.tweets.str.findall(r':{1}[\\d\\w\\-]+:{1}')",
"def emotions(self):\n return self._emotions",
"def _load_emoticons(emotions):\n return [nparray_as_image(cv2.imread('graphics/%s.png' % emotion, -1), mode=None) for emotion in emotions]",
"def fetch_emojis(route):\n url = _config['emojicons_baseurl'] + route\n logging.debug(\"Requesting URL '{0}'\".format(url))\n page = requests.get(url)\n tree = html.fromstring(page.text)\n emojis = []\n for id, t, e in zip([re.search(\"^emoticon-(\\d+)$\", x).group(1) for x in tree.xpath(_config['xpath']['ids'])],\n tree.xpath(_config['xpath']['titles']),\n tree.xpath(_config['xpath']['emojis'])):\n emojis.append({'id': id, 'title': t, 'emoji': e})\n return emojis",
"def get_emoji_list():\n return list(map(lambda x: x.get('emoji'), emoji_list))",
"async def getemoji(self, ctx):\n pass",
"def get_slack_emoji():\n all_slack_emoji = []\n\n # load stock emoji from file\n with app.open_resource('../static/emoji-names.json') as f:\n stock_emojis = json.load(f)\n all_slack_emoji += stock_emojis\n\n # concat custom emoji by slack API call\n all_slack_emoji += sc.api_call('emoji.list')['emoji'].keys()\n return all_slack_emoji",
"def removeEmoticons(text):\n text = re.sub(':\\)|;\\)|:-\\)|\\(-:|:-D|=D|:P|xD|X-p|\\^\\^|:-*|\\^\\.\\^|\\^\\-\\^|\\^\\_\\^|\\,-\\)|\\)-:|:\\'\\(|:\\(|:-\\(|:\\S|T\\.T|\\.\\_\\.|:<|:-\\S|:-<|\\*\\-\\*|:O|=O|=\\-O|O\\.o|XO|O\\_O|:-\\@|=/|:/|X\\-\\(|>\\.<|>=\\(|D:', '', text)\n return text",
"def emotions(self):\n return self[self.emotion_columns]",
"def emotions(self):\n return self[self.emotion_columns]",
"def emotions(self):\n if self.pickled_emotions:\n return pickle_util.load(self.pickled_emotions)\n else:\n return None",
"def _words_and_emoticons(self):\n wes = self.text.split()\n words_punc_dict = self._words_plus_punc()\n wes = [we for we in wes if len(we) > 1]\n \n for i, we in enumerate(wes):\n if we in words_punc_dict:\n wes[i] = words_punc_dict[we]\n return wes",
"async def fetch_emojis(self):\n data = await self.http.get_emojis()\n emojis = []\n for emoji_data in data['customReactions']:\n team = self.get_team(emoji_data['teamId'])\n emoji = Emoji(team=team, data=emoji_data, state=self.http)\n emojis.append(emoji)\n\n return emojis",
"def get_emotions_in_sentence(sentence):\n tknzr = TweetTokenizer()\n \n tokens = tknzr.tokenize(sentence)\n \n emotions = {}\n \n for word in tokens:\n # Replace hashtags with pure words (i.e. \"#positive\" becomes \"positive\")\n if re.match(\"^#\\S+\", word):\n word = re.sub(\"^#\", \"\", word)\n \n try:\n _emotions = nrc_lexicon.loc[word]\n \n _emotions = _emotions[_emotions['yes_or_no'] == 1]\n \n if _emotions[_emotions['yes_or_no'] == 1].empty:\n pass\n \n for _emotion in _emotions[_emotions['yes_or_no'] == 1]['emotion']:\n if _emotion not in emotions:\n emotions[_emotion] = 0\n\n emotions[_emotion] += 1 \n except:\n pass\n \n return emotions",
"def find_emoji(term: str) -> None:\n term = term.lower()\n emoji_mapping = _make_emoji_mapping()\n\n for emoji, name in emoji_mapping.items():\n if term in name:\n print(f\"{name.strip().title():>42} | {emoji}\")\n else:\n print(\"no matches\")",
"def find_emoji(term):\r\n emoji_dict = _make_emoji_mapping()\r\n emoji_name_lengths = []\r\n emojis_found = {}\r\n for emoji, emoji_name in emoji_dict.items():\r\n if term.lower() in emoji_name.lower():\r\n emoji_name_lengths.append(len(emoji_name))\r\n emojis_found[emoji_name.strip()] = emoji.strip()\r\n if len(emojis_found) == 0:\r\n print(\"no matches\")\r\n else:\r\n for name, emoji in emojis_found.items():\r\n print(f\"{name.title(): <{max(emoji_name_lengths)}} | {emoji}\")",
"def oem_text(self) -> list[str]:\n return [block.strip() if block else \"\" for block in self._oem_text]\n # return {'block{}'.format(idx):text\n # for idx, text in enumerate(self._oem_text)}\n\n # return ''.join([block for block in self._oem_text if block])",
"def de_emojify(self, tweet):\n regrex_pattern = re.compile(\n pattern=\"[\"\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"\\U00002500-\\U00002BEF\" # chinese char\n \"\\U00002702-\\U000027B0\"\n \"\\U00002702-\\U000027B0\"\n \"\\U000024C2-\\U0001F251\"\n \"\\U0001f926-\\U0001f937\"\n \"\\U00010000-\\U0010ffff\"\n \"\\u2640-\\u2642\"\n \"\\u2600-\\u2B55\"\n \"\\u200d\"\n \"\\u23cf\"\n \"\\u23e9\"\n \"\\u231a\"\n \"\\ufe0f\" # dingbats\n \"\\u3030\"\n \"]+\",\n flags=re.UNICODE,\n )\n return regrex_pattern.sub(r\"\", tweet)",
"async def emojis(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"emojis\")",
"def showEmoticonList(self):\n print \"Guess what? No emoticons. But I'll put in a random one for you\"\n self.appendImageAtCursor(\"throbber.gif\")",
"async def emojis(self, ctx):\n server = ctx.message.server\n await self.bot.say('This may take some time, generating list...')\n data = discord.Embed(description=\"Emojilist\")\n for ej in server.emojis:\n data.add_field(\n name=ej.name, value=str(ej) + \" \" + ej.id, inline=False)\n await self.bot.say(embed=data)",
"def find_emails(text):\n # Split into tokens in case someone uses for example\n # 'Name <[email protected]>' format.\n emails = []\n\n for word in WORD_SPLIT.split(text):\n if SIMPLE_EMAIL.match(word):\n local, domain = word.rsplit(\"@\", 1)\n try:\n domain = domain.encode(\"idna\").decode(\"ascii\")\n except UnicodeError:\n continue\n emails.append(\"{}@{}\".format(local, domain))\n\n return emails",
"def get_emotions(tweet):\n classifier = train_model('NRC-Emotion-Intensity-Lexicon-v1.txt')\n\n emotions = []\n probabilities = classifier.prob_classify(tweets_features(tweet))\n for sample in probabilities.samples():\n emotions.append((sample, probabilities.prob(sample)))\n\n label = classifier.classify(tweets_features(tweet))\n\n return emotions, label",
"def _preserve_emoticons(element):\n for img in element.xpath('//img[starts-with(@src, \"/community/emoticons\")]'):\n img.text = \" emoticon-{0} \".format(img.attrib[\"alt\"])\n return element",
"def replace_emoticon(text):\n for emoji in emojis.keys():\n text = text.replace(emoji, \" EMOJI\" + emojis[emoji] + \" \") \n return text",
"def remove_emoji(text):\n # using regex to identify all emojis\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u'\\U00010000-\\U0010ffff'\n u\"\\u200d\"\n u\"\\u2640-\\u2642\"\n u\"\\u2600-\\u2B55\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\u3030\"\n u\"\\ufe0f\"\n \"]+\", flags=re.UNICODE)\n\n #removes all specified emoji found in text\n words = emoji_pattern.sub(r'', text)\n return words",
"def weather_emoji(description: str):\n\n emoji_map = {\n \"cloud\": \"☁️\",\n \"rain\": \"🌧\",\n \"sun\": \"☀️\",\n \"snow\": \"❄️\",\n }\n\n emojis = \"\"\n for key in emoji_map:\n if key in description:\n emojis += emoji_map[key]\n return emojis",
"async def stealemoji(self, ctx, *, emojis):\n try:\n m = await commands.MessageConverter().convert(ctx, emojis)\n emojis = m.content\n except commands.MessageNotFound:\n pass\n\n emojis = [await commands.PartialEmojiConverter().convert(ctx, e) for e in\n re.findall(r'<a?:\\w+:\\d+>', emojis)]\n\n if not emojis:\n await ctx.send_help()\n return\n\n ae = list(ctx.guild.emojis) + emojis\n if len([e for e in ae if not e.animated]) > ctx.guild.emoji_limit:\n await ctx.send(\"Not enough emoji slots\")\n if len([e for e in ae if e.animated]) > ctx.guild.emoji_limit:\n await ctx.send(\"Not enough animated emoji slots\")\n\n async with ctx.typing():\n for emoji in emojis:\n if emoji.name in [e.name for e in ctx.guild.emojis]:\n continue\n await ctx.guild.create_custom_emoji(name=emoji.name, image=await emoji.url.read())\n await ctx.tick()",
"def parse_etags(etag_str):\n if etag_str.strip() == \"*\":\n return [\"*\"]\n else:\n # Parse each ETag individually, and return any that are valid.\n etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(\",\"))\n return [match[1] for match in etag_matches if match]"
]
| [
"0.68734354",
"0.64763284",
"0.64244825",
"0.6348878",
"0.6011213",
"0.5939441",
"0.5886352",
"0.57929456",
"0.5788512",
"0.5787994",
"0.5787994",
"0.57172185",
"0.570008",
"0.56960046",
"0.5693696",
"0.56645405",
"0.56580323",
"0.56060207",
"0.55989766",
"0.55843586",
"0.5575612",
"0.5483723",
"0.5465268",
"0.5417198",
"0.54170793",
"0.54110926",
"0.53981626",
"0.5370441",
"0.53683394",
"0.5357355"
]
| 0.749108 | 0 |
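The emoji pattern in the row above matches any code point in the Unicode supplementary planes (U+10000 through U+10FFFF). A runnable sketch, with a Counter added to tally duplicates:

```python
import re
from collections import Counter

EMOJI_RE = re.compile(u"[\U00010000-\U0010ffff]", flags=re.UNICODE)

def get_emoticons_value(line):
    # Every supplementary-plane character (where most emoji live) counts as an emoticon.
    return EMOJI_RE.findall(line)

line = u"great day \U0001F600 \U0001F600 \U0001F389"
print(Counter(get_emoticons_value(line)))  # two grinning faces, one party popper
```

Note that this range skips emoji that sit in the Basic Multilingual Plane (e.g. U+263A ☺), and the character-class range only compiles on interpreters with full code-point support, such as any Python 3 build.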
Iterates over each sentence and counts the number of words that fall into each of the seven emotion categories. Returns a dictionary of results. | def get_mood(self, sentances):
moods = {
"happiness": 0,
"anxiety": 0,
"sadness": 0,
"affection": 0,
"aggression": 0,
"expressive": 0,
"glory": 0
}
for sentance in sentances:
sentance = sentance.lower()
# Clean data for analysis
sentance = re.sub('[^a-zA-Z\'\d\s]', ' ', sentance)
sentance = re.sub('[ ]{2,}', ' ', sentance)
# Match words to correct category
for word in sentance.split(" "):
if word in happiness:
moods['happiness'] += 1
elif word in anxiety:
moods['anxiety'] += 1
elif word in sadness:
moods['sadness'] += 1
elif word in affection:
moods['affection'] += 1
elif word in aggression:
moods['aggression'] += 1
elif word in expressive:
moods['expressive'] += 1
elif word in glory:
moods['glory'] += 1
return moods | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def frequency_feelings(self):\n feelings = {}\n for response in self.responses:\n if response.question.text == \"In one word, how does this text make you feel?\":\n lower_case_word = response.response.lower()\n if feelings.get(lower_case_word, 0) == 0:\n feelings[lower_case_word] = 1\n else:\n feelings[lower_case_word] += 1\n\n frequent_words = [] # list of tuples in the format (frequency, word)\n for word in feelings:\n if feelings[word] > 1:\n frequent_words.append((word, feelings[word]))\n frequent_words.sort(key=lambda x: x[1], reverse=True)\n return frequent_words",
"def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq",
"def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict",
"def get_num_words_spoken_by_character_per_episode(content):\n content = list(csv.reader(content.splitlines(), delimiter=','))\n characters = [name[2] for name in content]\n characters = list(dict.fromkeys(characters))\n del characters[0]\n res = defaultdict()\n for character in characters:\n episode = 1\n dic = {}\n count = 0\n for row in content: \n if row[2] == character:\n if str(episode) == row[1]:\n count += len(row[3].split())\n else:\n dic[str(episode)] = count\n episode = int(row[1])\n count = len(row[3].split())\n if '13' not in dic.keys():\n dic['13'] = count \n dic = Counter(dic)\n res[character] = dic\n return res",
"def get_e_probs(dataset):\n\n # Number of times that the state s is seen paired with observation x in the corpus\n e_word_tag_counts = {}\n\n for sentence in dataset:\n\n for word_to_tag in sentence:\n # Foreach (word, tag) tuple we are calculating number of incstances\n if word_to_tag in e_word_tag_counts:\n e_word_tag_counts[word_to_tag] += 1\n else:\n e_word_tag_counts[word_to_tag] = 1\n\n return e_word_tag_counts",
"def get_counts(self):\n counts = {}\n for document in self.docs:\n for word in document:\n if word not in counts.keys():\n counts[word] = 1\n else:\n counts[word] += 1\n return counts",
"def get_emotions_in_sentence(sentence):\n tknzr = TweetTokenizer()\n \n tokens = tknzr.tokenize(sentence)\n \n emotions = {}\n \n for word in tokens:\n # Replace hashtags with pure words (i.e. \"#positive\" becomes \"positive\")\n if re.match(\"^#\\S+\", word):\n word = re.sub(\"^#\", \"\", word)\n \n try:\n _emotions = nrc_lexicon.loc[word]\n \n _emotions = _emotions[_emotions['yes_or_no'] == 1]\n \n if _emotions[_emotions['yes_or_no'] == 1].empty:\n pass\n \n for _emotion in _emotions[_emotions['yes_or_no'] == 1]['emotion']:\n if _emotion not in emotions:\n emotions[_emotion] = 0\n\n emotions[_emotion] += 1 \n except:\n pass\n \n return emotions",
"def count_words(tokenized_sentences):\r\n \r\n word_counts = {}\r\n \r\n # Loop through each sentence\r\n for sentence in tokenized_sentences: # complete this line\r\n \r\n for token in sentence: # complete this line\r\n\r\n # If the token is not in the dictionary yet, set the count to 1\r\n if token not in word_counts.keys(): # complete this line\r\n word_counts[token] = 1\r\n \r\n # If the token is already in the dictionary, increment the count by 1\r\n else:\r\n word_counts[token] += 1\r\n \r\n return word_counts",
"def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features",
"def getCounter(self):\n word_count, noun_word_count = Counter(), Counter()\n word_rating, noun_word_rating = defaultdict(list), defaultdict(list)\n docs = self.nlp.pipe(\n self.docs, n_process=1, disable=self.disablelayers)\n \n\n for index, doc in enumerate(docs):\n for token in doc:\n if not token.is_stop and not token.is_punct and token.pos_ in self.pos:\n if token.pos_ == 'PROPN':\n word_count[token.lemma_] += 1\n word_rating[token.lemma_].append(self.ratings[index])\n else:\n noun_word_count[token.lemma_] += 1\n noun_word_rating[token.lemma_].append(self.ratings[index])\n\n # if 0<=proper nouns<=5 found, add regular nouns\n if not word_count or len(word_count) <= 5:\n word_count += noun_word_count\n word_rating = {**word_rating, **noun_word_rating}\n \n word_color = {word: self.getColor(\n ratings)[1] for word, ratings in word_rating.items()}\n word_sentiment = {word: self.getColor(\n ratings)[0] for word, ratings in word_rating.items()}\n\n return word_count, word_color, word_sentiment",
"def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END",
"def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic",
"def nrc_hashtag_emotion(self, tokens):\n num_features = 10 # 'anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise', 'trust'\n sum_vec = [0.0] * num_features\n for token in tokens:\n if token in self.nrc_hashtag_emotion_map:\n sum_vec = [a + b for a, b in zip(sum_vec, self.nrc_hashtag_emotion_map[token])] # sum up the individual word feature vectors\n feature_names = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise', 'trust']\n feature_names = ['nrc_hashtag_emotion_' + name for name in feature_names]\n return dict(zip(feature_names, sum_vec))",
"def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency",
"def iterate_words(counter, li, all_dict, emotion_dict):\n\n counter += 1\n # iterate through the words in the list\n for word in li:\n # if word not in the dict of all words add it with frequency 1, else increase its frequency by 1\n if word not in all_dict:\n all_dict[word] = 1\n else:\n all_dict[word] += 1\n # if word not in the dict of words with certain emotion add it with frequency 1, else increase its frequency by 1\n if word not in emotion_dict:\n emotion_dict[word] = 1\n else:\n emotion_dict[word] += 1\n\n return counter",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return",
"def get_num_words_spoken_by_character_per_episode(content):\n d = defaultdict(Counter)\n reader_list = csv.DictReader(content.splitlines())\n for row in reader_list:\n words = row['Line'].strip().split()\n d[row['Character']][row['Episode']] += len(words)\n return d",
"def get_opinion_count(self, words):\n sentiment_counts = [0, 0]\n if self.allow_negation:\n negated_words = self.get_negated_words(words)\n else:\n negated_words = [(word.lower(), False) for word in words]\n for word, negated in negated_words:\n if word in self.mapping.keys():\n sentiment = self.mapping[word]\n if sentiment == 'positive' or (sentiment == 'negative' and negated):\n sentiment_counts[1] += 1\n elif sentiment == 'negative' or (sentiment == 'positive' and negated):\n sentiment_counts[0] += 1\n return sentiment_counts",
"def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts",
"def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts",
"def analyze_embeddings(emb):\n dic = {\"Hi\": 0, \"En\": 1, \"Ot\": 2}\n count = [0, 0, 0, 0]\n count_zero = [0, 0, 0, 0]\n for i, j in zip(emb, corpus_trans):\n for k, l in zip(i, j):\n count[dic[l[1]]] += 1\n if sum(k) == 0:\n count_zero[dic[l[1]]] += 1\n count[-1] = sum(count)\n count_zero[-1] - sum(count_zero)\n print(\"hi, en, ot, total\")\n print(\"count: \", count)\n print(\"zero count: \", count_zero)",
"def count_words(data, number_word_frequency_results=40):\n current_max_sentence_size = 0\n count_word_frequency = Counter()\n for entry in data:\n print (entry)\n terms_all = [term for term in entry]\n count_word_frequency.update(terms_all)\n return count_word_frequency.most_common(number_word_frequency_results)",
"def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])",
"def __yago_counts(self):\n\n num_lines = 0\n print(\"Calculating Yago occurrences\")\n custom_freq = {}\n with open(\n os.path.join(self.base_url, \"generic/p_e_m_data/aida_means.tsv\"),\n \"r\",\n encoding=\"utf-8\",\n ) as f:\n for line in f:\n num_lines += 1\n\n if num_lines % 5000000 == 0:\n print(\"Processed {} lines.\".format(num_lines))\n\n line = line.rstrip()\n line = unquote(line)\n parts = line.split(\"\\t\")\n mention = parts[0][1:-1].strip()\n\n ent_name = parts[1].strip()\n ent_name = ent_name.replace(\"&\", \"&\")\n ent_name = ent_name.replace(\""\", '\"')\n\n x = ent_name.find(\"\\\\u\")\n while x != -1:\n code = ent_name[x : x + 6]\n replace = unicode2ascii(code)\n if replace == \"%\":\n replace = \"%%\"\n\n ent_name = ent_name.replace(code, replace)\n x = ent_name.find(\"\\\\u\")\n\n ent_name = self.wikipedia.preprocess_ent_name(ent_name)\n if ent_name in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]:\n if mention not in custom_freq:\n custom_freq[mention] = {}\n ent_name = ent_name.replace(\" \", \"_\")\n if ent_name not in custom_freq[mention]:\n custom_freq[mention][ent_name] = 1\n\n return custom_freq",
"def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all",
"def find_total_occurrences(poem, *words):\n word_counts = word_count(poem)\n print word_counts\n return {word: word_counts.get(word, 0) for word in words}",
"def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts",
"def _build_set(self, n_words):\n # count all words\n counter = Counter()\n utterances, labels = self.read_json()\n for utterance in utterances:\n tokens = nltk.word_tokenize(utterance)\n counter.update(tokens)\n\n # generate an int representation\n count = [['UNK', -1]]\n count.extend(counter.most_common(n_words - 1))\n\n # convert the int representation into a dictionary\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in counter.most_common():\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n return dictionary",
"def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter",
"def count_tokens(self, words: Iterable[str]) -> Dict[str, int]:\r\n token_counts = Counter(words)\r\n return {\" \".join(token): count for token, count in token_counts.items()}"
]
| [
"0.64196604",
"0.63828003",
"0.6320231",
"0.624571",
"0.6229315",
"0.62036866",
"0.61986345",
"0.6181265",
"0.6176596",
"0.61327237",
"0.60983986",
"0.6078352",
"0.60563964",
"0.6022864",
"0.60128236",
"0.6010636",
"0.6002797",
"0.5984998",
"0.59828514",
"0.59767723",
"0.5962573",
"0.5957734",
"0.59365165",
"0.5929952",
"0.59281635",
"0.59132385",
"0.58330363",
"0.5814473",
"0.58102846",
"0.58034563"
]
| 0.7067273 | 0 |
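A condensed sketch of the mood-counting loop in the row above. The real category word lists (happiness, anxiety, sadness, affection, aggression, expressive, glory) are module-level collections defined elsewhere, so tiny illustrative sets stand in for them here, and a dictionary of sets replaces the if/elif chain.

```python
import re
from collections import Counter

# Illustrative stand-ins for the module's real category word lists.
CATEGORIES = {
    "happiness": {"happy", "joy", "glad"},
    "anxiety": {"worried", "nervous"},
    "sadness": {"sad", "cry"},
    "affection": {"love", "dear"},
    "aggression": {"hate", "fight"},
    "expressive": {"wow", "amazing"},
    "glory": {"win", "triumph"},
}

def get_mood(sentences):
    moods = Counter({name: 0 for name in CATEGORIES})
    for sentence in sentences:
        # Same cleanup as the row above: lowercase, strip punctuation, squeeze repeated spaces.
        cleaned = re.sub(r"[^a-zA-Z'\d\s]", " ", sentence.lower())
        cleaned = re.sub(r"[ ]{2,}", " ", cleaned)
        for word in cleaned.split(" "):
            for name, words in CATEGORIES.items():
                if word in words:
                    moods[name] += 1
                    break
    return dict(moods)

print(get_mood(["I am so happy I could cry.", "We win, what a triumph."]))
```

Looking words up in per-category sets keeps each membership check O(1) and avoids the long if/elif ladder while keeping the same category order.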
Runs analytics: takes all the data from the specified file and collects sentiment, mood, and emoticon data. Stores the results in dictionaries and returns them. | def run(self, input_type, file_name):
data = self.get_data(file_name)
sentiment = dict()
mood = dict()
emoticon = dict()
for line in data:
weight = 1
# Twitter data has a weight defined before the |
if input_type == "Twitter":
columns = line.split("|")
weight += int(columns[0])
# Everything but the weight at the beginning
line = '|'.join(columns[1:])
# Prepare data for analysis
sentances = self.prepare_data(line)
# Perform analysis
sentiment_val = self.get_sentiment(sentances)
mood_val = self.get_mood(sentances)
emoticon_val = self.get_emoticons_value(line)
# Add each sentiment value to a dictionary along with its weight
sentiment[sentiment_val] = weight if sentiment_val not in sentiment else sentiment[sentiment_val] + weight
# Add results to mood totals
for m, count in mood_val.items():
mood[m] = count if m not in mood else mood[m] + count
# Add results to emote totals
for e in emoticon_val:
emoticon[e] = 1 if e not in emoticon else emoticon[e] + 1
return sentiment, mood, emoticon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vader_analyse(file_input):\n sentences = getdata_from_db(1000)\n print(\"Working on %d tweets\" % (len(sentences)))\n headers = ('text', 'label', 'score')\n analyzed_data = []\n sid = SentimentIntensityAnalyzer()\n for line in sentences:\n text = pre.clean(line)\n scores = sid.polarity_scores(text)\n analyzed_data.append((text, getlabel(scores), scores['compound']))\n save_data_to_db(analyzed_data)\n analyzed = Dataset(*analyzed_data, headers=headers)\n return analyzed",
"def forecast(self, results_filename):\n\n analyser = TweetSentimentAnalyser()\n\n candidates = self.dataset.candidates\n party_key_from_candidate = lambda candidate: candidate.party[0]\n number_tweets = len(self.tweets)\n if number_tweets == 0:\n print(\"No tweets were found in the file '{}' : aborting the prediction.\"\n .format(number_tweets))\n\n # General statistics dictionnaries\n # NB: defaultdict allows for efficiently incrementing the value of a key/value\n # pair that may not be initialized yet without checking its existence\n stats_tweets_per_user = defaultdict(int)\n stats_dates = defaultdict(int)\n stats_hashtags = defaultdict(int)\n stats_parties = {party_key_from_candidate(c): 0 for c in candidates.values()}\n\n # Tweet analysis\n results_per_candidate = {candidate_key: [0, 0] for candidate_key in candidates.keys()}\n for tweet in self.tweets:\n # retrieve the tweet data (text) and metadata (retweets, date...)\n candidate_key = tweet[0].decode('utf-8')\n text = tweet[1].decode('utf-8')\n datetime_str = tweet[2].decode('utf-8')\n datetime = dt.datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\") # parse the datetime\n retweets = int(tweet[3])\n username = tweet[4].decode('utf-8')\n hashtags = None if len(tweet[5]) == 0 else tweet[5].decode('utf-8').split(' ')\n\n # basic statistics (individual users & hashtags)\n stats_tweets_per_user[username] += 1\n date = datetime.strftime(\"%Y-%m-%d\") # keep only the date\n stats_dates[date] += 1\n if hashtags is not None:\n for hashtag in hashtags:\n if hashtag == '#': # ignore empty hashtag\n continue\n stats_hashtags[hashtag.lower()] += 1\n\n # per-candidate analysis\n for key, data in candidates.items():\n # party mentions statistics\n party_key = party_key_from_candidate(data)\n for party_designation in data.party:\n if party_designation in text:\n stats_parties[party_key] += 1\n break\n # basic forecast : 1 tweet = 1 vote for the mentionned candidate\n for candidate_name in data.names:\n if candidate_name in text:\n results_per_candidate[key][0] += 1\n\n # sentimenal-analysis forecast :\n score = analyser.analyse_tweet(text, retweets)\n results_per_candidate[candidate_key][1] += score\n\n # Data interpretation\n compound_sum = sum([v[1] for v in results_per_candidate.values()])\n sort_default_dict = lambda dict, reverse:\\\n sorted(dict.items(), key=lambda k_v: k_v[1], reverse=reverse)\n\n # Write the results file\n votes_sum = sum([v[0] for v in results_per_candidate.values()])\n with open(results_filename, 'w+') as results_file:\n csv_writer = csv.writer(results_file, dialect=\"excel\")\n\n # per candiadate forecast\n for candidate, results in results_per_candidate.items():\n csv_writer.writerow([\n candidate,\n results,\n \"{} %\".format(100 * results[0] / votes_sum),\n \"{} %\".format(100 * results[1] / compound_sum)\n ])\n\n # general statistics\n for party, occurences in sort_default_dict(stats_parties, True):\n csv_writer.writerow([party, occurences])\n for username, occurences in sort_default_dict(stats_tweets_per_user, True):\n csv_writer.writerow([username, occurences])\n for hashtag, occurences in sort_default_dict(stats_hashtags, True):\n csv_writer.writerow([hashtag, occurences])\n\n print(results_per_candidate)",
"def analyze(filenames, usage = {}, behavior = {}, happiness = {}):\n\n\tdaily_tweet = \"../{}.log\".format(filenames)\n\t\n\twith open(daily_tweet, \"r\", encoding=\"utf8\", errors='ignore') as tweet:\n\t\tline = tweet.readline()\n\n\t\twhile line:\n\n\t\t\t# Get emoji that found in the tweet\n\t\t\temoji_found = [emo for emo in line if emo in emoji.UNICODE_EMOJI]\n\n\t\t\t# Collect emoji usage data\n\t\t\temoji_usage(emoji_found, usage)\n\t\t\t# Collect emotion from found emoji\n\t\t\temotion(emoji_found, happiness)\n\t\t\t# Collect emoji typing behavior\n\t\t\ttyping_behavior(emoji_found, behavior)\n\n\t\t\tline = tweet.readline()\n\n\texport_filename = filenames.split(\"/\")\n\texport_filename = export_filename[1]\n\texport_dict(usage, \"ThaiTrend-Emoji/dictionary/usage/daily/%s-usage.pkl\" % export_filename)\n\texport_dict(behavior, \"ThaiTrend-Emoji/dictionary/behavior/daily/%s-behavior.pkl\" % export_filename)\n\texport_dict(happiness, \"ThaiTrend-Emoji/dictionary/emotion/daily/%s-emotion.pkl\" % export_filename)",
"def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment",
"def analyze(movie_review_filename):\n client = language.LanguageServiceClient()\n\n with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n content = review_file.read()\n\n document = types.Document(content=content,\n type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n # Print the results\n return annotations",
"def analyze_sentiment(test_files_list: list, classification_dict: dict):\n\n # Lexicon words used for sentiment analysis\n pos_lex_words = get_lexicon_words(POS_LEXICON_DIR_PATH)\n neg_lex_words = get_lexicon_words(NEG_LEXICON_DIR_PATH)\n\n classification_scores = []\n true_labels = []\n\n for file in test_files_list:\n \n # Read the file to analyze\n with open(file) as f:\n sentences = f.readlines()\n\n # tokenize the sentences in the file\n tokens = []\n for sentence in sentences:\n tokens += tokenize(sentence) # Do not want to remove duplicate words, so we have more data\n \n # Get number of positive and negative words found in the file\n positive_words, negative_words = get_pos_neg_word_count(tokens, pos_lex_words, neg_lex_words)\n \n # Keep an array of all the scores we have (negative, positive)\n classification_score = [negative_words, positive_words]\n classification_scores.append(classification_score)\n \n # Maintain the true answer (negative, positive)\n true_label = [0, 0]\n if file.split('/')[1] == 'pos': true_label[1] += 1\n else: true_label[0] += 1\n true_labels.append(true_label)\n\n # Print for submitting assignment\n if true_label[0]: #file is actually negative\n classification_dict['neg'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['neg'][file.split('/')[2]] = 'positive'\n else: classification_dict['neg'][file.split('/')[2]] = 'negative'\n else:\n classification_dict['pos'][file.split('/')[2]] = 'neutral'\n if positive_words > negative_words: classification_dict['pos'][file.split('/')[2]] = 'positive'\n else: classification_dict['pos'][file.split('/')[2]] = 'negative'\n\n \n return np.array(classification_scores), np.array(true_labels)",
"def sentiment_analysis(name, dictionary):\n\ttone_analyzer = ToneAnalyzerV3(\n\t\t username='2ed2f0c6-1722-472d-9126-224897b991af',\n\t\t password='UcuSde1YmeK6',\n\t\t version='2016-05-19')\n\tl = open(name + '.txt')\n\tlines = l.readlines()\n\tfeel_dict = {'Anger':1.0,'Fear':2.0, 'Sadness':3.0, 'Disgust':4.0,'Joy':5.0, 'Excitement':6.0}\n\tdictionary[name] = []\n\tfor i in lines:\n\t\t#print('-----------------')\n\t\t#print(i)\n\t\tmax_score = 0.0\n\t\tmax_feel = ''\n\t\ttone = tone_analyzer.tone(i, 'emotion')\n\t\tfor feel in tone['document_tone']['tone_categories']:\n\t\t\tfor feeling in feel['tones']:\n\t\t\t\tif feeling['score'] > max_score:\n\t\t\t\t\tmax_score = feeling['score']\n\t\t\t\t\tmax_feel = feeling['tone_name']\n\t\t#print(max_score, max_feel)\n\t\t#blob1 = TextBlob(i, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\t\tif max_feel != '':\n\t\t\ttweet_tbu = db.Tweet.objects(rating=feel_dict[max_feel]).first()\n\t\t\tdict_tbu = {}\n\t\t\tif tweet_tbu:\n\t\t\t\tdict_tbu = mongo_to_dict(tweet_tbu)\n\t\t\t\tprint('exists')\n\t\t\t\tprint(dict_tbu)\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict = {}\n\t\t\t\t\tnew_dict['tweet'] = dict_tbu['tweet']\n\t\t\t\t\tnew_dict['tweet'].append(i[0:-2])\n\t\t\t\t\ttweet_tbu.update(**new_dict)\n\t\t\t\t\ttweet_tbu.reload()\n\t\t\telse:\n\t\t\t\tprint('not exists - with max')\n\t\t\t\tnew_dict = {}\n\t\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\t\telse:\n\t\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\t\tprint(new_dict)\n\t\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\t\tnew_tweet.save()\n\t\telse:\n\t\t\tprint('not exists - without')\n\t\t\tnew_dict = {}\n\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\tif max_feel != '':\n\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\telse:\n\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\tprint(new_dict)\n\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\tnew_tweet.save()\n\tresult = db.Tweet.objects()\n\treturn(result)",
"def perform_analysis_tagging(input_file, output_file, analysis_func, fields):\n stem_df = pd.read_csv(input_file, encoding='utf8')\n\n # Initialize Dataframe fields\n for field in fields:\n stem_df[field] = 0.0\n\n # Iterate over all tweets in dataset\n for index, row in stem_df.iterrows():\n # Clean + stem tweet\n stripped_text = row.text[2:-1]\n cleaned_text = clean_tweet(stripped_text)\n stemmed_tweet = stem_tweet(cleaned_text)\n\n # Analyze sentiment and record scores\n analysis_res = analysis_func(stemmed_tweet)\n for field in fields:\n stem_df[field][index] = analysis_res[field]\n\n if index % 100 == 0:\n print(\"Completed #\", index)\n\n # Save analyzed sentiment to CSV\n stem_df.to_csv(output_file, index=False)",
"def main(rundir, outputfile):\n # Read avg_count for all runs in the ranking\n results = list()\n for run in Runs(rundir):\n filename = run.get_file(name='results/analytics.json')\n doc = util.read_object(filename=filename)\n results.append(doc)\n # Delay execution to allow for testing running post-processing\n # workflows\n time.sleep(1)\n # Write analytics results. Ensure that output directory exists:\n # influenced by http://stackoverflow.com/a/12517490\n if not os.path.exists(os.path.dirname(outputfile)):\n try:\n os.makedirs(os.path.dirname(outputfile))\n except OSError as exc: # guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(outputfile, \"at\") as f:\n json.dump(results, f)",
"def main(inputfile, outputfile):\n # Count number of lines, characters, and keep track of the longest line\n max_line = ''\n total_char_count = 0\n line_count = 0\n with open(inputfile, 'r') as f:\n for line in f:\n line = line.strip()\n line_length = len(line)\n total_char_count += line_length\n line_count += 1\n if line_length > len(max_line):\n max_line = line\n # Create results object\n results = {\n 'avg_count': total_char_count / line_count,\n 'total_count': total_char_count,\n 'max_len': len(max_line),\n 'max_line': max_line\n }\n # Write analytics results. Ensure that output directory exists:\n # influenced by http://stackoverflow.com/a/12517490\n if not os.path.exists(os.path.dirname(outputfile)):\n try:\n os.makedirs(os.path.dirname(outputfile))\n except OSError as exc: # guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(outputfile, \"at\") as f:\n json.dump(results, f)",
"def parse_sentiment_file(self, file):\n\n file_sentiment = file['documentSentiment']\n file_entities = [x['name'] for x in file['entities']]\n file_entities = self.sentence_sep.join(file_entities)\n\n if self.extract_sentiment_text:\n file_sentences_text = [x['text']['content'] for x in\n file['sentences']]\n file_sentences_text = self.sentence_sep.join(file_sentences_text)\n file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\n\n file_sentences_sentiment = pd.DataFrame.from_dict(\n file_sentences_sentiment, orient='columns').sum()\n file_sentences_sentiment = file_sentences_sentiment.add_prefix(\n 'document_').to_dict()\n\n file_sentiment.update(file_sentences_sentiment)\n\n df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T\n if self.extract_sentiment_text:\n df_sentiment['text'] = file_sentences_text\n\n df_sentiment['entities'] = file_entities\n df_sentiment = df_sentiment.add_prefix('sentiment_')\n\n return df_sentiment",
"def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return",
"def parse_sentiment_file(self, file):\n \n file_sentiment = file['documentSentiment']\n file_entities = [x['name'] for x in file['entities']]\n file_entities = self.sentence_sep.join(file_entities)\n\n if self.extract_sentiment_text:\n file_sentences_text = [x['text']['content'] for x in file['sentences']]\n file_sentences_text = self.sentence_sep.join(file_sentences_text)\n file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\n \n file_sentences_sentiment = pd.DataFrame.from_dict(\n file_sentences_sentiment, orient='columns').sum()\n file_sentences_sentiment = file_sentences_sentiment.add_prefix('document_').to_dict()\n \n file_sentiment.update(file_sentences_sentiment)\n \n df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T\n if self.extract_sentiment_text:\n df_sentiment['text'] = file_sentences_text\n \n df_sentiment['entities'] = file_entities\n df_sentiment = df_sentiment.add_prefix('sentiment_')\n \n return df_sentiment",
"def read_data(self):\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, self.ratings = ratings()\n self.base_rating = 3.0\n self.binarize()\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\n sentiment = dict(reader)\n self.sentiment = {}\n # added stemming for sentiment keywords\n for key, val in sentiment.items():\n stemmed_key = self.stemmer.stem(key)\n self.sentiment[stemmed_key] = val",
"def sentimentAnalysis(fileName, city, outFileName):\n tweetTokenizer = TweetTokenizer()\n punct = list(string.punctuation)\n stopwordList = stopwords.words('english') + punct + ['rt', 'via', '...']\n vaderSent = vaderSentimentAnalysis(fileName, tweetTokenizer, stopwordList)\n vaderSent['city'] = city\n vaderSent = vaderSent[vaderSent['sentiment'] < 0]\n vaderSent.to_csv(outFileName)",
"def get_tweets_from_file(self, filename):\n # empty list to store parsed tweets\n tweets = []\n if not os.path.isfile(filename):\n print('Could not find file: ', filename)\n return -1\n\n with open(filename, mode='r', encoding='utf-8') as f:\n # parsing tweets one by one\n for tweet in f:\n print(tweet.strip())\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.strip()\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.strip())\n\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets",
"def read_data(self):\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, self.ratings = ratings()\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\n self.sentiment = dict(reader)\n\n self.titlesOnly = []\n\n for entry in self.titles:\n titleOnly = entry[0].split(' (')[0]\n self.titlesOnly.append(titleOnly.lower())\n self.sentiment.update({self.p.stem(k): v for k, v in self.sentiment.items()})",
"def main():\n filename = \"data/exercise.csv\"\n analyze(filename)",
"def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment",
"def performLexiconBasedSentimentAnalysis(data):\n opinions = data[0]\n taggedTweets = data[3]\n sentiments_mapping = lexiconBasedSentimentPrediction(\n taggedTweets) # identify the sentiment orientation of each tweet\n for key in sentiments_mapping:\n opinions[key].setSO(sentiments_mapping[key]) # set the sentiment orientation for each tweet\n return opinions",
"def __yago_counts(self):\n\n num_lines = 0\n print(\"Calculating Yago occurrences\")\n custom_freq = {}\n with open(\n os.path.join(self.base_url, \"generic/p_e_m_data/aida_means.tsv\"),\n \"r\",\n encoding=\"utf-8\",\n ) as f:\n for line in f:\n num_lines += 1\n\n if num_lines % 5000000 == 0:\n print(\"Processed {} lines.\".format(num_lines))\n\n line = line.rstrip()\n line = unquote(line)\n parts = line.split(\"\\t\")\n mention = parts[0][1:-1].strip()\n\n ent_name = parts[1].strip()\n ent_name = ent_name.replace(\"&\", \"&\")\n ent_name = ent_name.replace(\""\", '\"')\n\n x = ent_name.find(\"\\\\u\")\n while x != -1:\n code = ent_name[x : x + 6]\n replace = unicode2ascii(code)\n if replace == \"%\":\n replace = \"%%\"\n\n ent_name = ent_name.replace(code, replace)\n x = ent_name.find(\"\\\\u\")\n\n ent_name = self.wikipedia.preprocess_ent_name(ent_name)\n if ent_name in self.wikipedia.wiki_id_name_map[\"ent_name_to_id\"]:\n if mention not in custom_freq:\n custom_freq[mention] = {}\n ent_name = ent_name.replace(\" \", \"_\")\n if ent_name not in custom_freq[mention]:\n custom_freq[mention][ent_name] = 1\n\n return custom_freq",
"def sentiment(self) -> Dict[str, float]:",
"def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)",
"def analyze(content):\r\n client = language.LanguageServiceClient()\r\n\r\n document = types.Document(\r\n content=content,\r\n type=enums.Document.Type.PLAIN_TEXT)\r\n annotations = client.analyze_sentiment(document=document)\r\n\r\n # Write results to GCS \r\n return annotations.document_sentiment.score",
"def analyze_text_sentiment(raw_data_path):\n client = language.LanguageServiceClient()\n\n with open(raw_data_path, 'r') as review_file:\n content = review_file.read()\n\n document = types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n # data for evaluation\n return magnitude, score",
"def generateSentimentAnalysis(self, fs_db, cleaned_submissions, cleaned_tweets):\n all_posts = []\n\n for p in range(len(cleaned_submissions)):\n print('reddit', self.clean(cleaned_submissions[p][3]))\n all_posts.append(self.clean(cleaned_submissions[p][3]))\n\n for t in range(len(cleaned_tweets)):\n print('twitter', self.clean(cleaned_tweets[t][2]))\n all_posts.append(self.clean(cleaned_tweets[t][2]))\n \n if len(all_posts) == 0:\n raise Exception(\"No crawled data\")\n\n count = 0\n\n for c in all_posts:\n blob = TextBlob(c)\n\n polarity = blob.sentiment.polarity\n subjectivity = blob.sentiment.subjectivity\n\n doc_ref = fs_db.collection(u'sentimentAnalysis').document('first')\n if (polarity != 0 and subjectivity != 0):\n count += 1\n doc_ref.set({str(count): {'post': c, 'polarity': polarity, 'subjectivity':subjectivity}}, merge=True)\n\n with open('wc.txt', 'w') as output:\n for data in all_posts:\n output.write('%s\\n' % data)",
"def get_sentiment_lexicon(datafile):\n user_dict = {}\n item_dict = {}\n feature_dict ={}\n aspect_index = 0\n\n for tupel in datafile.iterrows():\n line = tupel[0]\n user_id = datafile[\"user_id\"][line]\n item_id = datafile[\"business_id\"][line]\n list_len = len(datafile['absa'][line])\n if user_id not in user_dict:\n user_dict[user_id] = []\n if item_id not in item_dict:\n item_dict[item_id] = []\n for i in range(0, list_len):\n feature = datafile['absa'][line][i]['aspect']\n fetaure_confidence = datafile['absa'][line][i]['aspect_confidence']\n polarity = datafile['absa'][line][i]['polarity']\n polarity_confidence = datafile['absa'][line][i]['polarity_confidence']\n if feature not in feature_dict:\n feature_dict[feature] = aspect_index\n aspect_index = aspect_index+1\n user_dict[user_id].append([feature, fetaure_confidence, polarity, polarity_confidence])\n item_dict[item_id].append([feature, fetaure_confidence, polarity, polarity_confidence])\n return [feature_dict, user_dict, item_dict]",
"def main():\n chatfile_name = sys.argv[1]\n analyze(chatfile_name)",
"def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif queries_type == \"txt_option\":\n d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == \"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == \"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None",
"def calculate_results(file_path):\r\n\torg_dict = dict()\r\n\tmisc_dict = dict()\r\n\tper_dict = dict()\r\n\tloc_dict = dict()\r\n\twith open(\"development_output.csv\") as f:\r\n\t\tcount = 0;\r\n\t\tfor line in f:\r\n\t\t\tcount+=1\r\n\t\t\tif count is 2:\r\n\t\t\t\tl = line[4:] #string without Category\r\n\t\t\t\tpairs = l.split()\r\n\t\t\t\torg_dict = {tuple(pair.split('-')) : 'ORG' for pair in pairs}\r\n\t\t\tif count is 3:\r\n\t\t\t\tl = line[5:] #string without Category\r\n\t\t\t\tpairs = l.split()\r\n\t\t\t\tmisc_dict = {tuple(pair.split('-')) : 'MISC' for pair in pairs}\r\n\t\t\tif count is 4:\r\n\t\t\t\tl = line[4:] #string without Category\r\n\t\t\t\tpairs = l.split()\r\n\t\t\t\tper_dict = {tuple(pair.split('-')) : 'PER' for pair in pairs}\r\n\t\t\tif count is 5:\r\n\t\t\t\tl = line[4:] #string without Category\r\n\t\t\t\tpairs = l.split()\r\n\t\t\t\tloc_dict = {tuple(pair.split('-')) : 'LOC' for pair in pairs}\r\n\t\r\n\twith open(file_path) as f:\r\n\t\tcount = 0\r\n\t\ttotal_entities = 0\r\n\t\tgold_entities = 0\r\n\t\ttrue_positives = 0\r\n\t\tpredicted_entities = len(org_dict)+len(misc_dict)+len(per_dict)+len(loc_dict) \r\n\t\tfor line in f:\r\n\t\t\tcount+=1\r\n\t\t\tif(count%3!=0):\r\n\t\t\t\tcontinue\r\n\t\t\ttag_set = line.lower().split()\r\n\t\t\ti =0\r\n\t\t\tis_in_middle_of_word = False\r\n\t\t\tstarting_index = 0\r\n\t\t\tending_index = 0\r\n\t\t\ttag_to_match = \"\"\r\n\t\t\twhile(i<len(tag_set)):\r\n\t\t\t\ttag = tag_set[i]\r\n\r\n\t\t\t\t#print(tag)\r\n\t\t\t\ttotal_entities+=1\r\n\r\n\t\t\t\tif(is_in_middle_of_word):\r\n\t\t\t\t\tif(tag.split(\"-\")[0] is \"i\"):\r\n\t\t\t\t\t\tending_index+=1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif(tag_to_match == \"b-org\"):\r\n\t\t\t\t\t\t\tif((str(starting_index), str(ending_index)) in org_dict):\r\n\t\t\t\t\t\t\t\ttrue_positives+=1\r\n\t\t\t\t\t\telif(tag_to_match == \"b-misc\"):\r\n\t\t\t\t\t\t\tif((str(starting_index), str(ending_index)) in misc_dict):\r\n\t\t\t\t\t\t\t\ttrue_positives+=1\r\n\t\t\t\t\t\telif(tag_to_match == \"b-per\"):\r\n\t\t\t\t\t\t\tif((str(starting_index), str(ending_index)) in per_dict):\r\n\t\t\t\t\t\t\t\ttrue_positives+=1\r\n\t\t\t\t\t\telif(tag_to_match == \"b-loc\"):\r\n\t\t\t\t\t\t\tif((str(starting_index), str(ending_index)) in loc_dict):\r\n\t\t\t\t\t\t\t\ttrue_positives+=1\r\n\t\t\t\t\t\tis_in_middle_of_word = False\r\n\t\t\t\tif(\"b\" in tag):\r\n\t\t\t\t\tgold_entities+=1\r\n\t\t\t\t\tis_in_middle_of_word = True\r\n\t\t\t\t\tstarting_index = ending_index = total_entities-1\r\n\t\t\t\t\ttag_to_match = tag\r\n\t\t\t\t\t#starting_index=predicted_entities\r\n\t\t\t\t\t# while(tag!=\"o\" and i<len(tag_set)):\r\n\t\t\t\t\t# \ti+=1\r\n\t\t\t\t\t# \tpredicted_entities+=1\r\n\t\t\t\t\t# ending_index=predicted_entities-1\r\n\t\t\t\t\t# if(tag is \"b-org\"):\r\n\t\t\t\t\t# \tif((starting_index, ending_index) in org_dict):\r\n\t\t\t\t\t# \t\ttrue_positives+=1\r\n\r\n\t\t\t\ti+=1\t\r\n\r\n\r\n\r\n\tprint(\"Total entities: \"+str(total_entities))\r\n\tprint(\"Gold entities: \"+str(gold_entities))\r\n\tprint(\"True positives: \" + str(true_positives))\r\n\tprecision = float(true_positives)/predicted_entities\r\n\trecall = float(true_positives)/gold_entities\r\n\tprint(\"Precision: \" + str(precision))\r\n\tprint(\"Recall: \" + str(recall))\r\n\tprint(\"F1 score: \" + str((2*precision*recall)/(precision+recall)))"
]
| [
"0.6742306",
"0.64086676",
"0.631625",
"0.6295548",
"0.6106927",
"0.60053074",
"0.5944882",
"0.5902826",
"0.58235234",
"0.5807723",
"0.57983965",
"0.57389295",
"0.5731411",
"0.5680243",
"0.5669391",
"0.56486934",
"0.55876726",
"0.55819345",
"0.5570177",
"0.5555029",
"0.553321",
"0.5503595",
"0.5501124",
"0.54908127",
"0.54811835",
"0.5473",
"0.54683566",
"0.54545665",
"0.5437752",
"0.5419531"
]
| 0.7130047 | 0 |
checks if no_bell map doesn't prohibit bells today | def is_no_bell_day():
today = time.localtime()[:3]
for r in no_bell:
if today >= r[0] and today <= r[1]:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)",
"def power_bells(state):\n if not pinlessMode:\n if state:\n for pin in bellPins:\n GPIO.output(pin, GPIO.HIGH)\n elif not state:\n for pin in bellPins:\n GPIO.output(pin, GPIO.LOW)\n else:\n logging.debug(\"Bell state: \" + str(state))",
"def _check_number_of_bells(self) -> bool:\n if self.row_generator.stage == 0:\n self.logger.debug(\"Place holder row generator. Wheatley will not ring!\")\n return False\n if self._tower.number_of_bells < self.row_generator.stage:\n self.logger.warning(f\"Row generation requires at least {self.row_generator.stage} bells, \"\n + f\"but the current tower has {self._tower.number_of_bells}. \"\n + \"Wheatley will not ring!\")\n return False\n if self._tower.number_of_bells > self.row_generator.stage + 1:\n if self.row_generator.stage % 2:\n expected = self.row_generator.stage + 1\n else:\n expected = self.row_generator.stage\n self.logger.info(f\"Current tower has more bells ({self._tower.number_of_bells}) than expected \"\n + f\"({expected}). Wheatley will add extra cover bells.\")\n return True",
"def causes_not_in_bridge_map(self, df):\n check = set(df.loc[df['bridge_code'].isnull(), 'acause'])\n if len(check) > 0:\n print(\"These acauses are not in the bridge map: {}\".format(check))",
"def test_dead_man_alerts_when_no_breath(sim_sampler, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n for i in range(num_of_samples):\n sim_sampler.sampling_iteration()\n\n assert len(events.alerts_queue) > 0\n\n all_alerts = list(events.alerts_queue.active_alerts)\n assert any(a.code == alerts.AlertCodes.NO_BREATH for a in all_alerts),\\\n f\"NO_BREATH is not in {all_alerts}\"",
"def _bot_assigned_bell(self, bell: Bell) -> bool:\n return self._tower.is_bell_assigned_to(bell, self._user_name)",
"def cell_is_blocked(self, y, x, map_data):\n symbol = map_data[y][x]\n # collision: obstacle, bridge, mirror (all types), anti-tank (all types)\n if symbol == self.OBSTACLE_SYMBOL or symbol == self.BRIDGE_SYMBOL or symbol == self.BRICK_SYMBOL or \\\n symbol == self.MIRROR_UL_SYMBOL or symbol == self.MIRROR_UR_SYMBOL or \\\n symbol == self.MIRROR_DL_SYMBOL or symbol == self.MIRROR_DR_SYMBOL or \\\n symbol == self.ANTI_TANK_UP_SYMBOL or symbol == self.ANTI_TANK_DOWN_SYMBOL or \\\n symbol == self.ANTI_TANK_LEFT_SYMBOL or symbol == self.ANTI_TANK_RIGHT_SYMBOL or \\\n symbol == self.ANTI_TANK_DESTROYED_SYMBOL:\n return True\n return False",
"def check_loss(self):\n return POKEMON in self.get_game()",
"def test_alerts_when_no_breath(app, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n app.run_iterations(num_of_samples)\n assert alerts.AlertCodes.NO_BREATH in events.alerts_queue.active_alerts, \\\n f\"NO_BREATH missing from: {events.alerts_queue.active_alerts}\"",
"def isBlocked(self):\n cal = self.request.get('form.widgets.calendarConfig')\n if cal is not None:\n return (cal == ['bloque'])\n wrapper = getSAWrapper('gites_wallons')\n session = wrapper.session\n for heb in getHebergementsForProprio(self.context, session):\n if heb.heb_calendrier_proprio == 'bloque':\n return True\n return False",
"def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")",
"def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1",
"def allBoatsSunk(self):\n for boat in self.boats:\n if not boat.isCaput():\n return False\n return True",
"def checkMap(self):\n return True",
"def is_off(self):\n return self is Milk.BAD_MILK",
"def event1924():\n header(1924)\n if_not_in_world_area(1, 13, 0)\n if_not_in_world_area(1, 13, 1)\n if_condition_true(0, 1)\n flag.disable(11310201)",
"def isBlocked(mapObj, gameStateObj, x, y):\n\n if isWall(mapObj, x, y):\n return True\n\n elif x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return True # x and y aren't actually on the map.\n\n elif (x, y) in gameStateObj['stars']:\n return True # a star is blocking\n\n return False",
"def bolt_check(self):\n for x in self.get_bolts():\n if x.get_velocity() > 0:\n self.set_plyrbolts(1)",
"def has_cooling_system(bpr):\n\n if bpr.hvac['type_cs'] in {'T1', 'T2', 'T3'}:\n return True\n elif bpr.hvac['type_cs'] in {'T0'}:\n return False\n else:\n raise",
"def is_boring(state):\n return state.boring_moves >= state.just_stop",
"def business_day(self): \n\n if self.time_stamp.weekday() not in (5, 6) and not holiday(self.time_stamp):\n return True \n return False",
"def _should_ignore_events_on_leaf(self, leaf: LeafNode, event_types_listeners):\n if len(self.__freeze_map) == 0:\n # freeze option disabled\n return False\n for freezer in self.__active_freezers:\n for freezer_leaf in event_types_listeners[freezer.type]:\n if freezer_leaf.get_event_name() not in self.__freeze_map:\n continue\n if leaf.get_event_name() in self.__freeze_map[freezer_leaf.get_event_name()]:\n return True\n return False",
"def cell_is_game_over(self, y, x, map_data):\n # check for water\n if map_data[y][x] == self.WATER_SYMBOL:\n return True\n\n # check for anti-tank\n # up direction\n for i in range(y, -1, -1):\n if map_data[i][x] == self.ANTI_TANK_DOWN_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # down direction\n for i in range(y, self.y_size):\n if map_data[i][x] == self.ANTI_TANK_UP_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # left direction\n for i in range(x, -1, -1):\n if map_data[y][i] == self.ANTI_TANK_RIGHT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # right direction\n for i in range(x, self.x_size):\n if map_data[y][i] == self.ANTI_TANK_LEFT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # no water or anti-tank danger\n return False",
"def update_needs_bg_power_workaround(data):\n return \"bg_power\" in data",
"def is_onhold(self) -> bool:",
"def is_bull_market(self):\n return self.port_return(self.data.last('2M')) >= .2",
"def event_m20_11_x73(z52=_):\n \"\"\"State 0,1: Defeat determination\"\"\"\n IsChrDead(0, z52)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0",
"def black_kingside_castling(self):\n return (self.castling[2] and self.empty((1, 7)) and self.empty((1, 8))\n and not self.attacked((1, 6), WHITE) and not\n self.attacked((1, 7), WHITE))",
"def is_caught_up_well_enough_for_government_work():\n return config.CAUGHT_UP or (config.BLOCKCHAIN_SERVICE_LAST_BLOCK and config.CURRENT_BLOCK_INDEX >= config.BLOCKCHAIN_SERVICE_LAST_BLOCK - 1)",
"def is_water(self):\n return False"
]
| [
"0.63722193",
"0.6049356",
"0.5962841",
"0.59310937",
"0.5906065",
"0.5905496",
"0.5834034",
"0.58233607",
"0.5822433",
"0.5809948",
"0.5805469",
"0.57956964",
"0.57908773",
"0.5769487",
"0.5678093",
"0.5604709",
"0.5599803",
"0.5576898",
"0.5575153",
"0.55698776",
"0.5542476",
"0.5524395",
"0.5496104",
"0.5487028",
"0.5479499",
"0.5461474",
"0.54274994",
"0.5425857",
"0.5418054",
"0.54134357"
]
| 0.7161038 | 0 |
schedule events for current day, this is usually called at the beginning of each new day to schedule events for that day only + one event for rescheduling next day | def reschedule():
if not schedule.empty():
purge_events()
today_s = tuple_to_str(time.localtime()[:3])
# first check if exception entry exist for today in datemap
if today_s in datemap:
schedule_day(datemap[today_s])
else:
# otherwise schedule it as normal weekday
schedule_day(days[time.strftime("%A")]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def schedule_task(self, name, date):\n pass",
"def startSchedule(self):\n DPxStartDinSched()",
"def bootstrap_events():\n import datetime\n import random\n # let's have each event occur once between 3 and 6 pm, \n # some day in the next month, and then recur for the next 1 to 8 weeks\n\n now = datetime.datetime.now().replace(minute=0, second=0)\n\n for program in Program.objects.all():\n start_hour = random.randint(15, 18)\n the_date = now.replace(hour=start_hour) + datetime.timedelta(random.randint(0, 31))\n duration = random.randint(1,6) * 30\n next_week = datetime.timedelta(7)\n\n program.events.add(EventDate.objects.create(\n date=the_date, \n duration_mins = duration))\n\n for next_occur in range(random.randint(1,8)):\n the_date += next_week\n program.events.add(EventDate.objects.create(\n date = the_date,\n duration_mins = duration))\n\n print \"Scheduled\", program.events.count(), \"events for\", program",
"def _run_scheduled_daily_tasks():\n worker.add_task(daily.run)",
"def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False",
"def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, 
lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks",
"def _create_schedules(self):\n\n ''''''",
"def set_schedule(self, new_schedule):\n #first, set all the others to inactive\n\n new_schedule.deprecated=False\n if new_schedule.started == None or new_schedule.started <= datetime.utcnow():\n new_schedule.started=datetime.utcnow()\n for sched in self.weekly_schedule:\n if not sched.deprecated:\n #sched.deprecated=True\n sched.ended=datetime.utcnow()\n sched.save()\n elif new_schedule.started > datetime.utcnow():\n #if it's in the future, then don't deprecate the future schedule, just procede along and let the system set the dates correctly\n pass\n self.weekly_schedule.append(new_schedule)\n self.save()",
"def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)",
"def nextEvent(self, systemDate):\n while True:\n calendar_list = self.service.calendarList().list(pageToken=self.page_token).execute()\n #print(calendar_list) This will print the entire payload\n for calendar_list_entry in calendar_list['items']:\n if (calendar_list_entry['summary'] == 'Work'): # Check if the correct calender even exist in the calendar pool\n workFlag = True\n ID = (calendar_list_entry['id'])\n #print(ID)\n #print(\"The user calendar 'Work' exists\")\n self.page_token = calendar_list.get('nextPageToken')\n if not self.page_token:\n break\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n events_result = self.service.events().list(calendarId=ID, timeMin=now,\n maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n #Uncomment for debugging\n #print(\"No upcoming events founds.\")\n self.recEvent = None\n for event in events:\n start = event['start'].get('dateTime') #Ex:2021-02-13T03:30:00-08:00 (date)T(Time-Timezone)\n splDate = start.split(\"T\")\n stime = splDate[1].split(\"-\")\n rstime = stime[0].rsplit(\":\", 1)\n end = event['end'].get('dateTime')\n end1= end.split(\"T\")\n endtime= end1[1].split(\"-\")\n rendtime = endtime[0].rsplit(\":\", 1)\n title = (event['summary'])\n print(title)\n #The first events of the same date will be added to the table\n if systemDate == splDate[0]:\n if (\"Meeting\") in title:\n print(\"Upcoming meeting:\", title)\n self.recEvent = {'Title' : title, 'Date': splDate[0], 'Start' : rstime[0], 'End' : rendtime[0]}\n break\n else:\n self.recEvent = None\n else:\n self.recEvent = None\n self.workFlag = False\n del now\n return (self.recEvent)",
"def schedule_next_event(self):\n if self.events:\n self.event = self.events.pop()\n self.timeout_counter = self.event.get(\"timeout\", 0)\n self.label.set_text(\n self.event[\"template\"].format(time=self.timeout_counter)\n )\n glib.timeout_add_seconds(1, self.on_timeout_cb)\n else:\n # Return Accept response\n # if there are no other events scheduled\n self.response(gtk.RESPONSE_ACCEPT)",
"def schedule(self, datetime, function, parameters=None):\n if datetime <= self.current_time:\n raise ValueError(\"Cannot schedule an event in the past\")\n if datetime <= self.end_time:\n self.events.append(Event(datetime, function, parameters))\n self.events.sort(key=lambda event: event.datetime)",
"def _use_default_schedule(self):\n def gen_day():\n dl = []\n ll = [-1, '', -1, '', '']\n for i in range(8):\n dl.append(ll[:])\n rl = []\n for i in range(4):\n rl.append(dl[:])\n return rl\n\n self.schedule = {\n 'current_week': [1, date.today().isocalendar()[1]],\n 'lessons_time': [\n ['8:00', '9:35'],\n ['9:45', '11:20'],\n ['11:40', '13:15'],\n ['13:25', '15:00'],\n ['15:20', '16:55'],\n ['17:05', '18:40'],\n ['18:45', '20:20'],\n ['20:25', '22:00']\n ],\n 'schedule': {\n 'Monday': gen_day(),\n 'Tuesday': gen_day(),\n 'Wednesday': gen_day(),\n 'Thursday': gen_day(),\n 'Friday': gen_day(),\n 'Saturday': gen_day()\n },\n 'subgroup': 0\n }",
"def solve_one_day_todo_events(self, todo_items_list):\n seconds_until_tomorrow = self.seconds_left_util_tomorrow()\n if len(todo_items_list) < 1:\n # indicates that today has no todo events, then cruise will sleep until tomorrow\n # time.sleep(seconds_until_tomorrow + 1)\n # seems can not directly sleep to tomorrow, directly continue\n time.sleep(10)\n else:\n # add now time to calculate intervals\n now = datetime.datetime.now().replace(microsecond=0, second=0)\n now_item = {\n 'time': now\n }\n todo_items_list.append(now_item)\n todo_items_list = sorted(todo_items_list, key=lambda x: x['time'])\n todo_still_items = [i for i in todo_items_list if i['time'] >= now]\n if len(todo_still_items) > 1:\n intervals = [(todo_still_items[i]['time'] - todo_still_items[i - 1]['time']).seconds for i in\n range(1, len(todo_still_items))]\n print('[TODO CRUISER] still todo intervals: ', intervals)\n for i, interval in enumerate(intervals):\n print('[TODO CRUISER] start solve event {}, interval {}'.format(i, interval))\n\n # I am changing time.sleep(interval) into following one, which make sense more\n # so that sleep will interrupt when local_file changed,\n for t in range(interval):\n if self.changes_event_handler.CHANGE_FLAG:\n # if detected changes, then return and reset flag to False\n self.changes_event_handler.CHANGE_FLAG = False\n print('[IMPORTANT] changes detected, solve today return.')\n return\n else:\n time.sleep(1)\n todo_item = todo_still_items[i + 1]\n try:\n print(' ... try to execute mission.')\n class_name = todo_item['class']\n func = todo_item['func']\n args = todo_item['args']\n\n c_obj = globals()[class_name](self.msg_executor)\n func = getattr(c_obj, func)\n func(*args)\n print('[CHEER] time mission executed!!!!!!')\n except KeyError:\n pass\n # sleep more 1 minute\n time.sleep(61)\n else:\n pass\n time.sleep(50)\n # time.sleep(seconds_until_tomorrow + 1)",
"def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule",
"async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()",
"def process_all_events(self):\n day_ordinal = self.end_date_ordinal\n scratchpad_dict = self.all_students_dict.copy()\n if len(self.change_events):\n for day_index in xrange(self.number_of_days-1, 0, -1):\n self.active_dicts[day_index] = scratchpad_dict.copy()\n if (self.change_events[self.event_index].date_ordinal == \n day_ordinal):\n while ((self.event_index > -1) and \n self.change_events[\n self.event_index].date_ordinal ==\n day_ordinal):\n event = self.change_events[self.event_index]\n self.process_event(event, day_index,\n scratchpad_dict)\n self.event_index -= 1\n day_ordinal -= 1",
"def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n\n # TODO noitem found\n print(datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00'))\n\n nextStartTime = datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n if delta < 0:\n print(\"capture next\")\n nextStartTime = datetime.datetime.strptime(events[1]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n print(delta)\n\n if NOTIFY_THRESHOLD_SECOND > delta:\n alert_time_limit()\n else:\n set_normal()\n\n\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])",
"async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)",
"def _update_schedule(recurrences, time=None, for_object=None):\n time = time or datetime.utcnow()\n if for_object is None:\n for recurrence in recurrences:\n recurrence.next_scheduled = recurrence.utc_of_next_schedule(time)\n recurrence.previous_scheduled = time\n recurrence.save()\n else:\n for recurrence in recurrences:\n ct = ContentType.objects.get_for_model(for_object)\n obj, created = RecurrenceForObject.objects.get_or_create(\n recurrence=recurrence,\n content_type=ct,\n object_id=for_object.id\n )\n obj.next_scheduled = recurrence.utc_of_next_schedule(time)\n obj.previous_scheduled = time\n obj.save()",
"def test_create_next_event_daily(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=timezone('Europe/Paris')),\n 'Some description',\n EventRepetitionFrequency.daily)\n\n next_event = event.create_next_event()\n\n self.assertEqual(next_event.guild.id, 12345)\n self.assertEqual(next_event.title, 'Some title')\n self.assertEqual(next_event.timezone, timezone('Europe/Paris'))\n self.assertNotEqual(next_event.date, event.date)\n self.assertEqual(\n next_event.date,\n datetime(2020, 10, 11, 10, 10, tzinfo=timezone('Europe/Paris')))",
"def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]",
"def add_schedule(doc_user, date, schedule, logger):\n #my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar == None:\n logger.info('{}: calendar start'.format(doc_user[\"user_id\"]))\n my_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_calendar.insert_one(my_calendar)\n\n if not schedule:\n return False\n\n if len(schedule) > 5:\n logger.info('{}: day schedules are already full'.format(\n doc_user[\"user_id\"]))\n return False\n\n ret = 0\n for s in schedule:\n my_calendar[\"schedules\"] += [{\"date\": date,\n \"events_list\": [s]}]\n logger.info('{}: {} added into schedule'.format(\n date, s))\n ret += 1\n\n if ret >= 1:\n col_calendar.find_one_and_replace({\"User\": doc_user[\"_id\"]}, my_calendar)\n\n return True",
"def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))",
"def __run_schedules():\n while True:\n __scheduler.run()",
"def startService(self):\n super(_SiteScheduler, self).startService()\n self._transientSchedule(self.now(), self.now())",
"def _scheduled_update(now):\n _LOGGER.debug(\"%s: executing scheduled update\", self.entity_id)\n self.async_schedule_update_ha_state(True)\n self._update_listener = None",
"def __calender_events(self):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n pt=\"Getting the upcoming latest events\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pt)\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=1, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n pq=\"No upcoming events found.\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pq)\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n #start1=''.join(start)\n summary=event['summary']\n print start,summary\n requests.get(\"http://localhost:8080/statement?text=\"+start+\" \"+summary)",
"def schedule_start(self):\n self.initialize_scheduler()",
"def run_scheduled_tasks(self) -> None:\n self.scheduler.run(False)"
]
| [
"0.63996965",
"0.6364515",
"0.6270511",
"0.6233818",
"0.61795765",
"0.6167035",
"0.61531335",
"0.60690117",
"0.6052733",
"0.60286534",
"0.6019136",
"0.5975083",
"0.5972812",
"0.5967778",
"0.5949784",
"0.5916603",
"0.58772904",
"0.5870145",
"0.5862457",
"0.58550876",
"0.5811954",
"0.5797242",
"0.5780748",
"0.57805574",
"0.5772513",
"0.57400316",
"0.5736493",
"0.5729356",
"0.5729016",
"0.5720717"
]
| 0.7430452 | 0 |
Using the Fiona library convert GeopackageHelper to GeoJSON | def convert_gpkg_to_geojson(self,shape_fname, destdirectory):
features = []
crs = None
if not os.path.isfile(shape_fname):
self.logger.error('File not found: %s' % shape_fname)
self.opstatus.add_info(stage=6, msg = "Rounding coordinates to six decimal precision")
out_fname = os.path.join(destdirectory,os.path.basename(shape_fname).replace('.gpkg', '.geojson'))
with fiona.open(shape_fname, driver='GPKG') as source:
with fiona.open(out_fname, "w",driver='GeoJSON',crs = fiona.crs.from_epsg(4326),schema=source.schema) as sink:
for rec in source:
sink.write(rec)
self.logger.info('file written: %s' % out_fname)
self.opstatus.set_status(stage=6, status=1, statustext ="File successfully converted to GeoJSON with six decimal precision")
self.opstatus.add_success(stage=6, msg = "GeoJSON file successfully written")
return out_fname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)",
"def get_geojson():\n\n # check the file was already downloaded\n global GEOJSON\n if GEOJSON: return GEOJSON\n\n conn = None\n cur = None\n try:\n\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n cur.execute( \"\"\"SELECT row_to_json(fc) FROM \n ( SELECT 'FeatureCollection' As type, array_to_json(array_agg(f)) As features\n FROM (SELECT 'Feature' As type , ST_AsGeoJSON(lg.geom)::json As geometry, row_to_json(lp) As properties\n FROM exercise.states As lg INNER JOIN (SELECT gid,name FROM exercise.states) As lp\n ON lg.gid = lp.gid ) As f) As fc;\"\"\", (AsIs(settings.STATES_TABLE_NAME)))\n result = cur.fetchone()[0]\n\n #print(result)\n\n #make the result global\n GEOJSON = result\n return GEOJSON\n\n except Exception as e:\n raise Exception(e)\n\n finally:\n if conn: conn = None\n if cur: cur = None",
"def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)",
"def vegref2geojson( vegref, dagensverdi=False): \r\n \r\n \r\n vegstr = vvi2vegrefstring( vegref) \r\n \r\n \r\n fradato = vegref['ValidFrom'][0:10]\r\n tildato = vegref['ValidTo'][0:10]\r\n veglenkeid = vegref['ReflinkOID']\r\n veglenkeposisjon = round( float( vegref['Measure'] ), 8) \r\n \r\n X = float( vegref['RoadNetPosition']['X'] ) \r\n Y = float( vegref['RoadNetPosition']['Y'] ) \r\n coordinates = [X, Y]\r\n if 'Z' in vegref['RoadNetPosition']:\r\n coordinates.append( float( vegref['RoadNetPosition']['Z'] ) )\r\n \r\n geoj = { \"type\": \"Feature\",\r\n \"geometry\": {\r\n \"type\": \"Point\",\r\n \"coordinates\": coordinates\r\n },\r\n \"properties\": {\r\n \"vegref\" : vegstr, \r\n \"fradato\" : fradato, \r\n \"tildato\" : tildato,\r\n \"veglenkeid\" : veglenkeid, \r\n \"veglenkeposisjon\" : veglenkeposisjon\r\n }\r\n }\r\n \r\n if dagensverdi: \r\n params = { 'viewDate' : '2022-10-31', \r\n 'reflinkoid' : veglenkeid, \r\n 'rellen' : veglenkeposisjon } \r\n \r\n url = 'https://visveginfo-static.opentns.org/RoadInfoService/GetRoadReferenceForNVDBReference' \r\n r = requests.get( url, params=params) \r\n if r.ok and 'RoadReference' in r.text: \r\n data = xmltodict.parse( r.text ) \r\n if 'RoadCategory' in data['RoadReference'].keys(): \r\n geoj['properties']['dagensvegref'] = vvi2vegrefstring( data['RoadReference'] ) \r\n else: \r\n geoj['properties']['dagensvegref'] = '' \r\n else: \r\n geoj['properties']['dagensvegref'] = '' \r\n \r\n return geoj",
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def test_to_geojson(self):\n fc = self.read_feature()\n dest_filename = str(self.datadir.join('test.geojson'))\n fc.to_geojson(dest_filename)\n fc_check = read_feature_collection(dest_filename)\n self.check_feature(fc_check.features[0])",
"def get_allpoints_geojson():\n\n # check the file was already downloaded\n global GEOJSON\n if GEOJSON: return GEOJSON\n\n conn = None\n cur = None\n try:\n\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n cur.execute( \"\"\"SELECT row_to_json(fc) FROM \n ( SELECT 'FeatureCollection' As type, array_to_json(array_agg(f)) As features\n FROM (SELECT 'Feature' As type , ST_AsGeoJSON(lg.geom)::json As geometry, row_to_json(lp) As properties\n FROM %s As lg INNER JOIN (SELECT id,label,size FROM %s) As lp\n ON lg.id = lp.id ) As f) As fc;\"\"\", (AsIs(settings.BOOKMARKS_TABLE_NAME),AsIs(settings.BOOKMARKS_TABLE_NAME)))\n result = cur.fetchone()[0]\n\n #print(result)\n\n #make the result global\n GEOJSON = result\n return GEOJSON\n\n except Exception as e:\n raise Exception(e)\n\n finally:\n if conn: conn = None\n if cur: cur = None",
"def data_geojson(self):\n coordinates = self.value\n if not coordinates:\n return\n\n title = getattr(self.context, 'title', '') or ''\n description = getattr(self.context, 'description', '') or ''\n\n geo_json = {\n 'type': 'FeatureCollection',\n 'features': [\n {\n 'type': 'Feature',\n 'properties': {\n 'popup': u'<h3>{0}</h3><p>{1}</p>'.format(\n safe_unicode(title),\n safe_unicode(description)\n )\n },\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [\n coordinates[1], # lng\n coordinates[0] # lat\n ]\n }\n },\n ]\n }\n\n if self.mode == 'input':\n properties = geo_json['features'][0]['properties']\n properties['editable'] = True\n properties['no_delete'] = True\n properties['latinput'] = u'#{0}'.format(self.id_input_lat)\n properties['lnginput'] = u'#{0}'.format(self.id_input_lng)\n\n return json.dumps(geo_json)",
"def as_geojson(self):\n return _property_op(arctern.ST_AsGeoJSON, self)",
"def wrapGeoJSON(cities):\n cities_geojson = {\n \"type\": \"FeatureCollection\",\n \"features\": cities\n }\n\n return cities_geojson",
"def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:\n ...",
"def get_geojson(self, sql, context):\n result = self.db.query(sql).getresult()\n geo_objects = []\n\n for poly in result:\n poly = poly[0]\n geo_objects.append(geojson.Feature(geometry=geojson.loads(poly)))\n\n crs = {\n \"type\": \"name\",\n \"properties\": {\n \"name\": \"EPSG:\" + str(context[\"crs\"])\n }\n }\n collection = geojson.FeatureCollection(geo_objects, crs=crs)\n\n return {\n 'type': 'result',\n 'result': geojson.dumps(collection)\n }",
"def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )",
"def json2shp(ret, outFC):\r\n jsonFile = os.path.join(os.path.dirname(outFC), \"tempjsonOutput.json\")\r\n with open(jsonFile, \"wb\") as myJSON:\r\n myJSON.write(ret)\r\n gdf = geopandas.read_file(jsonFile, outFC)\r\n os.remove(jsonFile)\r\n return gdf",
"def geo_transform(self):\n pass",
"def products_2_geojson(products, filename):\n # import geopandas as gpd\n # import shapely\n\n assert isinstance(products, list), \"Expected 'products' to be a list\"\n assert filename and filename.strip() != '', \"Give me a valid filename\"\n\n gdf = geojson_2_geodataframe(products)\n\n gdf.to_file(filename, driver='GeoJSON')\n print(\"File '{}' written.\".format(filename))\n return",
"def convert_to_geojson(type, coords):\n\tgeojson = {\"type\": \"FeatureCollection\", \"features\": None}\n\n\tif type == \"location_field\":\n\t\tgeojson[\"features\"] = merge_location_features_in_one(coords)\n\telif type == \"coordinates\":\n\t\tgeojson[\"features\"] = create_gps_markers(coords)\n\n\treturn geojson",
"def load_from_geojson(self, filename_or_url):",
"def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict",
"def zip_geom():\r\n engine = get_sql_engine()\r\n zipgeom = text(\r\n \"\"\"\r\n SELECT zip_code, geom\r\n FROM philly_zipcode\r\n \"\"\"\r\n )\r\n zipgeom = gpd.read_postgis(zipgeom, con=engine)\r\n return zipgeom",
"def transform_data(self, outformat=None, epsg=None):\n out_data = geopandas.GeoDataFrame.copy(self.data)\n if epsg and str(self.get_epsg()) != epsg:\n out_data[out_data.geometry.name] = \\\n self.data.geometry.to_crs(epsg=epsg)\n out_data.crs = fiona.crs.from_epsg(epsg)\n if outformat == formats.JSON and self.default_output in (\n formats.PANDAS, formats.JSON):\n out_json = out_data.to_json()\n if out_data.crs:\n gj = json.loads(out_json)\n gj[\"crs\"] = {\n \"type\": \"name\",\n \"properties\": {\n \"name\": out_data.crs[\"init\"].upper()\n }\n }\n return json.dumps(gj)\n else:\n return out_json\n elif outformat in [formats.PANDAS, None]:\n return out_data\n else:\n raise GaiaException(\"Format {} not supported\".format(outformat))",
"def wkt_to_geojson(wkt_data: str) -> dict:\n parsed_wkt = wkt.loads(wkt_data)\n\n geo = geometry.mapping(parsed_wkt)\n\n if geo[\"type\"] == \"GeometryCollection\":\n feature_collection = []\n for g in geo[\"geometries\"]:\n feature = geojson.Feature(geometry=g)\n feature_collection.append(feature)\n return geojson.FeatureCollection(feature_collection)\n else:\n return geojson.Feature(geometry=geo)",
"def way_to_geojson(way):\n coords = [[c['lon'], c['lat']] for c in way['geometry']]\n bbox = {\n 'xmin': way['bounds']['minlon'],\n 'xmax': way['bounds']['maxlon'],\n 'ymin': way['bounds']['minlat'],\n 'ymax': way['bounds']['minlat']\n }\n\n try:\n tags = way['tags']\n except KeyError:\n tags = None\n\n return {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [coords]\n },\n 'properties': {\n 'tags': tags,\n 'bounds': bbox\n }\n }",
"def convert_to_geojson(path):\n outdir = path.rstrip('.zip')\n basename = outdir.split('/')[-1]\n\n if os.path.exists(outdir): # Delete any existing outdir\n shutil.rmtree(outdir)\n os.makedirs(outdir, exist_ok=True)\n unzip(path, '-d', outdir)\n\n geojson_files = []\n\n for filename in os.listdir(outdir):\n if filename.endswith(\".shp\"):\n shpFile = os.path.join(outdir, filename)\n geojsonFile = shpFile.replace('.shp', '.geojson')\n print(shpFile, geojsonFile)\n\n ogr_command = 'ogr2ogr -f \"GeoJSON\" -t_srs crs:84 {outpath} {inpath}'.format(outpath=quote(geojsonFile), inpath=quote(shpFile))\n\n os.popen(ogr_command).read()\n geojson_files.append(geojsonFile)\n\n return geojson_files",
"def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)",
"def geoJSON(self, as_json=True, using_style_template=True):\n properties_main = self.properties or {}\n properties_built = dict(id=self.id,\n status=self.status,\n analyst=self.analyst.username,\n created_at=datetime.strftime(self.created_at, '%Y-%m-%dT%H:%M:%S%Z'),\n updated_at=datetime.strftime(self.updated_at, '%Y-%m-%dT%H:%M:%S%Z'),\n )\n properties_template = self.template.properties or {}\n\n # properties_template can return a list from it's backing model, make sure we get the Dict\n if type(properties_template) == types.ListType:\n properties_template = properties_template[0]\n\n # srj: if using_style_template set, we're styling object from its feature id, else we'll\n # just use the style properties (which should already be included if defined for feature)\n # (we may want to set some defaults later on to make sure)\n if using_style_template:\n properties_built['template'] = self.template.id if hasattr(self.template, \"id\") else None\n\n properties = dict(properties_built.items() + properties_main.items() + properties_template.items())\n\n feature_type = FeatureType.objects.get(id=self.template.id)\n\n geojson = SortedDict()\n geojson[\"type\"] = \"Feature\"\n geojson[\"properties\"] = properties\n geojson[\"geometry\"] = json.loads(self.the_geom.json)\n\n if feature_type and using_style_template:\n geojson[\"style\"] = feature_type.style_to_geojson()\n else:\n geojson[\"style\"] = feature_type.style\n\n if(as_json):\n return clean_dumps(geojson)\n else:\n for key in properties:\n if isinstance(properties[key],str) or isinstance(properties[key], unicode):\n properties[key] = properties[key].replace('<', '<l').replace('>', '>').replace(\"javascript:\", \"j_script-\")\n return geojson",
"def geojson(self):\n return {\n \"type\": \"FeatureCollection\",\n \"features\": [f.geojson(i) for i, f in self._features.items()]\n }",
"def geojson(self, feature_id):\n lat, lon = self.lat_lon\n return {\n 'type': 'Feature',\n 'id': feature_id,\n 'geometry': {\n 'type': 'Point',\n 'coordinates': (lon, lat),\n },\n }",
"def prepare(dp: frictionless.package.Package, name: str):\n data = read_datapackage(dp)\n data[\"fid\"] = name + \"_\" + data[ID].astype(str)\n\n spatial = gpd.GeoDataFrame(\n data[\"fid\"],\n columns=[\"fid\"],\n geometry=gpd.points_from_xy(data.longitude, data.latitude),\n crs=\"EPSG:4326\",\n )\n\n # Other fields to json\n def np_encoder(object):\n \"\"\"Source: https://stackoverflow.com/a/65151218.\"\"\"\n if isinstance(object, np.generic):\n return object.item()\n\n other_cols = [\n x for x in data.columns if x not in VALUE_VARS + SPATIAL_VARS + ID_VARS\n ]\n\n # Int64 to int\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"] = (\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"].astype(int)\n )\n data = data.replace({np.nan: None})\n data[\"fields\"] = data[other_cols].to_dict(orient=\"records\")\n data[\"fields\"] = data[\"fields\"].apply(lambda x: json.dumps(x, default=np_encoder))\n\n # Unpivoting\n data = data.melt(id_vars=ID_VARS, value_vars=VALUE_VARS)\n\n # Remove nan\n data = data.dropna()\n\n # Conversion\n enermaps_data = utilities.ENERMAPS_DF\n enermaps_data[\"fid\"] = data[\"fid\"]\n enermaps_data[\"value\"] = data[\"value\"]\n enermaps_data[\"variable\"] = data[\"variable\"]\n enermaps_data[\"fields\"] = data[\"fields\"]\n enermaps_data[\"unit\"] = UNIT\n enermaps_data[\"israster\"] = ISRASTER\n\n return enermaps_data, spatial",
"def save_to_geojson(self, topology_map, filename):"
]
| [
"0.6658173",
"0.66399086",
"0.6400657",
"0.6355311",
"0.63249147",
"0.6312711",
"0.6296247",
"0.62948203",
"0.62946975",
"0.6292872",
"0.62386644",
"0.62367254",
"0.61942255",
"0.6115604",
"0.60356015",
"0.60196793",
"0.5957294",
"0.59480387",
"0.59430397",
"0.59273314",
"0.59171057",
"0.5891131",
"0.58818614",
"0.58780813",
"0.586728",
"0.5853381",
"0.58496386",
"0.5841477",
"0.5841312",
"0.5833468"
]
| 0.6736785 | 0 |
Rebuild the LCI/LCIA matrices from a new Monte Carlo sample or provided vector. | def rebuild_all(self, vector=None):
    if not hasattr(self, "positions"):
        self.load_data()
    if vector is not None and not isinstance(vector, np.ndarray):
        raise ValueError("`vector` must be a 1-d numpy array")
    if vector is not None:
        assert vector.shape == self.params.shape, \
            "Incorrect `vector` shape. Is {}, but should be {}".format(
                vector.shape, self.params.shape
            )
    # Copy to break references and avoid later manipulation by RNG
    self.sample = (self.rng.next() if vector is None else vector).copy()
    self.rebuild_technosphere_matrix(self.tech_sample)
    self.rebuild_biosphere_matrix(self.bio_sample)
    if self.lcia:
        self.rebuild_characterization_matrix(self.cf_sample)
    if self.weighting:
        self.weighting_value = self.weighting_sample
    if self.presamples:
        self.presamples.update_matrices() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()",
"def reconstruct(self, vector):\n return self.reconstruct_vectors(vector[None, :]).flatten()",
"def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])",
"def lci_calculation(self) -> None:\n self.supply_array = self.solve_linear_system()\n # Turn 1-d array into diagonal matrix\n count = len(self.dicts.activity)\n self.inventory = self.biosphere_matrix * sparse.spdiags(\n [self.supply_array], [0], count, count\n )",
"def LD_I_Vx(self, x):\n\t\tfor i in range(0, x + 1):\n\t\t\tself.ram[self.I + i] = self.V[i]",
"def LD_Vx_I(self, x):\n\t\tfor i in range(0, x + 1):\n\t\t\tself.V[i] = self.ram[self.I + i]",
"def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))",
"def fill_vector(group):\n vec = array([0 for _ in range(0, 300)], dtype=float)\n for dim, value in group.attributes[\"lda_vector\"]:\n vec[dim] = value\n return vec",
"def mutate_matrix(matrix):\n L = len(matrix)\n r_i = random.randrange(L)\n r_j = random.randrange(4)\n r = random.gauss(0,1)\n return [[matrix[i][j]+r*(i==r_i)*(j==r_j)\n for j in range(4)] for i in range(L)]",
"def _sample_lam(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_lam = self.lam\n \n # modify the feature ownership matrix\n self.lam = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.lam = old_lam",
"def reconstruct_vectors(self, vectors):\n return self.instance_vectors(self.project_vectors(vectors))",
"def fill_matrix(matrix, label, num):\n m, n = matrix.shape\n mat = np.zeros((num, n))\n label=list(label)\n for i in range(num):\n if i < m:\n mat[i, :] = matrix[i, :]\n\n # get a random vector\n else:\n vector_id = random.randint(0, i-1)\n vector1 = mat[vector_id, :]\n l1 = label[vector_id]\n\n # get a neighbors\n nid = get_nearest_indices(l1, mat[0:i, :])\n v2 = mat[nid, :]\n l2 = label[nid]\n\n n_vector, n_label = generate_data(vector1, v2, l1, l2)\n\n mat[i,:]=n_vector\n label.append(n_label)\n\n return mat,np.array(label)",
"def fromTranslation(cls, rows, cols, vector):\n data = np.eye(rows, cols)\n data[0:rows - 1, cols - 1] = vector[0:rows - 1]\n return cls.create(rows, cols, data)",
"def _multiply_matrix(self, v):\n\n self.inputs.grad.data.zero_()\n\n with torch.no_grad():\n v_features = self.lpips_model.features(self.inputs.detach() +\n self.h * v)\n D_phi_v = (\n normalize_flatten_features(v_features) -\n self.input_features\n ) / self.h\n\n torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)\n\n return self.inputs.grad.data.clone()",
"def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )",
"def normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = 1 / degree_vector\n\n return np.eye(length) - holders * weight_matrix",
"def set_random_vector(self):\n self.vector = vu.create_dense_random_vector(dimension)",
"def _make_random_matrix(self, n_components, n_features):",
"def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)",
"def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))",
"def from_vector_inplace(self, vector):\n self.pdm.from_vector_inplace(vector)\n # By here the pdm has updated our target state, we just need to\n # update the transform\n self.transform.set_target(self.target)",
"def rebuild(self):\n self.from_samples(self.samples)",
"def set_matrix_row(matrix, new_vector, row):\n set_matrix_cell(matrix, new_vector.x, row, 0)\n set_matrix_cell(matrix, new_vector.y, row, 1)\n set_matrix_cell(matrix, new_vector.z, row, 2)",
"def update(self, learning_rate, influence, input_vector, bmu):\n factor = learning_rate * influence\n self.vector = [x + factor * (y - x)\n for x, y in zip(self.vector, input_vector)]",
"def mutate(x, rvs, point=(0,0), insert=(0,0,None), delete=(0,0)):\n data = x.data\n idx = x.indices\n if idx.size==0:\n return x\n n_point = np.random.binomial(point[0], point[1])\n i_point = np.random.choice(x.size, size=n_point, replace=False)\n data[i_point] = rvs(n_point)\n # insertion\n n_insert = np.random.binomial(insert[0], insert[1])\n for _ in range(n_insert):\n while True:\n insert_idx = np.random.choice(insert[2]) if insert[2]\\\n else np.random.choice(x.shape[0])\n if insert_idx not in idx: break\n idx = np.append(idx, insert_idx)\n data = np.append(data, rvs(1))\n # deletion\n n_delete = np.random.binomial(delete[0], delete[1])\n i_delete = np.random.choice(idx.size, size=n_delete, replace=False)\n idx = np.delete(idx, i_delete)\n data = np.delete(data, i_delete)\n y = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),\n shape=x.shape)\n return y",
"def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)",
"def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self._n)))\n \n m = self._m\n \n n = self._n\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n \n distancias_matriz = []\n\n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = np.array(distancias_matriz)\n \n \n for vect in input_vects:\n\n # min_index is the index of the BMU\n \n lista_indices = [i for i in range(len(self._weightages))]\n \n min_index = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x]))\n\n # min_index_2 is the index of the 2nd BMU\n \n lista_indices.pop(min_index) # El indice es el mismo que el valor\n \n min_index_2 = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x])) \n \n r2 = np.sqrt(2)\n\n if np.sqrt(distancias_matriz[min_index][min_index_2]) > r2: \n# print('loc 1')\n# print(locaciones[min_index])\n# print('loc 2')\n# print(locaciones[min_index_2])\n contador_adyacentes += 1\n\n\n distance = np.linalg.norm(vect - self._weightages[min_index])\n \n distances.append(distance)\n \n to_return.append(self._locations[min_index]) \n \n # Quantization Error qe (the mean of all distances to the BMU)!\n self.distances = distances \n \n # Topographic error te\n self.proporcion = contador_adyacentes / len(input_vects)\n \n self.prom_dist = np.mean(self.distances)\n \n return to_return",
"def simulate_sparse(n_rows, n_columns, row_density=0.001):\n # X = sparse.lil_matrix((n_rows, n_columns))\n X = []\n counter = 0\n for row in range(n_rows):\n counter += 1\n if (counter % 1000) == 0:\n print(counter)\n\n # X[row] = sparse.random(1, n_columns, density=row_density)\n X.append(sparse.random(1, n_columns, density=row_density, format='lil'))\n\n return sparse.vstack(X).tocsr()",
"def fix_norm_and_length(vector):\n # normalize\n norm_vector = vector / np.linalg.norm(vector)\n\n # pad with zeros\n num_bits = get_bits_needed(len(vector))\n state_vector = np.zeros(2 ** num_bits, dtype=complex)\n for i in range(len(vector)):\n state_vector[i] = norm_vector[i]\n\n return state_vector",
"def lap_mat(self):"
]
| [
"0.5767239",
"0.54645854",
"0.5186119",
"0.5121431",
"0.51160043",
"0.51107913",
"0.5103802",
"0.510151",
"0.50146663",
"0.49944654",
"0.49386817",
"0.4923459",
"0.48787823",
"0.4870082",
"0.48589543",
"0.48388454",
"0.48099792",
"0.48059684",
"0.48047376",
"0.48034263",
"0.47795853",
"0.47771835",
"0.47699845",
"0.4757633",
"0.47495332",
"0.4749381",
"0.4743698",
"0.4736517",
"0.4719974",
"0.47152537"
]
| 0.5868529 | 0 |
serialize struct_time to timezone | def serialize_datetime(date_time) -> timezone:
    if isinstance(date_time, struct_time):
        return timezone.datetime.fromtimestamp(mktime(date_time)).replace(tzinfo=pytz.UTC)
    else:
        raise TypeError("Can't convert this type to datetime") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])",
"def _serialize_time(val):\n return val.isoformat()",
"def serialized_time(time):\n formatted = time.isoformat()\n if formatted.endswith(\"+00:00\"):\n formatted = formatted[:-6] + \"Z\"\n\n return formatted",
"def serialize_time(timestamp_input, *, use_tz=False):\n if isinstance(timestamp_input, tuple):\n return tuple(serialize_time(timestamp, use_tz=use_tz) for timestamp in timestamp_input)\n\n if timestamp_input is None:\n return '..'\n\n if not isinstance(timestamp_input, dt.date):\n raise ValueError('Expected a datetime object or a tuple of datetime objects')\n\n if use_tz and not isinstance(timestamp_input, dt.datetime):\n raise ValueError('Cannot ensure timezone information for datetime.date objects, use datetime.datetime instead')\n\n if use_tz and not timestamp_input.tzinfo:\n timestamp_input = timestamp_input.replace(tzinfo=dateutil.tz.tzutc())\n\n if not use_tz and isinstance(timestamp_input, dt.datetime) and timestamp_input.tzinfo:\n timestamp_input = timestamp_input.replace(tzinfo=None)\n\n return timestamp_input.isoformat().replace('+00:00', 'Z')",
"def _serialize_datetime(val):\n return datetime_to_iso8601(val)",
"def to_json(self, value):\r\n if value is None:\r\n return None\r\n if isinstance(value, time.struct_time):\r\n # struct_times are always utc\r\n return time.strftime('%Y-%m-%dT%H:%M:%SZ', value)\r\n elif isinstance(value, datetime.datetime):\r\n if value.tzinfo is None or value.utcoffset().total_seconds() == 0:\r\n # isoformat adds +00:00 rather than Z\r\n return value.strftime('%Y-%m-%dT%H:%M:%SZ')\r\n else:\r\n return value.isoformat()\r\n else:\r\n raise TypeError(\"Cannot convert {!r} to json\".format(value))",
"def utc_to_local_timestruct(ts, orig_tz=UTC):\n return utc_to_local_timestamp(time.mktime(ts),orig_tz)",
"def _convert_struct_time_to_dt(stime):\n return date.fromtimestamp(mktime(stime))",
"def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__",
"def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__",
"def json_serialize_datetime(self, obj):\n if isinstance(obj, datetime.datetime):\n return int(time.mktime(obj.timetuple()))\n\n raise TypeError(\"Type %s not serializable\" % type(obj))",
"def testProtobufEncodeDateTimeMessageWithTimeZone(self):\n for tz_offset in (30, -30, 8 * 60, 0):\n mine = HasDateTimeMessage(value=datetime.datetime(\n 1970, 1, 1, tzinfo=util.TimeZoneOffset(tz_offset)))\n nested = NestedDateTimeMessage()\n nested.value = message_types.DateTimeMessage(\n milliseconds=0, time_zone_offset=tz_offset)\n\n my_encoded = protobuf.encode_message(mine)\n encoded = protobuf.encode_message(nested)\n self.assertEquals(my_encoded, encoded)",
"def _marshal_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour=0,\n tm_min=0,\n tm_sec=0,\n tm_wday=-1,\n tm_yday=-1,\n tm_isdst=-1,\n ):\n _struct_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour,\n tm_min,\n tm_sec,\n tm_wday,\n tm_yday,\n tm_isdst,\n )",
"def json_encoder(obj):\n if isinstance(obj, datetime.datetime):\n return time.mktime(obj.timetuple())\n if isinstance(obj, datetime.timedelta):\n return obj.total_seconds()",
"def timezone():\n \n pass",
"def serialize_date(dt):\n if dt.tzinfo:\n dt = dt.astimezone(UTC).replace(tzinfo=None)\n return dt.isoformat()",
"def _convert_struct_time_to_dt(stime):\n\n dt = datetime.datetime.fromtimestamp(mktime(stime))\n\n return dt.date()",
"def encode_timestamp(self, time_obj):\n\n if isinstance(time_obj, (dt.datetime, dt.date, dt.time)):\n return time_obj.strftime(self.timestamp_format)\n else:\n return time_obj",
"def ts_datetime(val):\n return val.isoformat() + \"Z\"",
"def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)",
"def UpdateStructTime(self, t):\n self.date.UpdateStructTime(t)\n self.time.UpdateStructTime(t)",
"def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))",
"def transform_time(t):\n if t is None:\n return None\n elif isinstance(t, basestring):\n return t\n\n dt = datetime.fromtimestamp(t, UTC())\n return dt.strftime('%Y-%m-%dT%H:%M:%S%z')",
"def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)",
"def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()",
"def dump_datetime(value):\n return value.strftime('%Y-%m-%dT%H:%M:%SZ')",
"def time_to_js(obj):\n if isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()",
"def serialize_datetime(self, obj):\r\n if isinstance(obj, datetime.datetime):\r\n return obj.isoformat()\r\n raise TypeError(\"Type not serializable\")",
"def serialize_dt(value):\n return value.isoformat() if hasattr(value, 'isoformat') else value",
"def timestamp(fmt, timestruct=None):\n return _time.strftime(fmt, timestruct or _time.gmtime())"
]
| [
"0.6696328",
"0.66784537",
"0.6304099",
"0.6207683",
"0.605701",
"0.5979643",
"0.5960567",
"0.5920082",
"0.5860897",
"0.5860897",
"0.5845807",
"0.5836228",
"0.58273715",
"0.5815514",
"0.5766322",
"0.5720496",
"0.5704188",
"0.5628591",
"0.56161356",
"0.5607155",
"0.56001204",
"0.5513459",
"0.5490024",
"0.5478178",
"0.5450412",
"0.5427186",
"0.5404001",
"0.5402308",
"0.54007214",
"0.53921956"
]
| 0.7104688 | 0 |
set properties for object from parsed_data. | def set_properties(self):
    # assign feed entries from the root of the parsed data
    if hasattr(self.parsed_data, "entries"):
        self.items = self.parsed_data.entries
    # check if it is a feed root or feed element
    if hasattr(self.parsed_data, "feed"):
        source_data = self.parsed_data.feed
    else:
        source_data = self.parsed_data
    # assign available properties not listed in keymap
    self.title = source_data.title
    self.link = source_data.link
    for key in self.parsed_data.keymap.keys():
        if hasattr(self, key) and not getattr(self, key):
            attr_value = source_data.get(key)
            if isinstance(attr_value, struct_time):
                attr_value = self.serialize_datetime(attr_value)
            setattr(self, key, attr_value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])",
"def assign_values(self, data):\n\n for key in self.__dict__.keys():\n if key in data.keys():\n setattr(self, key, data[key]) # handy built-in function",
"def _update_attributes(self, data):\n self._set_avatar(data)\n self.boosts_since = parse_boosts_since(data)\n self.flags = parse_flags(data)\n self.nick = parse_nick(data)\n self.pending = parse_pending(data)\n self.role_ids = parse_role_ids(data)\n self.timed_out_until = parse_timed_out_until(data)",
"def _set_data(self, new_data):\n for name, field in self._get_fields().items():\n if name in new_data:\n try:\n setattr(self, f\"__{name}\", field.from_raw(new_data[name]))\n except (fields.ValidationError, ValueError):\n # should at least log validation and value errors\n # this can happen in case of e.g. fields type change\n pass",
"def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number",
"def set_JobProperties(self,data):\n tp=type(data)\n if tp.__name__=='dict':\n list_context=list(JobProperty._nInstancesContextDict.keys())\n for i in data.keys():\n for j in data[i].keys():\n if list_context.count(i+'.'+j)==1:\n jp=JobProperty._nInstancesContextDict[i+'.'+j]\n jp.set_Value(data[i][j])\n self._log.info(\"The JobProperty %s has been set to %s\",\n i+'.'+j,data[i][j])\n else:\n self._log.warning(\"The JobProperty %s does not exist\",\n i+'.'+j)\n else:\n raise ValueError('The received data is has not the expected'\n 'type/format')",
"def parse_data( self ):\n self.parsed_data = dict( self.results )",
"def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id",
"def set_metadata(self, data):\r\n pass",
"def parse(self, data_payload):\n self.id = data_payload.get('id')\n self.name = data_payload.get('title')\n images = data_payload.get('images')\n preview_gif = images.get('preview_gif')\n original = images.get('original')\n self.url = original.get('url')\n self.preview = preview_gif.get('url')\n return self",
"def __init__(self, raw_data: Dict) -> None:\n self.raw_data = raw_data\n self.__extract_common_attrs(raw_data)\n\n # Fetch data with DATA_KEY or simply use the initial data.\n # In some cases the DATA_KEY is the same as the object attribute.\n # For example:\n # \"comments\": [{\n # \"comment_id\": 44444,\n # \"comment\": \"Hello, world!\"\n # }]\n # This object has a `comment` attribute but its DATA_KEY is also `comment`:\n # \"comment\": {\"comment_id\": 44444,\n # \"key_id\": 12345,\n # \"comment\": \"This is a test.\"}\n # This is an edge case happening only twice, so to overcome it\n # just check the value type under the given key.\n if self.DATA_KEY in raw_data and \\\n (isinstance(raw_data[self.DATA_KEY], dict)):\n data = raw_data[self.DATA_KEY]\n else:\n data = raw_data\n\n for attr in self.ATTRS:\n setattr(self, attr, data.get(attr, None))",
"def set_properties(struct):",
"def updateFromDict(self, data):\n for key, value in data.items():\n setattr(self, key, value)",
"def from_dict(self, data):\n for field in [\"first_name\", \"last_name\", \"username\", \n \"email\", \"city\", \"state\", \"active_plan\"]:\n if field in data:\n setattr(self, field, data[field])",
"def from_dict(self, data: dict):\n if 'title' in data:\n self.title = data['title']\n if 'description' in data:\n self.description = data['description']\n if 'deadline' in data:\n self.deadline = parser.parse(data['deadline'])\n return",
"def _set_attributes(self):",
"def loads(self, data):\n self._id = data.get('id', -1)\n self._created = data.get('created', 0) # datetime.strptime(data.get('created', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()\n self._stage = data.get('stage', 0) # self.stage_from_str(data.get('stage', ''))\n self._dir = data.get('direction', 0) # self.direction_from_str(data.get('direction', ''))\n self._timeframe = data.get('timeframe') # timeframe_from_str(data.get('timeframe', 't'))\n self._expiry = data.get('expiry', 0) # datetime.strptime(data.get('expiry', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()",
"def parse(cls, data):\n raise NotImplementedError",
"def parse_data( self ):\n super( PivotGraph, self ).parse_data()\n new_parsed_data = {}\n parsed_data = getattr( self, 'parsed_data', self.results )\n for pivot, data in parsed_data.items():\n new_pivot = self.parse_pivot( pivot )\n data = self.parse_datum( data )\n if data != None:\n new_parsed_data[ new_pivot ] = data\n self.parsed_data = new_parsed_data",
"def from_data(cls,data):\n\n new_object = cls() # Only this line needs to be updated\n new_object.data = data\n\n return new_object",
"def _update_object(self, data_dict):\r\n pass",
"def fromDict(cls, data):\n obj = cls()\n obj.raw = data\n for name, value in data.iteritems():\n if name in cls.SIMPLE_PROPS:\n setattr(obj, name, value)\n elif name in cls.COMPLEX_PROPS:\n value = cls.COMPLEX_PROPS[name].fromDict(value)\n setattr(obj, name, value)\n\n return obj",
"def __init__(self, data):\n # loop through data\n for x in data:\n # create pitches list if attribute name is pitches\n if x == 'pitches':\n self.pitches = []\n for y in data[x]:\n self.pitches.append(Pitch(y))\n else:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])",
"def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass",
"def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))",
"def set_data(self, data):\n\n pass",
"def LoadProps( self, props_dict ):\n for k in ( 'timeValue', ):\n if k in props_dict:\n setattr( self, k, props_dict[ k ] )\n\n for k in ( 'dataSetSelections', ):\n if k in props_dict:\n cur_attr = props_dict[ k ]\n\tfor name in cur_attr.keys():\n\t cur_value = cur_attr[ name ]\n\t del cur_attr[ name ]\n\t cur_attr[ DataSetName( name ) ] = cur_value\n\t#end for name\n\n setattr( self, k, cur_attr )\n #end if k in props_dict\n #end for k\n\n super( PlotWidget, self ).LoadProps( props_dict )\n self.container.dataSetMenu.UpdateAllMenus()\n wx.CallAfter( self.UpdateState, replot = True )",
"def from_data(cls, data):\n self = object.__new__(cls)\n self.id = parse_id(data)\n self._set_icon(data)\n self.bot = parse_bot(data)\n self.description = parse_description(data)\n self.name = parse_name(data)\n return self",
"def parse(self, data):\n raise NotImplementedError",
"def from_data(cls, data):\n self = object.__new__(cls)\n self.required = parse_required(data)\n self.title = parse_title(data)\n self.type = parse_type(data)\n self.values = parse_values(data)\n return self"
]
| [
"0.6668565",
"0.66331637",
"0.6311598",
"0.6184064",
"0.61690265",
"0.6161215",
"0.614354",
"0.6140493",
"0.60973036",
"0.6087782",
"0.60834193",
"0.6038224",
"0.59710634",
"0.5877489",
"0.58292496",
"0.5816957",
"0.5805474",
"0.5780091",
"0.5763294",
"0.5756133",
"0.5750351",
"0.57410085",
"0.5733476",
"0.57209104",
"0.57202804",
"0.5716935",
"0.5705671",
"0.5681961",
"0.5669981",
"0.56646603"
]
| 0.73541945 | 0 |
This method finds the nearest neighbors in a table/view called 'datatable' for each object in 'objectlist' within a search radius 'radius'. | def object_finder(
    self, datatable, objectlist, ralist, declist, radius,
    longquery=True
):
    pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nearest_neighbors(self, kdt, radius=8):\n neighbors = kdt.query_radius(np.array([self.position[:-1]]), r = radius)\n return neighbors[0][1:]",
"def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]",
"def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist",
"def __find_neighbors(self, list_of_nodes):\n for node in list_of_nodes:\n x_pos = node.location[0]\n y_pos = node.location[1]\n if x_pos - 1 >= 0:\n # find the node in the list of nodes\n # add it as a neighbor of the current node\n neighbor = self.__find_neighbor_at(x_pos - 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if x_pos + 1 <= self.__n_rows - 1:\n neighbor = self.__find_neighbor_at(x_pos + 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos - 1 >= 0:\n neighbor = self.__find_neighbor_at(x_pos, y_pos - 1, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos + 1 <= self.__n_columns - 1:\n neighbor = self.__find_neighbor_at(x_pos, y_pos + 1, list_of_nodes)\n node.add_neighbor(neighbor)",
"def neighbourhood(self, query, radius):\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))",
"def get_neighbors(index, radius, height, width):\n # Calculate the original 2-D coordinates of the central pixel.\n row, col = index // width, index % width\n\n # Get a grid of possible candidates that are close to the central pixel.\n r = int(radius)\n x = np.arange(max(col - r, 0), min(col + r + 1, width))\n y = np.arange(max(row - r, 0), min(row + r + 1, height))\n X, Y = np.meshgrid(x, y)\n\n # Determine which candidates are within the given radius of the pixel.\n R = np.sqrt(((X - col)**2 + (Y - row)**2))\n mask = R < radius\n return (X[mask] + Y[mask]*width).astype(np.int), R[mask]",
"def find_objects_within_radius(self, radius):\n\n if type(radius) != float:\n radius = float(radius)\n\n objects_nearby = []\n\n for item in self.object_store:\n\n if item.Position == None:\n continue\n\n if math.sqrt(math.pow((item.Position.X - self.agent.Position.X), 2) + math.pow((item.Position.Y - self.agent.Position.Y), 2) + math.pow((item.Position.Z - self.agent.Position.Z), 2)) <= radius:\n objects_nearby.append(item)\n\n return objects_nearby",
"def find_distances_and_rows(self) -> None:\r\n \r\n # Initialize results dictionary and counter\r\n dists = {}\r\n rows = {}\r\n num = 1\r\n curr_row = 1\r\n\r\n # Add all the results to a dictionary with unique ids\r\n for row in self.field.get_rows():\r\n for c in range(len(row) - 1):\r\n\r\n # Get distance between plant and the one next to it\r\n d = distance_between(row[c].get_center(),\r\n row[c + 1].get_center())\r\n \r\n d = self.pixels_to_inches(d)\r\n d = round(d, 2)\r\n\r\n # Add the distance to the results dictionary\r\n dists[num] = d\r\n rows[num] = curr_row\r\n num += 1\r\n\r\n curr_row += 1\r\n\r\n # Set this rulers dists and rows\r\n self.dists = dists\r\n self.rows = rows",
"def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)",
"def cone_search(conn, table, center_ra, center_dec, radius, schema='public'):\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n query = 'SELECT * FROM {}.{} WHERE q3c_join(%s, %s, ra, dec, %s)'\n # This one isn't needed until > 1 million rows\n #query = 'SELECT * FROM {}.{} WHERE q3c_radial_query(ra, dec, %s, %s, %s)'\n cur.execute(psycopg2.sql.SQL(query).format(\n psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table)),\n (center_ra, center_dec, radius))\n rows = cur.fetchall()\n cur.close()\n\n return rows",
"def nearest_loop(row, gdf2,geometry_cols=['geo_lon','geo_lat'],src_column=None,surrounding=False):\r\n def haversine_distance(origin, destination):\r\n lon1, lat1 = origin\r\n lon2, lat2 = destination\r\n radius = 6371000 # meters\r\n \r\n dlat = math.radians(lat2-lat1)\r\n dlon = math.radians(lon2-lon1)\r\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n d = radius * c\r\n return d\r\n\r\n # start the main iteration\r\n if row.geometry.type == 'Polygon':\r\n point_xy = np.array((row.geometry.centroid.x,\r\n row.geometry.centroid.y))\r\n if row.geometry.type in ['Point', 'LineString']:\r\n point_xy = np.array((row.geometry.x, row.geometry.y)) \r\n # Select most current stations datasets.\r\n closest = None\r\n closest_distance = 99999999999\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest = station\r\n closest_distance = d\r\n # if surroung \r\n if surrounding:\r\n closest1 = []\r\n closest_distance = closest_distance+surrounding\r\n i = 0\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest1.append(station)\r\n i += 1\r\n closest = closest1\r\n return closest[src_column]",
"def get_nearest(infected_coordinates, uninfected_coordinates, d):\n # Create tree from the GPS coordinates of uninfected users\n tree = BallTree(uninfected_coordinates, leaf_size=15, metric='haversine')\n indices,distances=tree.query_radius(infected_coordinates, r=d,return_distance=True)\n indices=indices.transpose()\n distances=distances.transpose()\n return indices,distances",
"def get_neighbours_round(self, cell, radius):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width or radius < 2):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-radius,y+radius+1) if 0<=i<width for j in range(x-radius,x+radius+1) if (0<=j<length)]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\ti , j = neigh\n\t\t\tif round(math.sqrt((j-x)**2+(i-y)**2),4) < round(radius,4):\n\t\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours",
"def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):\n neigh_ind = [np.where(d <= radius)[0] for d in dist]\n\n if return_distance:\n if self.effective_metric_ == \"euclidean\":\n dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]\n else:\n dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]\n results = dist, neigh_ind\n else:\n results = neigh_ind\n return results",
"def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points",
"def investigate(self, nearest_neighbors):\n pass",
"def NN_finder_all(initial_config_data, cut_off_distance, box_dim, path_to_test_dir, atom_list = None, save_results = False, re_calc = False):\n\t# set up path_to_file and check results out of this function before calling it\n\t# if check_results is True: \n\t# if path_to_file is None or os.path.exists(path_to_file):\n\t# raise Exception(\"NN results file not found, please specify the correct path to the file\")\n\t\t\n\tpath_to_nn_results = path_to_test_dir + \"/nn_results_dict.pkl\"\n\t\n\tif re_calc is False:\n\t\tif os.path.exists(path_to_nn_results):\n\t\t\tprint \"nn results dictionary already calculated and saved in pkl file, skip calculation\"\n\t\t\treturn pickle.load(open(path_to_nn_results,'r'))\n\tnn = dict()\n\t\t\n\t# if there is no atom_list specified, use all atoms in initial_config_data\n\tif atom_list is None:\n\t\tatom_list = (initial_config_data[\"item\"]).tolist()\n\t\n\t_data = initial_config_data\n\t\n\tgroups = Atom.classify_df(_data)\n\t\n\t#_atom_data = initial_config_data[['x','y','z']]\n\t\n\t_interested_data = _data.loc[_data['item'].isin(atom_list)]\n\t\n\tinterested_groups = Atom.classify_df(_interested_data)\n\t\n\t#_interested_atom = _interested_data[['x','y','z']]\n\t\n\t\n\t# build the efficient nearest neighbor KDTree algorithm\n\t# default distance metric Euclidian norm p = 2\n\t# create tree object using the larger points array\n\tfor (i, int_group) in interested_groups.items():\n\t\tfor (j, atom_group) in groups.items():\n\t\t\t# comparing atom_type_i and atom_type_j\n\t\t\tfor pair in [(i,j),(j,i)]:\n\t\t\t\tif pair in cut_off_distance:\n\t\t\t\t\t curr_cut_off = cut_off_distance[pair]\n\t\t\t\n\t\t\t# iterate over each row seems inefficient for (index, curr_atom) in int_group.iterrows()\n\t\t\tresult_tree = PeriodicCKDTree(box_dim, atom_group[['x','y','z']].values)\n\t\t\tresult_groups = result_tree.query_ball_point(int_group[['x','y','z']].values, curr_cut_off)\n\t\t\t#indices = np.unique(IT.chain.from_iterable(result_groups))\n\t\t\t\n\t\t\t#for (int_NN,(index,int_atom)) in (result_groups,int_group.iterrows()):\n\t\t\tk = 0\n\t\t\tfor index,int_atom in int_group.iterrows():\n\t\t\t\t# int_NN is a list of index of NN, index is according to the order\n\t\t\t\t# in atom_group \n\t\t\t\t# curr_NN is a dataframe storing NN found for current atom_group\n\t\t\t\tint_NN = result_groups[k]\n\t\t\t\tcurr_NN = atom_group.iloc[int_NN]\n\t\t\t\tif int_atom[\"item\"] not in nn:\n\t\t\t\t\tnn[int_atom[\"item\"]] = curr_NN\n\t\t\t\telif int_atom[\"item\"] in nn:\n\t\t\t\t\tnn[int_atom[\"item\"]] = nn[int_atom[\"item\"]].append(curr_NN)\t\t\t\t\n\t\t\t\tk = k + 1\t\n\t# it is best practice to save this NN dictionary results into a pkl file \n\t# to prevent rerun, if this file exists, let user know that\n\t# the file_of_nearest_neighbor exists before calling it\n\tif save_results is True:\n\t\twith open(path_to_nn_results, 'w') as f:\n\t\t\tpickle.dump(nn,f)\n\t\t\tf.close()\n\treturn nn",
"def lsh_search(self,query_index, num_neighbors = 10):\r\n def l1(u,v):\r\n return dt.norm(np.array(u)-np.array(v), ord=1)\r\n \r\n start_time = time.time()\r\n #print(start_time)\r\n buckets = self.get_candidates(query_index)\r\n distance1 = buckets.map(lambda p : p + (l1(p[0],query_index[0]),))\r\n distance_sort = distance1.map(lambda y : (y[3],y[1]))\r\n distance_sorted = distance_sort.sortByKey()\r\n lsh_End_time = time.time()- start_time\r\n return (distance_sorted.take(num_neighbors),lsh_End_time)\r\n raise NotImplementedError",
"def proximity_search(self, latitude, longitude, radius):\n\n hashcode = geohash.encode(latitude=latitude, longitude=longitude)\n centerpoint = (latitude, longitude)\n\n tmp_hashcode = ''\n for x in hashcode:\n # Go through the hashcode character by character\n tmp_hashcode += x\n lat, lng, delta_lat, delta_lng = geohash.decode(tmp_hashcode,\n delta=True)\n overall_lat = 2 * 1000 * haversine(\n point1=(latitude - delta_lat, longitude),\n point2=(latitude + delta_lat, longitude)\n )\n overall_lng = 2 * 1000 * haversine(\n point1=(latitude, longitude-delta_lng),\n point2=(latitude, longitude+delta_lng)\n )\n\n dist = min(overall_lng, overall_lat)\n if dist < radius:\n tmp_hashcode = tmp_hashcode[:-1]\n break\n\n if tmp_hashcode == '':\n raise ValueError('Radius larger than earth')\n\n precision = len(tmp_hashcode)\n\n search_hashes = self._get_adjoining_hashes(hashcode=hashcode,\n precision=precision)\n search_hashes.append(tmp_hashcode)\n\n possible_points = []\n result_values = []\n\n for search_hash in search_hashes:\n possible_points.extend(self.storage.values(prefix=search_hash))\n\n for point_id in possible_points:\n point = self.points_by_id[point_id]\n dist = 1000 * haversine(centerpoint, point)\n if dist <= radius:\n result_values.append((point_id, dist))\n\n sorted_results = sorted(result_values, key = lambda x: x[1])\n final_results = [x[0] for x in sorted_results]\n return final_results",
"def nearest(coordinate, coordinate_list, limit=None):\r\n distances = []\r\n coordinate_lat=coordinate[0]\r\n coordinate_lon=coordinate[1]\r\n for c in coordinate_list:\r\n if len(c)==5:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[3][0], c[3][1]), c))\r\n else:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[0], c[1]), c)) \r\n distances.sort()\r\n if limit:\r\n return distances[:limit]\r\n return distances",
"def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")",
"def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict",
"def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]",
"def findNeighbors(dataPoint,DistanceRanks,eps):\r\n neighbours = []\r\n #print(len(DistanceRanks[0]))\r\n #print(dataPoint.id)\r\n for idx in range(len(DistanceRanks)):\r\n temp = DistanceRanks[dataPoint.id][idx]\r\n if temp[1]< eps:\r\n neighbours.append(temp[0])\r\n else: return neighbours",
"def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)",
"def getNeighbors(obj, separation, filter=\"all\"):\n if not isinstance(obj, Dso):\n if isinstance(obj, str):\n obj = Dso(obj)\n else:\n raise TypeError('Wrong type obj. Either a Dso or string type was expected.')\n if not (isinstance(separation, int) or isinstance(separation, float)):\n raise TypeError('Wrong type separation. Either a int or float type was expected.')\n\n cols = 'objects.name'\n tables = 'objects'\n params = 'type != \"Dup\" AND ra != \"\" AND dec != \"\" AND name !=\"' + obj.getName() + '\"'\n if filter.upper() == \"NGC\":\n params += \" AND name LIKE 'NGC%'\"\n elif filter.upper() == \"IC\":\n params += \" AND name LIKE 'IC%'\"\n\n neighbors = []\n for item in _queryFetchMany(cols, tables, params):\n possibleNeighbor = Dso(item[0])\n distance = getSeparation(obj, possibleNeighbor)[0]\n if distance <= (separation / 60):\n neighbors.append((possibleNeighbor, distance))\n\n return sorted(neighbors, key=lambda neighbor: neighbor[1])",
"def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance",
"def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...",
"def find_nearest_DPRIPs (swrad,DPRIPs):\r\n s_pt = swrad.geometry\r\n new2d =[] \r\n for cood in s_pt: \r\n pt_2d = Point(cood.x,cood.y)\r\n new2d.append(pt_2d)\r\n s_pt = new2d\r\n ND_list = [] \r\n N_DPRIP = [] \r\n # for all DPRIPs \r\n for gp in zip(s_pt,DPRIPs):\r\n s = gp[0]\r\n DPRIP = gp[1]\r\n # Create empty list for storing distance \r\n all_dist = [] \r\n # check DPRIPs for each source \r\n for pt in DPRIP: \r\n if pt.type == \"MultiPoint\": \r\n split_pt = [(n.x, n.y) for n in pt]\r\n for spt in split_pt: \r\n npt = Point(spt)\r\n dist = npt.distance(s)\r\n all_dist.append(dist)\r\n else: \r\n dist = pt.distance(s)\r\n all_dist.append(dist)\r\n # find nearest intersection point index\r\n near_dist = min(all_dist)\r\n n_id = all_dist.index(near_dist)\r\n n_DPRIP = DPRIP[n_id]\r\n\r\n ND_list.append(near_dist)\r\n N_DPRIP.append(n_DPRIP)\r\n \r\n return ND_list,N_DPRIP",
"def nearest_in_n_sphere(self, value, r):\n return self.nearest_in_bounding_box(value, r)\n \n # This seems right\n # return self.binary_search_find_nearest_neighbors_in_radius(value, r)\n \n # This seems wrong\n # return self.recur_find_nearest_n_neighbor(value, r)"
]
| [
"0.6175332",
"0.60992473",
"0.6093694",
"0.60548466",
"0.5978704",
"0.5970773",
"0.5889855",
"0.5846486",
"0.57988584",
"0.57911736",
"0.5780814",
"0.5780404",
"0.5774157",
"0.5773087",
"0.57641494",
"0.57413846",
"0.5715195",
"0.57103866",
"0.57003397",
"0.5674021",
"0.5669006",
"0.5641752",
"0.56353927",
"0.5629811",
"0.5614074",
"0.56126326",
"0.5610503",
"0.5583303",
"0.558083",
"0.5569348"
]
| 0.7034364 | 0 |
Load data file, parse spike data, then send initialization data to dim reduction | def setup(self):
# get unsorted vs sorted units
data_dict = mat73.loadmat(self.file)
units_unsorted = []
units_sorted = []
for ch_curr in data_dict['spikes']:
units_unsorted.append(ch_curr[0]) # from data description, first unit is unsorted
for unit_curr in ch_curr[1:]:
if unit_curr is not None:
units_sorted.append(unit_curr)
# getting binned spikes
bin_size_ms = 10
# window for binning
mintime, maxtime = 100, 0
for spk_times_curr in units_sorted:
mintime = min(spk_times_curr[0], mintime)
maxtime = max(spk_times_curr[-1], maxtime)
# get binned spikes
spk_bins = np.arange(mintime, maxtime, bin_size_ms/1000)
spks_binned = []
for i, unit_curr in enumerate(units_sorted):
spks_binned_curr, _ = np.histogram(unit_curr, bins=spk_bins, range=(spk_bins[0], spk_bins[-1]))
spks_binned.append(spks_binned_curr)
self.data = np.array(spks_binned).T
self.l = 1 # columns per update
l1 = 100 # columns used to initialize
self.t = l1
self.num_iters = np.floor((self.data.shape[0] - l1 - self.l)/self.l).astype('int')
#send to dim reduction
init_id = self.client.put([self.data.shape[0], self.data[:l1, :]], "init_data")
logger.info("Putted init data")
self.q_out.put(init_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = float(A[-1])\n elif A[0] == \"L\": # Number of particles\n L = float(A[-1])\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) \n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence",
"def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = int(float(A[-1]))\n elif A[0] == \"L\": # Number of particles\n L = int(float(A[-1]))\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) \n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence",
"def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = float(A[-1])\n elif A[0] == \"L\": # Number of particles\n L = float(A[-1])\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) \n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n L = int(L)\n N = int(N)\n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence\n \n return",
"def __init__(self, data_path):\n self.perf_data = dill.load(open(data_path, 'rb'))\n #print(self.perf_data[0])\n print(len(self.perf_data))\n self.length = len(self.perf_data)\n\n # perform a few pre-processing steps\n for i in range(self.length):\n # store the length of the pitch contours for use later\n self.perf_data[i]['length'] = len(\n self.perf_data[i]['pitch_contour'])\n # store the length of the pitch contours for use later\n self.perf_data[i]['pitch_contour'] = self.normalize_pitch_contour(\n self.perf_data[i]['pitch_contour'])\n print(self.perf_data[0])",
"def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)",
"def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0",
"def loadData(self, file):\n self.data = batchImport(file, self.ps)",
"def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]",
"def ReadData(self, tstep):\n fname = self.fname[tstep]\n t0 = self.tind[tstep]\n \n print 'Reading SUNTANS data at time: %s...'%datetime.strftime(self.timei[tstep],'%Y-%m-%d %H:%M:%S') \n nc = Dataset(fname)\n \n self.time = nc.variables['time'][t0]\n \n self.temp = nc.variables['temp'][t0,:,:]\n self.salt = nc.variables['salt'][t0,:,:]\n self.uc = nc.variables['uc'][t0,:,:]\n self.vc = nc.variables['vc'][t0,:,:]\n self.nu_v = nc.variables['nu_v'][t0,:,:]\n self.rho = nc.variables['rho'][t0,:,:]\n self.tau_x = nc.variables['tau_x'][t0,:]\n self.tau_y = nc.variables['tau_y'][t0,:]\n self.eta = nc.variables['eta'][t0,:]",
"def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )",
"def __init__(self):\n f = open(configuration.dataDirectory+'MuonEfficiencies_Run_2012A_2012_B_53X.pkl', 'r')\n if f :\n self._map = pickle.load(f)\n self._eta_range = ''\n self._pt_range = ''\n else :\n print 'ERROR: Input file for Trigger efficiencies not existing!'",
"def __init__(self):\n f = open(configuration.dataDirectory+'MuonEfficiencies_Run_2012A_2012_B_53X.pkl', 'r')\n if f :\n self._map = pickle.load(f)\n self._eta_range = ''\n self._pt_range = ''\n else :\n print 'ERROR: Input file for Trigger efficiencies not existing!'",
"def Data_init(**kwargs):\n if 'file' in kwargs:\n print \"Reading the file\"\n else:\n print \"Randomizing the initial data\"\n XV = np.random.rand(kwargs['particles'],kwargs['dimensions']*2) * 2 - 1\n M = np.random.rand(kwargs['particles'])\n\n t_f,num = kwargs['time']\n t = np.linspace(0,t_f,num)\n\n return XV,M,t",
"def load_training(file_name, target_val, training_data, training_targets, \n elements):\n\n file = open(file_name, \"r\")\n\n # Iterate over file until empty line recieved\n while True:\n chunk = file.readline()\n\n if(chunk == ''):\n break\n\n ret = load_chunk(chunk, elements)\n\n training_targets.append(target_val)\n\n # Convert data to frequency domain using fft()\n training_data.append([i.real for i in fft(ret)])",
"def load(self,filename,offp=None,maxchan = None,tscrunch=None):\n idx = 0 # only used to get parameters of integration, not data itself\n \n self.filename = filename\n self.ar = psrchive.Archive_load(filename)\n \n self.data = self.ar.get_data() #we load all data here, so this should probably change in the long run\n if maxchan:\n bwfact = maxchan/(1.0*self.data.shape[2]) # bwfact used to indicate the actual bandwidth of the data if we're not using all channels.\n self.data = self.data[:,:,:maxchan,:]\n else:\n bwfact = 1.0\n if offp:\n self.data = self.data/(np.abs(self.data[:,:,:,offp[0]:offp[1]]).mean(3)[:,:,:,None])\n if tscrunch:\n for k in range(1,tscrunch):\n self.data[:-k,:,:,:] += self.data[k:,:,:,:]\n# d = self.data\n# nsub = d.shape[0]/tscrunch\n# ntot = nsub*tscrunch\n# self.data = d[:ntot,:,:,:].reshape((nsub,tscrunch,d.shape[1],d.shape[2],d.shape[3])).mean(1)\n subint = self.ar.get_Integration(idx)\n self.nspec,self.npol,self.nchan,self.nbin = self.data.shape\n \n epoch = subint.get_epoch()\n try:\n self.imjd = np.floor(epoch)\n self.fmjd = np.fmod(epoch,1)\n except: #new version of psrchive has different kind of epoch\n self.imjd = epoch.intday()\n self.fmjd = epoch.fracday()\n self.ref_phase = 0.0\n self.ref_freq = 1.0/subint.get_folding_period()\n self.bw = np.abs(subint.get_bandwidth()) * bwfact\n self.rf = subint.get_centre_frequency()\n \n self.source = self.ar.get_source() # source name\n\n self.nlag = self.nchan\n self.nphase = self.nbin\n self.nharm = self.nphase/2 + 1\n \n self.dynamic_spectrum = np.zeros((self.nspec,self.nchan))\n self.optimized_filters = np.zeros((self.nspec,self.nchan),dtype='complex')\n self.intrinsic_profiles = np.zeros((self.nspec,self.nbin))\n self.nopt = 0\n self.nloop = 0",
"def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data",
"def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source",
"def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label = np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label",
"def parse_data(filepath):\n settings = dict()\n intensity = list()\n # Boolean flags to check when to start/stop\n # reading parameters\n read_params = False\n read_int = False\n read_zeeman = False\n finished = False\n fieldoff_intensities = list()\n fieldon_intensities = list()\n with open(filepath) as read_file:\n for line in read_file:\n if \"*****\" in line:\n read_int = False\n if finished is True:\n break\n if \"Scan\" in line:\n if \"[Field ON]\" in line:\n read_zeeman = True\n scan_details = line.split()\n settings[\"ID\"] = int(scan_details[1])\n # settings[\"Date\"] = str(scan_details[4])\n read_params = True\n read_int = False\n continue\n if read_int is True:\n if read_zeeman is False:\n fieldoff_intensities += [float(value) for value in line.split()]\n else:\n fieldon_intensities += [float(value) for value in line.split()]\n finished = True\n if read_params is True and len(line.split()) > 1:\n # Read in the frequency step, frequency, and other info\n # needed to reconstruct the frequency data\n scan_params = line.split()\n shift = 1\n settings[\"Frequency\"] = float(scan_params[0])\n settings[\"Frequency step\"] = float(scan_params[1])\n if len(scan_params) == 4:\n settings[\"Multiplier\"] = 1.\n shift = 0\n # If the multiplier data is there, we don't shift the read\n # index over by one\n else:\n settings[\"Multiplier\"] = float(scan_params[2])\n settings[\"Center\"] = float(scan_params[2 + shift])\n settings[\"Points\"] = int(scan_params[3 + shift])\n read_params = False\n # Start reading intensities immediately afterwards\n read_int = True\n continue\n fieldoff_intensities = np.array(fieldoff_intensities)\n fieldon_intensities = np.array(fieldon_intensities)\n\n # Generate the frequency grid\n settings[\"Frequency step\"] = settings[\"Frequency step\"] * settings[\"Multiplier\"]\n # This calculates the length of either side\n side_length = settings[\"Frequency step\"] * (settings[\"Points\"] // 2)\n start_freq = settings[\"Frequency\"] - side_length\n end_freq = settings[\"Frequency\"] + side_length\n frequency = np.linspace(start_freq, end_freq, settings[\"Points\"])\n\n return frequency, fieldoff_intensities, fieldon_intensities, settings",
"def __init__(self,filename='slices.000000',nvar=8,nbuf=3,field=None):\n\n #Open file\n f=tables.openFile(filename)\n\n #Dataset \"para_real\"\n self.time=f.root.para_real[0]\n self.dt =f.root.para_real[1]\n self.dx =f.root.para_real[2]\n self.dy =f.root.para_real[3]\n self.dz =f.root.para_real[4]\n\n #Dataset \"para_int\"\n self.ndump =f.root.para_int[0]\n self.nhist =f.root.para_int[1]\n self.nspec =f.root.para_int[2]\n self.nx =f.root.para_int[3]\n self.ny =f.root.para_int[4]\n self.nz =f.root.para_int[5]\n self.nxslice=f.root.para_int[6]\n self.nyslice=f.root.para_int[7]\n self.nzslice=f.root.para_int[8]\n\n self.dim =f.root.para_int[3:6]\n self.slice =f.root.para_int[6:9] \n self.dim_glob=self.dim*self.slice\n\n #Dataset \"x\", \"y\" and \"z\n self.x=f.root.x[:]\n self.y=f.root.y[:]\n self.z=f.root.z[:]\n\n #Dataset \"uin\"\n if not field:\n self.xyz=f.root.uin[:,:,:,:]\n else:\n if field == 'rho' : self.xyz = f.root.uin[0,:,:,:]\n if field == 'E' : self.xyz = f.root.uin[4,:,:,:]\n if field == 'rhoux': self.xyz = f.root.uin[1,:,:,:]\n if field == 'rhouy': self.xyz = f.root.uin[2,:,:,:]\n if field == 'rhouz': self.xyz = f.root.uin[3,:,:,:]\n if field == 'Bx' : self.xyz = f.root.uin[5,:,:,:]\n if field == 'By' : self.xyz = f.root.uin[6,:,:,:]\n if field == 'Bz' : self.xyz = f.root.uin[7,:,:,:]\n\n #Dataset \"para_mpi\"\n if (self.nxslice*self.nyslice*self.nzslice>1):\n self.xleft =f.root.para_mpi[0]\n self.xright=f.root.para_mpi[1]\n self.yleft =f.root.para_mpi[2]\n self.yright=f.root.para_mpi[3]\n self.zleft =f.root.para_mpi[4]\n self.zright=f.root.para_mpi[5]\n self.xposition=f.root.para_mpi[6]\n self.yposition=f.root.para_mpi[7]\n self.zposition=f.root.para_mpi[8]\n\n #Close file\n f.close()",
"def __init__(self, segyfile, \r\n channelRange, \r\n frameWidth, \r\n downsampleFactor=1,\r\n skipInterval = 1,\r\n isIntegrate=False, traces=[]): \r\n iloc = segyfile.find('iDAS')\r\n self.filename = segyfile[iloc:-4]\r\n #\r\n startime=time.time()\r\n self.load(segyfile)\r\n print ('loading seg-y took', time.time()-startime)\r\n\r\n self.gather = None\r\n self.frameWidth = frameWidth\r\n self.dsfactor = downsampleFactor\r\n self.skipInt = skipInterval\r\n self.channelRange = np.arange(channelRange[0],channelRange[1])\r\n self.isIntegrate = isIntegrate\r\n self._getGather()",
"def _load_raw(self, filename, scalarize=lambda x: x[0]):\n\n # Load all the data in this object.\n data = list()\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n data += [list(row)]\n\n # Attempt to parse all the data into their respective variables.\n try:\n # Load the header information.\n self.n = int(data[0][0])\n self.ns = int(data[0][1])\n self.m = int(data[0][2])\n\n k = int(data[0][3])\n self.s0 = int(data[0][4])\n self.ng = int(data[0][5])\n\n self.horizon = int(data[0][6])\n self.gamma = float(data[0][7])\n self.epsilon = float(0.01)\n\n # Functions to convert flattened NumPy arrays to C arrays.\n array_type_ng_uint = ct.c_uint * (self.ng)\n array_type_nmns_int = ct.c_int * (self.n * self.m * self.ns)\n array_type_nmns_float = ct.c_float * (self.n * self.m * self.ns)\n array_type_nm_float = ct.c_float * (self.n * self.m)\n\n # Load each of the larger data structures into memory and immediately\n # convert them to their C object type to save memory.\n rowOffset = 1\n self.goals = array_type_ng_uint(*np.array([int(data[rowOffset][s]) for s in range(self.ng)]).flatten())\n\n rowOffset = 2\n self.S = array_type_nmns_int(*np.array([[[int(data[(self.n * a + s) + rowOffset][sp]) \\\n for sp in range(self.ns)] \\\n for a in range(self.m)] \\\n for s in range(self.n)]).flatten())\n\n rowOffset = 2 + self.n * self.m\n self.T = array_type_nmns_float(*np.array([[[float(data[(self.n * a + s) + rowOffset][sp]) \\\n for sp in range(self.ns)] \\\n for a in range(self.m)] \\\n for s in range(self.n)]).flatten())\n\n rowOffset = 2 + self.n * self.m + self.n * self.m\n self.R = array_type_nm_float(*scalarize(np.array([[[float(data[(self.m * i + a) + rowOffset][s])\n for a in range(self.m)] \\\n for s in range(self.n)] \\\n for i in range(k)])).flatten())\n\n self.Rmax = max([self.R[i] for i in range(self.n * self.m)])\n self.Rmin = min([self.R[i] for i in range(self.n * self.m)])\n\n except Exception:\n print(\"Failed to load file '%s'.\" % (filename))\n raise Exception()",
"def read_data(self,filename):\n self.x = [] #Input values\n self.t = [] #Target values\n\n with open(filename, \"r\") as infile:\n lines = infile.readlines()\n self.n = len(lines)\n for line in lines:\n words = line.split()\n self.x.append(float(words[0]))\n self.t.append(float(words[1]))\n\n self.x = np.array(self.x)\n self.t = np.array(self.t)\n self.create_design_matrix()",
"def __init__(self, file_path):\n\n self.file_path = file_path\n measurement_list = []\n key_list = []\n if self.file_path.endswith(\".h5\"):\n data = h5py.File(self.file_path, 'r')\n for counter, measurement in enumerate(data.keys()):\n measurement_list.append(measurement)\n key_list.append(counter)\n self.exp_labels_list = measurement_list\n self.exp_key_list = key_list\n print('Datafile intialised successfully \\n')\n else:\n self.exp_labels_list = measurement_list\n self.exp_key_list = key_list\n print('Error: the file is not a .h5 file \\n')",
"def __init__(self, source):\n self.ntimepoints = None\n self.timepoints = None\n self.nsamples = None\n self.channelids = []\n self.data = []\n self.samplingrate = None\n\n # open textfiles\n if isinstance(source, str):\n if source.endswith('.gz'):\n externals.exists('gzip', raiseException=True)\n import gzip\n source = gzip.open(source, 'r')\n else:\n source = open(source, 'r')\n\n # read file\n for line in source:\n # split ID\n colon = line.find(':')\n\n # ignore lines without id\n if colon == -1:\n continue\n\n id = line[:colon]\n data = line[colon+1:].strip()\n if id == 'Sample Number':\n timepoints = N.fromstring(data, dtype=int, sep='\\t')\n # one more as it starts with zero\n self.ntimepoints = int(timepoints.max()) + 1\n self.nsamples = int(len(timepoints) / self.ntimepoints)\n elif id == 'Time':\n self.timepoints = N.fromstring(data,\n dtype=float,\n count=self.ntimepoints,\n sep='\\t')\n self.samplingrate = self.ntimepoints \\\n / (self.timepoints[-1] - self.timepoints[0])\n else:\n # load data\n self.data.append(\n N.fromstring(data, dtype=float, sep='\\t').reshape(\n self.nsamples, self.ntimepoints))\n # store id\n self.channelids.append(id)\n\n # reshape data from (channels x samples x timepoints) to\n # (samples x chanels x timepoints)\n self.data = N.swapaxes(N.array(self.data), 0, 1)",
"def load_data(self) -> None:",
"def parse(self):\n self.isParsingNeeded = False\n localizations = StormReader(self.file_path)\n localizations.readfile()\n localizations.get_header_info()\n\n #array = stormfile(self.file_path)\n #array.getHeaderInfo()\n self.stormData = localizations.data\n\n #prevent negative x,y values. Set to Zero\n self.stormData[...,0] = self.stormData[...,0]-self.stormData[...,0].min()\n self.stormData[...,1] = self.stormData[...,1]-self.stormData[...,1].min()\n self.size = np.array([self.stormData[...,0].max(), self.stormData[...,1].max()])\n #Build structured array with title name and value of columns.\n storm_reshaped = np.negative(np.ones((self.stormData.shape[0], 6)))\n for i,j in enumerate(localizations.dataColumn):\n if j >=0:\n storm_reshaped[...,int(i)] = self.stormData[..., int(j)]\n #set precision to 10 nm if no value given\n if (storm_reshaped[...,2]<0).all():\n storm_reshaped[...,2] = 10\n self.stormData = storm_reshaped",
"def test_large_import(self):\n self.create_sample_data_set_dir(\"node59p1.dat\", TELEM_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,750,400)",
"def load_data(filename, sequence_length, start = None):\n \n if start == None:\n start = 0\n \n #Read the data file\n raw_data = pd.read_csv(filename, nrows = 20000 ,dtype = float).values\n \n #Change all zeros to the number before the zero occurs\n for x in range(0, raw_data.shape[0]):\n for y in range(0, raw_data.shape[1]):\n if(raw_data[x][y] == 0):\n raw_data[x][y] = raw_data[x-1][y]\n \n #Convert the file to a list\n data = raw_data.tolist()\n #Convert the data to a 3D array (a x b x c) \n #Where a is the number of days, b is the window size, and c is the number of features in the data file\n\n result = []\n for index in range(len(data) - sequence_length):\n result.append(data[index: index + sequence_length])\n \n# print (result[-1], len(result))\n #Normalizing data by going through each window\n #Every value in the window is divided by the first value in the window, and then 1 is subtracted\n\n d0 = np.array(result)\n# print(d0[0])\n dr = np.zeros_like(d0)\n dr[:,1:,:] = d0[:,1:,:] / d0[:,0:1,:] - 1\n \n #Keeping the unnormalized prices for Y_test\n #Useful when graphing bitcoin price over time later\n end = int(dr.shape[0])\n unnormalized_bases = d0[start:end + 1,0:1,4]\n \n print(\"Total dr shape\", dr.shape)\n \n #Splitting data set into training (First 90% of data points) and testing data (last 10% of data points)\n split_line = round(0.9 * dr.shape[0])\n training_data = dr[:int(split_line), :]\n \n #Shuffle the data\n np.random.shuffle(training_data)\n \n #Training Data\n #the 4 is the column of the data that we want to train for\n X_train = training_data[:, :-1]\n Y_train = training_data[:, -1]\n Y_train = Y_train[:, 4]\n \n #Testing data\n X_test = dr[int(split_line):, :-1]\n Y_test = dr[int(split_line):, 49, :]\n Y_test = Y_test[:, 4]\n\n #Get the day before Y_test's price\n Y_daybefore = dr[int(split_line):, 48, :]\n Y_daybefore = Y_daybefore[:, 4]\n \n #Get window size and sequence length\n sequence_length = sequence_length\n window_size = sequence_length - 1 #because the last value is reserved as the y value\n \n return X_train, Y_train, X_test, Y_test, Y_daybefore, unnormalized_bases, window_size",
"def __init__(self, path=None):\n # Verify path to data set.\n if path is None:\n path = Path(os.path.abspath(__file__))\n _root = path.parents[2]\n self.data_path = _root / \"GTS/01BasicInputData\"\n else:\n self.data_path = Path(path)\n\n logger.info(f\"GTS-ISC data located at: {self.data_path}.\")\n assert self.data_path.is_dir()\n\n # ========= CONSTANTS ==========================================================================================\n # Swiss to gts coordinates.\n self.gts_coordinates = np.array((667400, 158800, 1700))\n\n # Name of boreholes.\n self.borehole_types = {\n \"FBS\": np.array([1, 2, 3]),\n \"SBH\": np.array([1, 3, 4]), # Note the skip of numbering for SBH\n \"INJ\": np.array([1, 2]),\n \"PRP\": np.array([1, 2, 3]),\n \"GEO\": np.array([1, 2, 3, 4]),\n }\n\n self.boreholes = [\n bh_set + str(bh_num)\n for bh_set in self.borehole_types\n for bh_num in self.borehole_types[bh_set]\n ]\n\n # Name of shearzones\n self.shearzone_types = {\"S1\": np.array([1, 2, 3]), \"S3\": np.array([1, 2])}\n\n self.shearzones = [\n sz_set + \"_\" + str(sz_num)\n for sz_set in self.shearzone_types\n for sz_num in self.shearzone_types[sz_set]\n ]\n\n # ============ LOAD DATA =======================================================================================\n\n # 1. Step: Load all available data. ============================================================================\n # Load borehole data\n self.borehole_geometry = self._borehole_data()\n\n # Load borehole structure data\n self.borehole_structures = self._borehole_structure_data()\n\n # Load tunnel structures (only shear-zones and fractures)\n self.tunnel_structures = self._tunnel_shearzone_data()\n\n # Load interpolation-ready shear-zone - borehole intersections\n # i.e. 1-1 (-0) mapping between shear-zones and boreholes.\n self.shearzone_borehole_geometry = self._shearzone_borehole_data()\n\n # 2. Step: All characterized structures ========================================================================\n self.structures = self._full_structure_geometry()"
]
| [
"0.65616596",
"0.6557325",
"0.65071505",
"0.63667995",
"0.61828595",
"0.61617154",
"0.60358214",
"0.6017118",
"0.60101986",
"0.5990661",
"0.5955847",
"0.5955847",
"0.5915603",
"0.5910003",
"0.58991474",
"0.5885808",
"0.5876386",
"0.586066",
"0.58460534",
"0.58357775",
"0.58249843",
"0.58119667",
"0.5808834",
"0.57913893",
"0.57846963",
"0.5780304",
"0.5764974",
"0.57645214",
"0.5750728",
"0.57418245"
]
| 0.67746013 | 0 |
Alters the cube by scaling and transforming it. | def b_transform_cube(b_obj):
b_scale_object()
b_scale_single_face(b_obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(self, data):\n self.cube = self.trf.transform(data)",
"def transform_cube(self,\n cube: xr.Dataset,\n gm: GridMapping,\n cube_config: CubeConfig) -> TransformedCube:",
"def scale(self, x, y, z) -> None:\n ...",
"def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]",
"def recalculate_transform(self):\n bias_mat = wireframe.translationMatrix(*self.bias)\n rot_mat = wireframe.rotateMatrix(*self.view_angle)\n scale_mat = wireframe.scaleMatrix(*self.scale)\n\n self.tf_wireframe.nodes = (self.wireframe.nodes - self._center_half) @ \\\n rot_mat @ scale_mat @ bias_mat",
"def transform(self, interval):\n c = self.physics_canvas.canvas\n c.scale(self.canvas_id, 0,1, 1.01, 1.01)",
"def transform_cube(self,\n cube: xr.Dataset,\n gm: GridMapping,\n cube_config: CubeConfig) -> TransformedCube:\n return cube, gm, cube_config",
"def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)",
"def _force_rescale(self, setpoint_x, setpoint_y):",
"def scale_positions_and_cell(self):\n\n taupscl = self.dt / self.taup\n stress = self.atoms.get_stress()\n old_pressure = self.atoms.get_isotropic_pressure(stress)\n scl_pressure = 1.0 - taupscl * self.compressibility / 3.0 * \\\n (self.pressure - old_pressure)\n\n #print \"old_pressure\", old_pressure\n #print \"volume scaling by:\", scl_pressure\n\n cell = self.atoms.get_cell()\n cell = scl_pressure * cell\n self.atoms.set_cell(cell, scale_atoms=True)",
"def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)",
"def scaleAll(self, scale):\n center = [self.width/2, self.height/2, 0, 0]\n matrix = self.scaleMatrix(scale, scale, scale)\n\n for wireframe in self.wireframes.values():\n wireframe.scale(center, matrix)",
"def scale(self):",
"def setLocalTransform(self):\n\n # Position\n posX = OpenMaya.MPlug(self.thisObj, self.localPositionX).asFloat()\n posY = OpenMaya.MPlug(self.thisObj, self.localPositionY).asFloat()\n posZ = OpenMaya.MPlug(self.thisObj, self.localPositionZ).asFloat()\n\n glFT.glTranslatef(posX, posY, posZ)\n\n # Scale\n scaleX = OpenMaya.MPlug(self.thisObj, self.localScaleX).asFloat()\n scaleY = OpenMaya.MPlug(self.thisObj, self.localScaleY).asFloat()\n scaleZ = OpenMaya.MPlug(self.thisObj, self.localScaleZ).asFloat()\n\n glFT.glScalef(scaleX, scaleY, scaleZ)",
"def resetTransforms(self, translate, rotate, scale, name):\n\n cmds.select(name + \"_mover_grp\", hi=True)\n selection = cmds.ls(sl=True)\n\n globalMovers = []\n offsetMovers = []\n geoMovers = []\n\n for each in selection:\n if each.find(\"_mover\") != -1:\n if each.partition(\"_mover\")[2] == \"\":\n globalMovers.append(each)\n if each.find(\"_mover_offset\") != -1:\n if each.partition(\"_mover_offset\")[2] == \"\":\n offsetMovers.append(each)\n if each.find(\"_mover_geo\") != -1:\n if each.partition(\"_mover_geo\")[2] == \"\":\n geoMovers.append(each)\n\n cmds.select(clear=True)\n\n for moverList in [globalMovers, offsetMovers, geoMovers]:\n for each in moverList:\n if translate:\n for attr in [\".tx\", \".ty\", \".tz\"]:\n try:\n cmds.setAttr(each + attr, 0)\n except:\n pass\n if rotate:\n for attr in [\".rx\", \".ry\", \".rz\"]:\n try:\n cmds.setAttr(each + attr, 0)\n except:\n pass\n if scale:\n for attr in [\".sx\", \".sy\", \".sz\"]:\n try:\n cmds.setAttr(each + attr, 1)\n except:\n pass\n if cmds.window(\"ART_ResetXformsModeWin\", exists=True):\n cmds.deleteUI(\"ART_ResetXformsModeWin\", wnd=True)",
"def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')",
"def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)",
"def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True, render=True):\n if xscale is None:\n xscale = self.scale[0]\n if yscale is None:\n yscale = self.scale[1]\n if zscale is None:\n zscale = self.scale[2]\n self.scale = [xscale, yscale, zscale]\n\n # Reset all actors to match this scale\n for actor in self.actors.values():\n if hasattr(actor, 'SetScale'):\n actor.SetScale(self.scale)\n\n self.parent.render()\n if reset_camera:\n self.update_bounds_axes()\n self.reset_camera(render=render)\n self.Modified()",
"def reset(self):\n self.reset_traits(('grow_hair', 'n_scaling_params', 'scale_x',\n 'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',\n 'trans_x', 'trans_y', 'trans_z'))",
"def transform_view(self):\n translation = self.get_translation()\n scale = self.get_scaling()\n # update all non static visuals\n for visual in self.paint_manager.get_visuals():\n if not visual.get('is_static', False):\n self.set_data(visual=visual['name'], \n scale=scale, translation=translation)",
"def fix_data(self, cube: Cube) -> Cube:\n return cube",
"def setUp(self):\n self.cube = _create_2d_cube()",
"def change_scaling(self, scales=None, offsets=None) -> None:\n self.points.change_scaling(scales, offsets)\n\n self.header.scales = scales\n self.header.offsets = offsets",
"def _rescale_vertices(self, scale, rescaling_type=RescalingTypeMin):\n vertex_array = np.array(self.mesh_.vertices())\n min_vertex_coords = np.min(self.mesh_.vertices(), axis=0)\n max_vertex_coords = np.max(self.mesh_.vertices(), axis=0)\n vertex_extent = max_vertex_coords - min_vertex_coords\n\n # find minimal dimension\n if rescaling_type == MeshProcessor.RescalingTypeMin:\n dim = np.where(vertex_extent == np.min(vertex_extent))[0][0]\n relative_scale = vertex_extent[dim]\n elif rescaling_type == MeshProcessor.RescalingTypeMed:\n dim = np.where(vertex_extent == np.med(vertex_extent))[0][0]\n relative_scale = vertex_extent[dim]\n elif rescaling_type == MeshProcessor.RescalingTypeMax:\n dim = np.where(vertex_extent == np.max(vertex_extent))[0][0]\n relative_scale = vertex_extent[dim]\n elif rescaling_type == MeshProcessor.RescalingTypeRelative:\n relative_scale = 1.0\n elif rescaling_type == MeshProcessor.RescalingTypeDiag:\n diag = np.linalg.norm(vertex_extent)\n relative_scale = diag / 3.0 # make the gripper size exactly one third of the diagonal\n\n # compute scale factor and rescale vertices\n scale_factor = scale / relative_scale \n vertex_array = scale_factor * vertex_array\n self.mesh_.vertices_ = vertex_array.tolist()\n self.mesh_._compute_bb_center()\n self.mesh_._compute_centroid()\n self.mesh_.set_center_of_mass(self.mesh_.bb_center_)",
"def scale(self, k_x, k_y = None, k_z = None):\r\n if (k_y is None):\r\n return vec3(k_x*self.x, k_x*self.y, k_x*self.z)\r\n else:\r\n return vec3(k_x*self.x, k_y*self.y, k_z*self.z)",
"def update_model(self):\n self.model = [[self.cubes[i][j].value for j in range(self.cols)] for i in range(self.rows)]",
"def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return",
"def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)",
"def transform(self, mat: TxMatrix) -> None:\n super().transform(mat)\n self.scale = self.scale * mat.scale",
"def update(self):\n self.setVector(0.15, 0.0)"
]
| [
"0.6694423",
"0.66168797",
"0.6367737",
"0.6105498",
"0.6039283",
"0.60163206",
"0.5980319",
"0.5951304",
"0.5907383",
"0.5883287",
"0.58437085",
"0.58133626",
"0.5810219",
"0.57776093",
"0.5776965",
"0.57480514",
"0.5731953",
"0.57181156",
"0.5717602",
"0.5705528",
"0.56746197",
"0.5674519",
"0.5636666",
"0.56278896",
"0.5586931",
"0.55793816",
"0.55695224",
"0.55640894",
"0.5544335",
"0.55183446"
]
| 0.7299694 | 0 |
Scale a single face of the object. | def b_scale_single_face(b_obj):
# scale single face
for poly in b_obj.data.polygons:
poly.select = False
b_obj.data.polygons[2].select = True
for b_vert_index in b_obj.data.polygons[2].vertices:
b_obj.data.vertices[b_vert_index].co[1] = b_obj.data.vertices[b_vert_index].co[1] * 0.5
b_obj.data.vertices[b_vert_index].co[2] = b_obj.data.vertices[b_vert_index].co[2] * 0.5 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf, size).convert_alpha()",
"def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf,\n size).convert_alpha()\n self._version += 1\n return self",
"def scale(self, sf):\n self.scale(sf, sf)",
"def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)",
"def scale(self, x, y, z) -> None:\n ...",
"def b_transform_cube(b_obj):\n \n b_scale_object()\n b_scale_single_face(b_obj)",
"def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.x *= scale_factor\n self.y *= scale_factor\n self.width *= scale_factor\n self.height *= scale_factor\n\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.x *= scale_x\n self.y *= scale_y\n self.width *= scale_x\n self.height *= scale_y",
"def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)",
"def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.width *= scale_factor\n self.height *= scale_factor\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.width *= scale_x\n self.height *= scale_y",
"def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return",
"def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)",
"def scale(self, scale):\n self.coords = self.coords * scale\n return self",
"def ScaleShape(shape, scale_x, scale_y):\n for i, pt in enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]",
"def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)",
"def scale(first,scalar):\n if isinstance(first,FreeCAD.Vector):\n return FreeCAD.Vector(first.x*scalar, first.y*scalar, first.z*scalar)",
"def scale(self):",
"def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res",
"def ScaleObject(object_id, origin, scale, copy=False):\n rc = ScaleObjects(object_id, origin, scale, copy )\n if rc: return rc[0]\n return scriptcontext.errorhandler()",
"def scale(self, scale):\n\n self._scale = scale",
"def scale(self, scale):\n\n self._scale = scale",
"def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor",
"def scaleAll(self, scale):\n center = [self.width/2, self.height/2, 0, 0]\n matrix = self.scaleMatrix(scale, scale, scale)\n\n for wireframe in self.wireframes.values():\n wireframe.scale(center, matrix)",
"def scale(self, factor):\n self.x *= factor\n self.y *= factor\n for a in self.annotations:\n a.scale(factor)",
"def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)",
"def scale(self,scale_by):\n x = self._x * scale_by\n y = self._y * scale_by\n return Point(x,y)",
"def scale(self, scale):\n\t\tself._current_score *= scale",
"def scale(self, axis, value):\r\n assert (axis < EventStream.numAxes), \"Axis number out of range\"\r\n if self.absInfo[axis]:\r\n return self.absInfo[axis].scale(value)\r\n else:\r\n return value",
"def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value",
"def change_face(self, face):\n if self.face is not None:\n self.face.remove_point(self)\n\n self.face = face\n self.face.add_point(self)",
"def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)"
]
| [
"0.70263743",
"0.6940709",
"0.6471854",
"0.6386272",
"0.63822234",
"0.636569",
"0.63525623",
"0.63352567",
"0.62949914",
"0.6202851",
"0.6201046",
"0.61532867",
"0.6143922",
"0.6116454",
"0.6085282",
"0.6083255",
"0.60789496",
"0.60042995",
"0.597115",
"0.597115",
"0.59677833",
"0.5928427",
"0.58997595",
"0.5883604",
"0.58764786",
"0.58593196",
"0.58544976",
"0.5851239",
"0.58427423",
"0.58368224"
]
| 0.7857056 | 0 |
Returns restaurant ratings from file as a dictionary. | def return_restaurant_rating_dictionary(filename):
    the_file = open(filename)
    restaurant_ratings = {}  # initialize locally so the function is self-contained
for line in the_file:
line = line.rstrip()
ratings = line.split(":")
restaurant_name = ratings[0]
rating = ratings[1]
restaurant_ratings[restaurant_name] = rating
return restaurant_ratings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ratings(filename):\n\n all_ratings = {}\n\n with open(filename) as f:\n for line in f:\n line = line.rstrip()\n restaurant, rating = line.split(\":\")\n all_ratings[restaurant] = rating\n\n return all_ratings",
"def process_file(file_name):\n \n restaurant_ratings = {}\n \n # open file, iterate line by line\n restaurant_file = open(file_name)\n # split by colon, returns a list\n for line in restaurant_file:\n restaurant_name, restaurant_rating = line.rstrip().split(\":\")\n restaurant_ratings[restaurant_name] = int(restaurant_rating)\n\n # close file\n restaurant_file.close()\n return restaurant_ratings",
"def gets_restaurant_ratings(text_file):\n file = open(text_file)\n restaurant_ratings_dictionary = {}\n\n for line in file:\n line = line.rstrip().split(\":\")\n restaurant_name, rating = line\n restaurant_ratings_dictionary[restaurant_name] = rating\n\n file.close()\n\n return restaurant_ratings_dictionary",
"def restaurant_ratings(filename):\n\n lines = open(filename)\n\n restaurant_dict = {}\n\n\n for restaurants in lines:\n restaurants = restaurants.rstrip()\n restaurant, rating = restaurants.split(\":\")\n\n restaurant_dict[restaurant] = int(rating)\n\n # user_input_restaurant()\n\n # restaurant_dict[user_restaurant_input[0]] = user_restaurant_input[1]\n\n # sorted_restaurants = sorted(restaurant_dict.items())\n\n\n # for restaurant, rating in sorted_restaurants:\n # print \"%s is rated at %s\" % (restaurant, rating)\n\n return restaurant_dict",
"def parseRating(ratings_file):\n ratings_raw_data = sc.textFile(ratings_file)\n ratings_raw_data_header = ratings_raw_data.take(1)[0]\n ratings_data = ratings_raw_data.filter(lambda line: line != ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (tokens[0], tokens[1], tokens[2])).cache()\n return ratings_data",
"def restaurant_rating(filename):\n\n rating_file = open(filename)\n\n restaurant_ratings = {}\n\n for line in rating_file:\n line = line.rstrip()\n restaurant_name, rating = line.split(\":\")\n\n restaurant_ratings[restaurant_name] = rating\n\n # for restaurant, number in sorted(restaurant_ratings.items()):\n # print \"{} is rated at {}\".format(restaurant, number)\n\n\n\n # restaurant_name_input, restaurant_score_input = user_input()\n # restaurant_ratings[restaurant_name_input] = restaurant_score_input\n\n for restaurant, number in sorted(restaurant_ratings.items()):\n print \"{} is rated at {}\".format(restaurant, number)",
"def load_ratings_from_file_path(file_path):\n ratings = []\n with open(file_path) as file:\n file.readline() # remove header\n for line in file:\n ratings.append(parse_line(line))\n return ratings",
"def read_restaurant(file):\r\n name_to_rating = {}\r\n price_to_names = {'$':[], '4$':[],'$$$':[],'$$':[]}\r\n cuisine_to_names = {}",
"def load_ratings():\n res = {}\n with open(RATINGS_PATH, newline='', encoding=RATINGS_ENCRYPTION) as csvfile:\n spamreader = csv.reader(csvfile)\n for i, row in enumerate(spamreader):\n if i:\n title = row[3]\n res[title] = imdbData(row)\n return res",
"def processRatings(file):\n ratingsPerSubmission = {}\n with open(file) as ratings_file:\n next(ratings_file)\n for line in ratings_file:\n regexRes = re.search(\"^([a-z0-9]{24}),([0-4]),([0-4]),([0-4]),(.*)$\", line)\n if regexRes:\n if not regexRes.group(1) in ratingsPerSubmission:\n ratingsPerSubmission[regexRes.group(1)] = {\"DQual\": [], \"DIntrp\": [], \"TDepth-subtopic1\": 0,\n \"TDepth-subtopic2\": 0, \"TDepth-subtopic3\": 0, \"TDepth-subtopic4\": 0}\n if regexRes.group(2) != '0':\n ratingsPerSubmission[regexRes.group(1)]['TDepth-subtopic' + regexRes.group(2)] += 1\n ratingsPerSubmission[regexRes.group(1)]['DQual'].append(int(regexRes.group(3)))\n ratingsPerSubmission[regexRes.group(1)]['DIntrp'].append(int(regexRes.group(4)))\n\n # Calculate T-Depth\n for (submission, ratings) in ratingsPerSubmission.items():\n ratingsPerSubmission[submission]['TDepth'] = avg([min(3, ratings[\"TDepth-subtopic\" + str(i)]) for i in range(1, 5)])\n\n return ratingsPerSubmission",
"def parse_file(filename):\n user_ratings = {}\n movie_ratings = {}\n \n import csv\n with open(filename, 'r') as f: \n reader = csv.reader(f) \n for row in reader:\n movie_id = int(row[0])\n user_id = int(row[1])\n rating = float(row[2]) \n user_ratings.setdefault(user_id, {}).update({movie_id: rating})\n movie_ratings.setdefault(movie_id,{}).update({user_id: rating})\n \n return user_ratings, movie_ratings",
"def loadRatings(ratingsFile):\n if not isfile(ratingsFile):\n print \"File %s does not exist.\" % ratingsFile\n sys.exit(1)\n f = open(ratingsFile, 'r')\n ratings = filter(lambda r: r[2] > 0, [parseRating(line)[1] for line in f])\n f.close()\n if not ratings:\n print \"No ratings provided.\"\n sys.exit(1)\n else:\n return ratings",
"def restaurant_rater(input_filename):\n\n #Open input file\n restaurant_scores_source = open(input_filename)\n\n #Initialize restaurant_ratings dictionary\n restaurant_ratings = {}\n\n #Ask user for a restaurant name and rating and store resulting strings\n user_add_restaurant = raw_input(\"Please add the name of a restaurant you would like to rate: \")\n user_add_score = int(raw_input(\"Please enter the rating: \"))\n\n #Add user input to dictionary\n restaurant_ratings.update({user_add_restaurant: user_add_score})\n\n #Loop through each line in input file\n for line in restaurant_scores_source:\n #Strips and splits each line at : and unpacks list into name and rating\n name, rating = line.rstrip().split(\":\")\n\n #Add keys and values to restaurant_ratings based on name and rating\n restaurant_ratings[name] = int(rating)\n\n for restaurant in sorted(restaurant_ratings):\n print restaurant, \"is rated at\", restaurant_ratings[restaurant]\n \n restaurant_scores_source.close()",
"def parse_rating_dict(self, line):\n pass",
"def read_input(filename):\n ratings = []\n queries = []\n movies = {}\n\n with open(filename, 'r') as file:\n num_ratings, num_movies = file.readline().split()\n num_ratings, num_movies = int(num_ratings), int(num_movies)\n num_queries = 5\n\n for _ in range(num_ratings):\n user, movie, rating = file.readline().split()\n user, movie, rating = int(user), int(movie), float(rating)\n ratings.append((user, movie, rating))\n\n for _ in range(num_movies):\n line = file.readline()\n try:\n movie, metadata = line.split(maxsplit=1)\n except ValueError:\n movie, metadata = int(line.split()[0]), ''\n\n movie = int(movie)\n metadata = metadata.strip()\n movies[movie] = metadata\n\n for _ in range(num_queries):\n user, movie = file.readline().split()\n user, movie = int(user), int(movie)\n queries.append((user, movie))\n\n return ratings, movies, queries",
"def loadRatingScoreMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: ((line[0], line[1]), line[2]))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, getDataDirectory())\n\t)(file)",
"def get_recipes_dict(filename, mode_type, encode):\n with open(filename, mode_type, encoding=encode) as file:\n recipe_dict = dict()\n for line in file:\n dish = line.strip()\n amount = int(file.readline())\n buffer_list = list()\n for item in range(amount):\n ingredient, quantity, measure = file.readline().split('|')\n buffer_list.append(\n {'ingredient_name': ingredient.strip(), 'quantity': int(quantity), 'measure': measure.strip()}\n )\n recipe_dict[dish] = buffer_list\n file.readline()\n return recipe_dict",
"def get_ave_score(input_file):\n if not os.path.exists(input_file):\n return {}\n linenum = 0\n record_dict = {}\n score_dict = {}\n fp = open(input_file, encoding=\"utf-8\")\n for line in fp:\n if linenum == 0:\n linenum += 1\n continue\n item = line.strip().split(',')\n if len(item) < 4:\n continue\n userid, itemid, rating = item[0], item[1], float(item[2])\n if itemid not in record_dict:\n record_dict[itemid] = [0,0]\n record_dict[itemid][0]+=1\n record_dict[itemid][1]+=rating\n fp.close()\n for itemid in record_dict:\n score_dict[itemid] = round(record_dict[itemid][1]/record_dict[itemid][0],3)\n return score_dict",
"def getRawRatings(self):\n\n try:\n judgeNotesLogger.info(\"getRawRatings: Retrieving Raw Ratings from '%s'\", self.notesFile)\n for rating in self.ratingsToSongs.keys():\n numOfSongsWithRating = len(self.ratingsToSongs[rating])\n self.ratingsRaw[rating] = numOfSongsWithRating\n\n except:\n judgeNotesLogger.warning(\"getRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def get_ratings_from_s3():\n s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n s3_object = s3.Object(S3_BUCKET_NAME, S3_FILE_NAME)\n ratings_string = s3_object.get()['Body'].read()\n rating_string_io = io.BytesIO(ratings_string)\n ratings_dict_reader = csv.DictReader(rating_string_io)\n\n ratings_dict = {}\n for player in ratings_dict_reader:\n ratings_dict[player['name']] = int(player['rating'])\n\n return ratings_dict",
"def loadRatingsDB(path=\"\"):\n ratings = \"BX-Book-Ratings.csv\"\n users = dict()\n with codecs.open(path + ratings, \"r\", \"utf8\") as data:\n for line in data:\n fields = line.split(\";\")\n user = fields[0].strip('\"')\n isbn = binascii.hexlify(fields[1].strip('\"').encode('utf-8'))\n rating = int(fields[2].strip().strip('\"'))\n # if object in dict\n if user in users:\n users[user].rate_product(isbn, rating)\n else:\n # create object and add to dict\n users[user] = Consumer.Consumer(user)\n users[user].rate_product(isbn, rating)\n return users",
"def _read_scores(self,path):\n scores = dict()\n fin = open(path,'r')\n for line in fin:\n k, v = line.split()\n scores[k.strip()] = float(v.strip())\n return scores",
"def load_user_ratings(filepath, max_num_user=10000):\n user_ratings = dict()\n with open(filepath, 'rb') as file:\n reader = csv.reader(file, delimiter=',')\n next(reader, None) # Skip header\n for row in reader:\n user_id, movie_id, rating = int(row[0]), int(row[1]), float(row[2])\n if user_ratings.get(user_id) is None:\n user_ratings[user_id] = dict()\n\n user_ratings[user_id][movie_id] = rating\n\n reduced_user_ratings = dict()\n for user_id in user_ratings:\n if len(user_ratings[user_id]) > 300:\n reduced_user_ratings[user_id] = user_ratings[user_id]\n\n if len(reduced_user_ratings) == max_num_user:\n return reduced_user_ratings\n\n return reduced_user_ratings",
"def read_file(file):\n\n rfile = {}\n line_num = 1\n\n ## Iterate through file\n if _debug: print \"RAW File contents:\"\n infile = open(file,\"r\")\n while infile:\n line = infile.readline()\n\tline = line.rstrip(\"\\n\")\n\tif not line: break\n\n\tif _debug: print \"%d: %s\" % (line_num, line)\n\tline_num += 1\n\tif \"#\" in line: continue\n\n args = line.split()\n\tif len(args) > 0: key = args[0]\n\targs.reverse()\n\tif rfile.has_key(float(key)):\n\t if len(args) >= 5: rfile[float(key)].append(Request(args.pop(), args.pop(), args.pop(), args.pop(), args.pop()))\n\t elif len(args) >= 3: rfile[float(key)].append(Request(args.pop(), args.pop(), args.pop(), None, None))\n\t else: continue\n\telse:\n\t if len(args) >= 5: rfile[float(key)] = [Request(args.pop(), args.pop(), args.pop(), args.pop(), args.pop())]\n\t elif len(args) >= 3: rfile[float(key)] = [Request(args.pop(), args.pop(), args.pop(), None, None)]\n\t else: continue;\n\t#rfile.append(Request(args[0], args[1]))\n\n return rfile",
"def load_sample_rates(path: str) -> Dict[str, int]:\n sample_rates = pd.read_csv(path)\n result_dict = {x['filename'].split('.')[0]: x['frame_rate'] for index, x in sample_rates.iterrows()}\n return result_dict",
"def alphabetize_restaurant_ratings(filename):\n\n restaurant_log = open(filename)\n\n restaurants = {}\n\n for line in restaurant_log:\n data = line.rstrip().split(\":\")\n restaurants[data[0]] = data[1]\n\n alphabetized_restaurants = sorted(restaurants.items(), key=lambda row: row[0])\n\n for restaurant in alphabetized_restaurants:\n print \"%s is rated at %s.\" % (restaurant[0], restaurant[1])",
"def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary",
"def all_ratings(self):\n return {\n 'average': self.average_rating(),\n 'total': self.proto.aggregateRating.ratingsCount,\n 'oneStar': self.proto.aggregateRating.oneStarRatings,\n 'twoStar': self.proto.aggregateRating.twoStarRatings,\n 'threeStar': self.proto.aggregateRating.threeStarRatings,\n 'fourStar': self.proto.aggregateRating.fourStarRatings,\n 'fiveStar': self.proto.aggregateRating.fiveStarRatings,\n }",
"def get_rating_data(rating_file='ml-1m/ratings.dat', max_rows=1e6):\n userID_ls = []\n movieID_ls = []\n rating_ls = []\n\n for i, line in enumerate(ZIPFILE.open(rating_file).readlines()):\n if i >= max_rows:\n break\n try:\n x = line.decode('utf-8').split('::')\n except Exception:\n continue\n userID_ls.append(int(x[0]))\n movieID_ls.append(int(x[1]))\n rating_ls.append(int(x[2]))\n\n rating_dict = {'userID': np.array(userID_ls),\n 'movieID': np.array(movieID_ls),\n 'rating': np.array(rating_ls)}\n\n return pd.DataFrame(rating_dict)",
"def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict"
]
| [
"0.8314891",
"0.7977691",
"0.76466507",
"0.74871945",
"0.7121011",
"0.705934",
"0.7026679",
"0.699601",
"0.68067664",
"0.6801764",
"0.6788466",
"0.6650362",
"0.65753245",
"0.6558319",
"0.63981",
"0.62826574",
"0.61762506",
"0.6133874",
"0.61258125",
"0.6095542",
"0.60633177",
"0.6043403",
"0.5935354",
"0.5908735",
"0.5902238",
"0.5894409",
"0.58675605",
"0.58311594",
"0.5792044",
"0.57538456"
]
| 0.8010539 | 1 |
Prints restaurant ratings, alphabetized by restaurant name. | def print_alph_restaurant_ratings(restaurant_dict):
for restaurant_name, rating in sorted(restaurant_dict.items()):
# print "{} is rated at {}.".format(restaurant_name,
# rating)
restaurant_name = restaurant_dict.items[0]
rating = restaurant_dict.items[1]
print restaurant_name, rating | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_ratings(all_ratings):\n print(\"Here is the current list of all ratings:\")\n for restaurant, rating in sorted(all_ratings.items()):\n print(f'{restaurant} is rated at {rating}.')",
"def alphabetized_restaurant_ratings(restaurant_ratings_dictionary):\n for name, rating in sorted(restaurant_ratings_dictionary.items()):\n print(f\"{name} is rated at {rating}.\")",
"def display_ratings(ratings):\n # only attempt to display the ratings if any were found\n if ratings:\n print('\\n[RATINGS]\\n')\n\n for rating in ratings:\n print(f' {rating}', end=' ')\n # needed to get printing back to normal\n print()",
"def alphabetize_restaurant_ratings(filename):\n\n restaurant_log = open(filename)\n\n restaurants = {}\n\n for line in restaurant_log:\n data = line.rstrip().split(\":\")\n restaurants[data[0]] = data[1]\n\n alphabetized_restaurants = sorted(restaurants.items(), key=lambda row: row[0])\n\n for restaurant in alphabetized_restaurants:\n print \"%s is rated at %s.\" % (restaurant[0], restaurant[1])",
"def alphabetize_restaurant(filename):\n\n restaurant_reviews = open(filename)\n\n book_of_reviews = []\n for line in restaurant_reviews:\n reviews = line.rstrip()\n reviews_list = reviews.split(\":\")\n book_of_reviews.append(reviews_list)\n \n dictionary_reviews = {}\n for review in book_of_reviews: \n dictionary_reviews[review[0]] = review[1]\n\n new_restaurant = input(\"What is the name of your restaurant? > \") \n new_rating = int(input(\"What is the rating of your restaurant? > \"))\n \n dictionary_reviews[new_restaurant] = new_rating \n \n dictionary_reviews = sorted(dictionary_reviews.items())\n \n for restaurant_name in dictionary_reviews:\n print(f\"{restaurant_name[0]} is rated at {restaurant_name[1]}.\")",
"def restaurant_rater(input_filename):\n\n #Open input file\n restaurant_scores_source = open(input_filename)\n\n #Initialize restaurant_ratings dictionary\n restaurant_ratings = {}\n\n #Ask user for a restaurant name and rating and store resulting strings\n user_add_restaurant = raw_input(\"Please add the name of a restaurant you would like to rate: \")\n user_add_score = int(raw_input(\"Please enter the rating: \"))\n\n #Add user input to dictionary\n restaurant_ratings.update({user_add_restaurant: user_add_score})\n\n #Loop through each line in input file\n for line in restaurant_scores_source:\n #Strips and splits each line at : and unpacks list into name and rating\n name, rating = line.rstrip().split(\":\")\n\n #Add keys and values to restaurant_ratings based on name and rating\n restaurant_ratings[name] = int(rating)\n\n for restaurant in sorted(restaurant_ratings):\n print restaurant, \"is rated at\", restaurant_ratings[restaurant]\n \n restaurant_scores_source.close()",
"def restaurant_rating(filename):\n\n rating_file = open(filename)\n\n restaurant_ratings = {}\n\n for line in rating_file:\n line = line.rstrip()\n restaurant_name, rating = line.split(\":\")\n\n restaurant_ratings[restaurant_name] = rating\n\n # for restaurant, number in sorted(restaurant_ratings.items()):\n # print \"{} is rated at {}\".format(restaurant, number)\n\n\n\n # restaurant_name_input, restaurant_score_input = user_input()\n # restaurant_ratings[restaurant_name_input] = restaurant_score_input\n\n for restaurant, number in sorted(restaurant_ratings.items()):\n print \"{} is rated at {}\".format(restaurant, number)",
"def restaurant_ratings(filename):\n\n lines = open(filename)\n\n restaurant_dict = {}\n\n\n for restaurants in lines:\n restaurants = restaurants.rstrip()\n restaurant, rating = restaurants.split(\":\")\n\n restaurant_dict[restaurant] = int(rating)\n\n # user_input_restaurant()\n\n # restaurant_dict[user_restaurant_input[0]] = user_restaurant_input[1]\n\n # sorted_restaurants = sorted(restaurant_dict.items())\n\n\n # for restaurant, rating in sorted_restaurants:\n # print \"%s is rated at %s\" % (restaurant, rating)\n\n return restaurant_dict",
"def describeRestaurant(self):\n print (f\"{self.name} has the best {self.cuisineType}\")",
"def describe_restaurant(self):\n\t\tprint(f\"The resaurant name is {self.restaurant_name}.\")\n\t\tprint(f\"The resaurant type is {self.restaurant_type}.\")",
"def print_recommendations(self):\n\n rec_vector = self.generate_recommendation()\n\n print(\"Recommendations for user {} \".format(self.username))\n\n for ranking, subreddit_name in enumerate(rec_vector, 1):\n print(\"{}.: {}\".format(ranking, subreddit_name))\n\n if ranking%10 == 0 and ranking!=0:\n check_if_move_on = True\n print(\"\\nType c and press enter for the next 10 subreddits.\\n\")\n print(\"Type q and press enter to return to main menu.\\n\")\n\n while check_if_move_on:\n choice = input()\n\n if choice == 'c':\n break\n\n elif choice == 'q':\n break\n\n else:\n print(\"Not a valid entry, please enter again.\")\n\n # break the whole thing if they want to quit\n if choice == 'q':\n break",
"def display_player_ratings(player_ratings):\r\n print('\\nCLASSEMENT DES PARTICIPANTS:\\n Nom ELO Score')\r\n for i in range(0, len(player_ratings)):\r\n print(players_table.get(doc_id=player_ratings[i][0])['Nom'],\r\n players_table.get(doc_id=player_ratings[i][0])['ELO'],\r\n player_ratings[i][1])",
"def random_rating(restaurant_ratings):\n rests = list(restaurant_ratings.keys())\n random_restaurant = choice(rests)\n\n print(f\"\"\"\n the chosen restaurant is {random_restaurant}\n with the rating {restaurant_ratings[random_restaurant]}\n \"\"\")\n\n new_rating = input(\"what should the new rating be? \")\n restaurant_ratings[random_restaurant] = new_rating",
"def printJudgeRatings(self):\n\n try:\n judgeNotesLogger.info(\"printJudgeRatings: Printing out judge ratings from '%s'\\n\", self.notesFile)\n\n # Print Normal List First.\n for ratingTuple in self.judgedSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n\n # Print Special List Second.\n for ratingTuple in self.specialSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n \n except:\n judgeNotesLogger.warning(\"printJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def describe_restaurant(self):\n\t\tprint(f\"{self.restaurant_name.title()} serves {self.cuisine_type}.\")",
"def print_food(self):\n for dish in self.food:\n print(dish.get_name())",
"def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")",
"def describe_restaurant(self):\n\t\tdetails = f\"{self.restaurant_name} is a {self.cuisine_type} restaurant.\"\n\t\tprint(f\"\\n{details}\")",
"def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())",
"def __str__(self):\n return str(self.get_rating())",
"def printRawRatings(self):\n\n try:\n judgeNotesLogger.info(\"printRawRatings: Retrieving Raw Ratings from '%s'\\n\", self.notesFile)\n sortedRatings = sorted(self.ratingsRaw.keys(), key=float)\n for rating in sortedRatings:\n print(\"[\"+str(rating)+\"/10]:\"+str(self.ratingsRaw[rating]))\n ratingSum = self.getRatingSum()\n sortedRatings = sorted(self.specialRatingsRaw.keys(), key=str.lower)\n for rating in sortedRatings:\n print(\"[\"+str(rating)+\"]:\"+str(self.specialRatingsRaw[rating]))\n print(\"TOTAL:\"+str(round(ratingSum, 1)))\n print(\"JUDGEDFILES:\"+str(self.numJudgedFiles))\n print(\"SPECIALFILES:\"+str(self.numSpecialFiles))\n print(\"TOTALFILES:\"+str(self.numTotalFiles))\n print(\"AVERAGE:\"+str(round(self.average, 2))+\"\\n\")\n\n except:\n judgeNotesLogger.warning(\"printRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def describe_restaurant(self):\n print(f\"\\nRestaurant name: {self.restaurant_name}\")\n print(f\"Cuisine type: {self.cuisine_type}\")",
"def describe_restaurant(self):\n\t\tprint(\"name of the restaurant is \" + self.restaurant_name)\n\t\tprint(\"cuisine type is \" + self.cuisine_type)",
"def find_rating():\n print(\"***** Finding Star/Rating *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(\"This business is rated \" + str(\n business_object['stars']) + \" stars with \" + str(\n business_object['review_count']) + \" reviews.\\n\")\n\n print_business(business_object)",
"def add_restaurant_rating(restaurant_ratings):\n\n user_restaurant = raw_input(\"Enter a restaurant name: \")\n user_rating = int(raw_input(\"Enter a restaurant rating between 1-5: \"))\n\n # prompt user for new restaurant and rating, add to dictionary\n restaurant_ratings[user_restaurant] = user_rating\n\n return restaurant_ratings",
"def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")",
"def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating",
"def printRatingsToSongs(self):\n judgeNotesLogger.info(\"printRatingsToSongs: Printing songs for each rating parsed\")\n try:\n\n # Print out normal ratings first.\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n for rating in sortedRatings:\n print(\"\") # For neater printing. Newline still occurs here\n songsInRating = self.ratingsToSongs[rating]\n print(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n\n # Print out special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n print(\"\") # For neater printing. Newline still occurs here\n songsInRating = self.specialRatingsToSongs[rating]\n print(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n \n print(\"\") # For neater printing. Newline still occurs here\n except:\n judgeNotesLogger.warning(\"printRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def menuPrint(self):\n print(self.menu[['name', 'review_count', 'rating']].sort_values(['rating'], ascending = False).sort_index())\n \n menuCheck = str(input(\"\\nIs this what you're looking for? (Yes (y) or No (n)) \"))\n #If no, prompt ask question again\n while menuCheck not in ['yes', 'y']:\n menuPrompt.ask(self)\n print(self.menu[['name', 'review_count', 'rating']].sort_values(['rating'], ascending = False).sort_index())\n menuCheck = str(input(\"\\nIs this what you're looking for? (Yes (y) or No (n)) \"))",
"def netflix_print(writer, rating):\n if isinstance(rating, float):\n writer.write(('%.1f' % rating) + \"\\n\")\n elif isinstance(rating, int):\n writer.write(str(rating) + \":\\n\")\n else:\n writer.write(rating + \"\\n\")"
]
| [
"0.80544746",
"0.7714753",
"0.7306745",
"0.72122705",
"0.69183373",
"0.6862691",
"0.67796004",
"0.6312141",
"0.63108295",
"0.61446047",
"0.61174047",
"0.61114484",
"0.6057809",
"0.6020447",
"0.6011145",
"0.59918296",
"0.5936555",
"0.5930612",
"0.5908128",
"0.5902657",
"0.58812743",
"0.5822975",
"0.58046365",
"0.5802522",
"0.5799204",
"0.5776501",
"0.57737213",
"0.57732576",
"0.574393",
"0.57154304"
]
| 0.7954401 | 1 |
This function samples the brightness parameters, mu_flor and mu_back using Hamiltonian Monte Carlo. | def sample_mu(self, val) -> None:
# get data
data = self.data.reshape((1, -1))
# get values
gain = val.gain
states = val.states
mu_flor = val.mu_flor
mu_flor_mean = val.mu_flor_mean
mu_flor_shape = val.mu_flor_shape
mu_back = val.mu_back
mu_back_mean = val.mu_back_mean
mu_back_shape = val.mu_back_shape
num_data = val.num_data
num_rois = val.num_rois
num_states = val.num_states
# initialze variables
num_vars = num_states + num_rois
idx = np.where(val.mu_flor_mean > 0)[0]
# shape
shape = np.zeros((num_vars, 1))
shape[:num_states, 0] = mu_flor_shape[:]
shape[num_states:, 0] = mu_back_shape
# scale
scale = np.zeros((num_vars, 1))
scale[idx, 0] = mu_flor_mean[idx] / mu_flor_shape[idx]
scale[num_states:, 0] = (mu_back_mean / mu_back_shape)[:]
# initialize a mu vector containing the variables we wish to sample, mu_flor and mu_back
q = np.zeros((num_vars, 1))
q[:num_states, 0] = mu_flor[:]
q[num_states:, 0] = mu_back[:]
q_old = q.copy()
idy = q > 0 # keep track of which states are bright (only bright states are sampled)
num_var = q.shape[0]
# hmc dynamics variables
h = np.random.exponential() / 100
masses = (1 + np.random.pareto(1, size=q.shape))
masses_inv = np.zeros(shape=masses.shape) # negative mass is interpreted as an unchanging variable
masses_inv[masses > 0] = 1 / masses[masses > 0]
num_steps = np.random.poisson(25)
# create populations array
pops = np.zeros((num_vars, num_rois * num_data))
"""
pops is an array such that each element i, j corresponds to the
multiplicitive factor in front of q[i] for data point j in the
likelihood. For example, if in ROI 1 at time level 17 there are two
fluorophores in the bright state, then we find the element, j,
corresponding to ROI 1 and time level 17, and we find the element,
i, corresponding to the bright state, and we set q[i,j]=2 (because
there are two bright fluorophores), then we would find the i
corresponding to the background brightness of ROI 1, and we would
set this q[i,j]=1 (the multiplicitive factor in front of the
background brightness is 1 when it is the corresponding ROI and 0
otherwise).
"""
for r in range(num_rois):
idx = np.arange(r*num_data, (r+1)*num_data)
pops[:num_states, idx] = states_to_pops(states[r, :, :], num_states)
pops[num_states + r, idx] = 1
# the conditional probability for the mu vector
def probability(q_, p_):
if np.sum(q_ < 0) > 0:
prob = -np.inf
else:
prob = (
np.sum(stats.gamma.logpdf(data, a=q_.T @ pops, scale=gain)) # likelihood
+ np.sum(stats.gamma.logpdf(q_[idy], a=shape[idy], scale=scale[idy])) # prior
+ np.sum(stats.norm.logpdf(p_[idy], loc=0, scale=np.sqrt(masses[idy]))) # momentum
)
return prob
# the gradient of the log posterior (i.e. -dH/dq) with respect to the mu vector
def dH_dq(q_):
if np.any(q_ < 0):
"""
In the event that q_new becomes negative, fast_digamma becomes
slow. Since q should never be negative anyway, there is no
need for further computation and we can skip this step knowing
that this value of q will be rejected anyway.
"""
return q_
q_new = np.zeros(q_.shape)
q_new[idy] = (
(shape[idy] - 1) / q_[idy] - 1 / scale[idy]
+ (pops @ (np.log(data / gain) - fast_digamma(q_.T @ pops)).T)[idy]
)
return q_new
# sample momentum
p = np.random.randn(num_var, 1) * np.sqrt(masses)
p_old = p.copy()
# run the HMC
for i in range(num_steps):
p = p + .5 * h * dH_dq(q)
q = q + h * p * masses_inv
p = p + .5 * h * dH_dq(q)
# find acceptance ratio
P_new = probability(q, p)
P_old = probability(q_old, p_old)
if (P_new - P_old) < np.log(np.random.rand()):
q = q_old
# update the new mu values
val.mu_flor[:] = q[:num_states, 0]
val.mu_back[:] = q[num_states:, 0]
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_posterior(self):\n \n# print (\"SAMPLING FROM LINEAR SIMILARITY VB\")\n if (self.posterior_mean == False):\n self.weight = Vil.sample_posterior(self.mu_weight, Vil.softplus(self.rho_weight))\n self.bias = Vil.sample_posterior(self.mu_bias, Vil.softplus(self.rho_bias))\n# print (self.bias)\n else:\n self.weight.data = self.mu_weight.data\n self.bias.data = self.mu_bias.data",
"def draw_samples(self):\n if self._integrator == 'HMC': \n self.momentum = torch.distributions.Normal(torch.zeros_like(self.parameters), torch.ones_like(self.parameters)).sample()\n start = time.time()\n if (self._integrator == 'RMHMC'): #torch has trouble differentiating through repeated eigenvalues\n self.jitters = self.jitter * torch.rand(self.parameters.shape[0])\n self.jitters[0] = 0.\n self.jitters[1] = 0.\n self.potential_ = self.get_potential()\n self.hamiltonian_ = self.get_hamiltonian()\n self.momentum = self.resample_momenta(init=True)\n self.momenta.append(self.momentum)\n if self.shadow:\n self.shadow_ = self.get_shadow()\n finished = 0\n counter = 0\n if self.verbose:\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n else:\n# for _ in tqdm(range(self.n_samples)):\n for sample in range(self.n_samples):\n self.step()\n if self.degenerate:\n break\n finished += 1\n counter += 1\n if counter > self.n_samples * 0.05:\n counter = 0\n print('('+str(int((sample+1)/self.n_samples*100))+'% complete)', int(self.accepted),'of', int(self.accepted + self.rejected), 'accepted', '('+str(int((self.accepted)/(self.accepted+self.rejected)*100))+'%)')\n total = float(self.accepted + self.rejected)\n end = time.time()\n if total >= self.n_samples:\n self.completed = True\n self.elapsed += end-start\n print('\\n', int(self.accepted), ' of ', int(self.accepted + self.rejected), ' samples accepted in', self.elapsed, ' seconds (', 100 * self.accepted/total,'%).')\n return None\n else:\n self.degenerates +=1\n self.find_mode()\n self.parameters = params_init + torch.randn(self.parameters.shape[0])/100\n self.reinitiate_samples()\n self.resample_momenta(init = True)\n return None",
"def _sample_lam(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_lam = self.lam\n \n # modify the feature ownership matrix\n self.lam = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.lam = old_lam",
"def sample_from_belief(self):\n if self._use_information_bottleneck:\n posteriors = [torch.distributions.Normal(m, torch.sqrt(s)) for m, s in zip(torch.unbind(self.z_means), torch.unbind(self.z_vars))]\n z = [d.rsample() for d in posteriors]\n self.z = torch.stack(z)\n else:\n self.z = self.z_means",
"def run(mu_v, Sigma_w, Sigma_z, a_mu, l_sensor):\n N = 1000\n # Init tracking\n mu_x = np.zeros(N) # Belief or estimation of hidden state \n F = np.zeros(N) # Free Energy of AI neuron\n mu_y = np.zeros(N) # Belief or prediction of sensory signal \n x = np.zeros(N) # True hidden state\n y = np.zeros(N) # Sensory signal as input to AI neuron\n\n robot_brain = pp_unit(dt, mu_v, Sigma_w, Sigma_z, a_mu) #make pp object\n \n \n\n start_time = time.time()\n for i in np.arange(1, N):\n #Active inference\n y[i] = l_sensor.ambient_light_intensity #take sensor reading\n print('light reading', y[i])\n F[i], mu_x[i], mu_y[i] = robot_brain.inference_step(i, mu_v, y[i])\n\n\n t_elapsed = time.time() - start_time\n\n print(\"Elapsed Time\", t_elapsed, \"sec\")\n return F, mu_x, mu_y, x, y",
"def sample(log_prob_func, params_init, num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, jitter=None, inv_mass=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, desired_accept_rate=0.8, store_on_GPU = True, pass_grad = None, verbose = False):\n\n # Needed for memory moving i.e. move samples to CPU RAM so lookup GPU device\n device = params_init.device\n\n if params_init.dim() != 1:\n raise RuntimeError('params_init must be a 1d tensor.')\n\n if burn >= num_samples:\n raise RuntimeError('burn must be less than num_samples.')\n\n NUTS = False\n if sampler == Sampler.HMC_NUTS:\n if burn == 0:\n raise RuntimeError('burn must be greater than 0 for NUTS.')\n sampler = Sampler.HMC\n NUTS = True\n step_size_init = step_size\n H_t = 0.\n eps_bar = 1.\n\n # Invert mass matrix once (As mass is used in Gibbs resampling step)\n mass = None\n if inv_mass is not None:\n if type(inv_mass) is list:\n mass = []\n for block in inv_mass:\n mass.append(torch.inverse(block))\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n mass = torch.inverse(inv_mass)\n elif len(inv_mass.shape) == 1:\n mass = 1/inv_mass\n\n params = params_init.clone().requires_grad_()\n param_burn_prev = params_init.clone()\n if not store_on_GPU:\n ret_params = [params.clone().detach().cpu()]\n else:\n ret_params = [params.clone()]\n\n num_rejected = 0\n # if sampler == Sampler.HMC:\n if not verbose:\n util.progress_bar_init('Sampling ({}; {})'.format(sampler, integrator), num_samples, 'Samples')\n for n in range(num_samples):\n if not verbose:\n util.progress_bar_update(n)\n try:\n momentum = gibbs(params, sampler=sampler, log_prob_func=log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, metric=metric, mass=mass)\n\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, normalizing_const=normalizing_const, sampler=sampler, integrator=integrator, metric=metric, inv_mass=inv_mass)\n\n leapfrog_params, leapfrog_momenta = leapfrog(params, momentum, log_prob_func, sampler=sampler, integrator=integrator, steps=num_steps_per_sample, step_size=step_size, inv_mass=inv_mass, jitter=jitter, jitter_max_tries=jitter_max_tries, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, metric=metric, store_on_GPU = store_on_GPU, debug=debug, pass_grad = pass_grad)\n if sampler == Sampler.RMHMC and integrator == Integrator.EXPLICIT:\n\n # Step required to remove bias by comparing to Hamiltonian that is not augmented:\n ham = ham/2 # Original RMHMC\n\n params = leapfrog_params[0][-1].detach().requires_grad_()\n params_copy = leapfrog_params[-1].detach().requires_grad_()\n params_copy = params_copy.detach().requires_grad_()\n momentum = leapfrog_momenta[0][-1]\n momentum_copy = leapfrog_momenta[-1]\n\n leapfrog_params = leapfrog_params[0]\n leapfrog_momenta = leapfrog_momenta[0]\n\n # This is trying the new (unbiased) version:\n new_ham = rm_hamiltonian(params, momentum, log_prob_func, jitter, normalizing_const, softabs_const=softabs_const, sampler=sampler, integrator=integrator, metric=metric) # In rm sampler so no need for inv_mass\n # 
new_ham = hamiltonian([params,params_copy] , [momentum,momentum_copy], log_prob_func, jitter=jitter, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, normalizing_const=normalizing_const, sampler=sampler, integrator=integrator, metric=metric)\n\n else:\n params = leapfrog_params[-1].to(device).detach().requires_grad_()\n momentum = leapfrog_momenta[-1].to(device)\n new_ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, normalizing_const=normalizing_const, sampler=sampler, integrator=integrator, metric=metric, inv_mass=inv_mass)\n\n\n\n # new_ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, normalizing_const=normalizing_const, sampler=sampler, integrator=integrator, metric=metric)\n rho = min(0., acceptance(ham, new_ham))\n if debug == 1:\n print('Step: {}, Current Hamiltoninian: {}, Proposed Hamiltoninian: {}'.format(n,ham,new_ham))\n\n if rho >= torch.log(torch.rand(1)):\n if debug == 1:\n print('Accept rho: {}'.format(rho))\n if n > burn:\n if store_on_GPU:\n ret_params.append(leapfrog_params[-1])\n else:\n # Store samples on CPU\n ret_params.append(leapfrog_params[-1].cpu())\n else:\n param_burn_prev = leapfrog_params[-1].to(device).clone()\n else:\n num_rejected += 1\n if n > burn:\n params = ret_params[-1].to(device)\n # leapfrog_params = ret_params[-num_steps_per_sample:] ### Might want to remove grad as wastes memory\n if store_on_GPU:\n ret_params.append(ret_params[-1].to(device))\n else:\n # Store samples on CPU\n ret_params.append(ret_params[-1].cpu())\n else:\n params = param_burn_prev.clone()\n if debug == 1:\n print('REJECT')\n\n if NUTS and n <= burn:\n if n < burn:\n step_size, eps_bar, H_t = adaptation(rho, n, step_size_init, H_t, eps_bar, desired_accept_rate=desired_accept_rate)\n if n == burn:\n step_size = eps_bar\n print('Final Adapted Step Size: ',step_size)\n\n # if not store_on_GPU: # i.e. delete stuff left on GPU\n # # This adds approximately 50% to runtime when using colab 'Tesla P100-PCIE-16GB'\n # # but leaves no memory footprint on GPU after use.\n # # Might need to check if variables exist as a log prob error could occur before they are assigned!\n #\n # del momentum, leapfrog_params, leapfrog_momenta, ham, new_ham\n # torch.cuda.empty_cache()\n\n except util.LogProbError:\n num_rejected += 1\n params = ret_params[-1].to(device)\n if n > burn:\n params = ret_params[-1].to(device)\n # leapfrog_params = ret_params[-num_steps_per_sample:] ### Might want to remove grad as wastes memory\n if store_on_GPU:\n ret_params.append(ret_params[-1].to(device))\n else:\n # Store samples on CPU\n ret_params.append(ret_params[-1].cpu())\n else:\n params = param_burn_prev.clone()\n if debug == 1:\n print('REJECT')\n if NUTS and n <= burn:\n # print('hi')\n rho = float('nan') # Acceptance rate = 0\n # print(rho)\n step_size, eps_bar, H_t = adaptation(rho, n, step_size_init, H_t, eps_bar, desired_accept_rate=desired_accept_rate)\n if NUTS and n == burn:\n step_size = eps_bar\n print('Final Adapted Step Size: ',step_size)\n\n if not store_on_GPU: # i.e. delete stuff left on GPU\n # This adds approximately 50% to runtime when using colab 'Tesla P100-PCIE-16GB'\n # but leaves no memory footprint on GPU after use in normal HMC mode. 
(not split)\n # Might need to check if variables exist as a log prob error could occur before they are assigned!\n momentum = None; leapfrog_params = None; leapfrog_momenta = None; ham = None; new_ham = None\n\n del momentum, leapfrog_params, leapfrog_momenta, ham, new_ham\n torch.cuda.empty_cache()\n\n # var_names = ['momentum', 'leapfrog_params', 'leapfrog_momenta', 'ham', 'new_ham']\n # [util.gpu_check_delete(var, locals()) for var in var_names]\n # import pdb; pdb.set_trace()\n\n\n # import pdb; pdb.set_trace()\n if not verbose:\n util.progress_bar_end('Acceptance Rate {:.2f}'.format(1 - num_rejected/num_samples)) #need to adapt for burn\n if NUTS and debug == 2:\n return list(map(lambda t: t.detach(), ret_params)), step_size\n elif debug == 2:\n return list(map(lambda t: t.detach(), ret_params)), 1 - num_rejected/num_samples\n else:\n return list(map(lambda t: t.detach(), ret_params))",
"def sample(args):\n mu = args[0]\n log_sigma = args[1]\n noise = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)\n z = mu + tf.exp(log_sigma/2.0) * noise\n return z",
"def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")",
"def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf",
"def _sample_likelihood_params(self):\r\n self._sample_omega()\r\n self._sample_beta()\r\n self._sample_r()",
"def sample_states_r(r):\n\n print('<', end='')\n\n states_r = states[r, :, :].copy()\n\n # shuffle the loads that get sampled together\n shuffled_load_IDs = np.random.permutation(num_load)\n\n for g in range(0, num_load, num_together):\n\n IDs = shuffled_load_IDs[g:g+num_together]\n\n loglhoodmtx = np.zeros((num_combos, num_data))\n for unique_combo in np.unique(degenerate_combos, axis=0):\n \"\"\"\n We save computation time by calculating the log likelihood\n only for unique brightness states and then assigning them\n to the corresponding states afterwords. For example if we\n have two states: dark and bright, and we are sampling the\n joint phototrajectory for fluorophores A and B, then\n A-bright-B-dark and A-dark-B-bright would have the same\n log likelihood. Rather than compute this twice we caclulate\n the log likelihood for one-fluorophore-bright and assign it\n to both the above joint states.\n \"\"\"\n idx = (degenerate_combos == unique_combo).all(axis=1)\n for i in range(num_together):\n states_r[IDs[i], :] = unique_combo[i]\n brightness = mu_flor @ states_to_pops(states_r, num_states) + mu_back[r]\n loglhoodmtx[idx, :] = stats.gamma.logpdf(data[r,:], a=brightness, scale=gain)\n\n # demand final state is photobleached\n loglhoodmtx[:-num_end:, -1] = -np.inf\n loglhoodmtx[-1, -1] = 0\n\n # softmax the log likelihood matrix to take it out of log space\n lhoodmtx = np.exp(loglhoodmtx - np.max(loglhoodmtx, axis=0))\n lhoodmtx += (loglhoodmtx > -np.inf) * 1e-300 # for numerical stability\n\n # run forward-filter-backwards-sample algorithm using numba\n trajectory = FFBS(lhoodmtx, pi_comb)\n\n # convert from combined state space to regular state space\n states_r[IDs, :] = state_combos[trajectory, :].T\n\n print('>', end='')\n\n return states_r",
"def sample_agn_luminosity(self, z):\n\t\t# Assign redshift bin\n\t\tis_less_than_right_edge = (z < self.z_bins)\n\t\talpha = self.alphas[is_less_than_right_edge][0]\n\t\tbeta = self.betas[is_less_than_right_edge][0]\n\t\tM_star = self.M_stars[is_less_than_right_edge][0]\n\n\t\t# Evaluate function\n\t\tpmf = self.get_double_power_law(alpha, beta, M_star)\n\n\t\t# Sample luminosity\n\t\tsampled_M = np.random.choice(self.M_grid, None, replace=True, p=pmf)\n\t\treturn sampled_M",
"def paramSamples(self):\n\n if self._paramSamples is not None:\n return self._paramSamples\n timescale = self.mjdmax - self.mjdmin\n T0Vals = self.randomState.uniform(size=self.numSN) * timescale \\\n + self.mjdmin\n mB, x1, c, m = SALT2_MMDist(self.numSN)\n print(\"range of sampled mB\", mB.min(), mB.max())\n x0 = np.zeros(len(mB))\n mB += self.randomState.normal(loc=0., scale=self.Mdisp,\n size=self.numSN)\n H70cosmo = self.cosmo.clone(name='H70cosmo',\n H0=self.cosmo.H0 * (70/self.cosmo.H0.value))\n MB = mB + H70cosmo.distmod(self.zSamples).value - \\\n self.cosmo.distmod(self.zSamples).value\n model = sncosmo.Model(source='SALT2')\n for i, z in enumerate(self.zSamples):\n model.set(z=z, x1=x1[i], c=c[i])\n model.set_source_peakabsmag(MB[i], 'bessellB', 'ab',\n cosmo=self.cosmo)\n x0[i] = model.get('x0')\n mB[i] = model.source.peakmag('bessellB', 'ab')\n df = pd.DataFrame(dict(x0=x0, mB=mB, x1=x1, c=c,\n t0=T0Vals, z=self.zSamples, snid=self.snids))\n self._paramSamples = df\n return self._paramSamples",
"def sample_low_rank(self, n_samples, mu, logvar, F):\n #F = torch.unsqueeze(F, dim=1).repeat(1, n_samples, 1, 1) # [self.batch_size, n_samples, self.Y_dim, self.rank]\n F = F.repeat(n_samples, 1, 1) # [self.batch_size*n_samples, self.Y_dim, self.rank]\n mu = mu.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n logvar = logvar.repeat(n_samples, 1) # [self.batch_size*n_samples, self.Y_dim]\n eps_low_rank = torch.randn(self.batch_size*n_samples, self.rank, 1)\n eps_diag = torch.randn(self.batch_size*n_samples, self.Y_dim)\n half_var = torch.exp(0.5*logvar) # [self.batch_size*n_samples, self.Y_dim]\n samples = torch.bmm(F, eps_low_rank).squeeze() + mu + half_var*eps_diag\n samples = samples.reshape(n_samples, self.batch_size, self.Y_dim)\n samples = samples.transpose(0, 1)\n samples = self.unwhiten_back(samples)\n samples = samples.data.cpu().numpy()\n return samples",
"def get_mf_samples(self, data = True):\n \n if not data:\n \n mf_vals = T.tile(self.mf_vis_p,(1,self.num_samples))\n \n else:\n \n mf_vals = self.mf_vis_p\n \n samples = self.theano_rand_gen.binomial(size= (self.num_vars,\n self.num_samples),\n n = 1, \n p = mf_vals,\n dtype=theano.config.floatX)\n \n log_q_vals = self.get_importance_evals(samples, mf_vals)\n \n return T.transpose(samples), log_q_vals",
"def sampling(args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean = 0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon",
"def _sample(self, rnn_output, temperature):\n pass",
"def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_brightness_img(results, self.factor)\n return results",
"def sample_model(model, x, y, params_init, model_loss='multi_class_linear_output' ,num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, tau_out=1.,tau_list=None, store_on_GPU = True, desired_accept_rate=0.8, verbose = False):\n\n device = params_init.device\n params_shape_list = []\n params_flattened_list = []\n build_tau = False\n if tau_list is None:\n tau_list = []\n build_tau = True\n for weights in model.parameters():\n params_shape_list.append(weights.shape)\n params_flattened_list.append(weights.nelement())\n if build_tau:\n tau_list.append(torch.tensor(1.))\n\n log_prob_func = define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=normalizing_const, device = device)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU, verbose = verbose)",
"def demo_rbm_tutorial(\n eta = 0.01,\n n_hidden = 500,\n n_samples = None,\n minibatch_size = 10,\n plot_interval = 10,\n w_init_mag = 0.01,\n n_epochs = 1,\n persistent = False,\n seed = None\n ):\n if is_test_mode():\n n_samples=50\n n_epochs=1\n plot_interval=50\n n_hidden = 10\n\n data = get_mnist_dataset(flat = True).training_set.input[:n_samples]\n n_visible = data.shape[1]\n rng = np.random.RandomState(seed)\n activation = lambda x: (1./(1+np.exp(-x)) > rng.rand(*x.shape)).astype(float)\n\n w = w_init_mag*np.random.randn(n_visible, n_hidden)\n b_hid = np.zeros(n_hidden)\n b_vis = np.zeros(n_visible)\n\n if persistent:\n hid_sleep_state = np.random.rand(minibatch_size, n_hidden)\n\n for i, vis_wake_state in enumerate(minibatch_iterate(data, n_epochs = n_epochs, minibatch_size=minibatch_size)):\n hid_wake_state = activation(vis_wake_state.dot(w)+b_hid)\n if not persistent:\n hid_sleep_state = hid_wake_state\n vis_sleep_state = activation(hid_sleep_state.dot(w.T)+b_vis)\n hid_sleep_state = activation(vis_sleep_state.dot(w)+b_hid)\n\n # Update Parameters\n w_grad = (vis_wake_state.T.dot(hid_wake_state) - vis_sleep_state.T.dot(hid_sleep_state))/float(minibatch_size)\n w += w_grad * eta\n b_vis_grad = np.mean(vis_wake_state, axis = 0) - np.mean(vis_sleep_state, axis = 0)\n b_vis += b_vis_grad * eta\n b_hid_grad = np.mean(hid_wake_state, axis = 0) - np.mean(hid_sleep_state, axis = 0)\n b_hid += b_hid_grad * eta\n\n if i % plot_interval == 0:\n dbplot(w.T[:100].reshape(-1, 28, 28), 'weights')\n dbplot(vis_sleep_state.reshape(-1, 28, 28), 'dreams')\n print 'Sample %s' % i",
"def sampling(args):\n t_mean, t_log_var = args\n # YOUR CODE HERE\n epsilon = K.random_normal(t_mean.shape)\n z = epsilon * K.exp(0.5 * t_log_var) + t_mean\n return z",
"def gibbs_sampler(batch_size, mu, kappa, lambda_):\n x_0 = tf.zeros([batch_size])\n x_1 = tf.zeros([batch_size])\n x_2 = tf.zeros([batch_size])\n samples = []\n for i in tf.range(burn_in + avg_count):\n phi_0 = lambda_[:, 0] * tf.sin(x_1 - mu[:, 1]) + lambda_[:, 1] * tf.sin(x_2 - mu[:, 2])\n k_neg_0 = tf.sqrt(kappa[:, 0] * kappa[:, 0] + phi_0 * phi_0)\n mu_neg_0 = mu[:, 0] + tf.atan(phi_0 / kappa[:, 0])\n dist_0 = tfd.VonMises(loc=mu_neg_0, concentration=k_neg_0)\n x_0 = dist_0.sample()\n\n phi_1 = lambda_[:, 0] * tf.sin(x_0 - mu[:, 0]) + lambda_[:, 2] * tf.sin(x_2 - mu[:, 2])\n k_neg_1 = tf.sqrt(kappa[:, 1] * kappa[:, 1] + phi_1 * phi_1)\n mu_neg_1 = mu[:, 1] + tf.atan(phi_1 / kappa[:, 1])\n dist_1 = tfd.VonMises(loc=mu_neg_1, concentration=k_neg_1)\n x_1 = dist_1.sample()\n\n phi_2 = lambda_[:, 1] * tf.sin(x_0 - mu[:, 0]) + lambda_[:, 2] * tf.sin(x_1 - mu[:, 1])\n k_neg_2 = tf.sqrt(kappa[:, 2] * kappa[:, 2] + phi_2 * phi_2)\n mu_neg_2 = mu[:, 2] + tf.atan(phi_2 / kappa[:, 2])\n dist_2 = tfd.VonMises(loc=mu_neg_2, concentration=k_neg_2)\n x_2 = dist_2.sample()\n\n if i >= burn_in:\n samples.append(tf.stack([x_0, x_1, x_2], axis=1)) # [avg_count, B, out_dim]\n\n return tf.reduce_mean(tf.stack(samples), axis=0)",
"def resample_from_mf(self):\n self.g = np.zeros((self.K, self.K, self.B))\n for k1 in xrange(self.K):\n for k2 in xrange(self.K):\n self.g[k1,k2,:] = np.random.dirichlet(self.mf_gamma[k1,k2,:])",
"def _sample_r(self):\r\n phi_X = self.phi(self.X, self.W, add_bias=True)\r\n F = phi_X @ self.beta.T\r\n P = logistic(F)\r\n for j in range(self.J):\r\n A = self._crt_sum(j)\r\n # `maximum` is element-wise, while `max` is not.\r\n maxes = np.maximum(1 - P[:, j], -np.inf)\r\n B = 1. / -np.sum(np.log(maxes))\r\n self.R[j] = np.random.gamma(A, B)\r\n # `R` cannot be zero.\r\n self.R[np.isclose(self.R, 0)] = 0.0000001",
"def main():\n# pixel_to_wavelen_dir = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\\n# Spectral_Band_pass\\Pixel_to_wavelen_map'\n\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM_only_Gaussian'\n radiance_file = read_radiance_data()\n file_path_2 = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM\\spectral_bandpass_1400'\n\n #start with Gaussian Bandpass\n# data_names = [each for each in os.listdir(file_path)\n# if each.startswith(\"Params_Gauss\")]\n#\n#\n# sample_data = []\n# for data_files in data_names[9:]:\n# #print(data_files)\n#\n# wavelen_suffix = data_files.split('_')[-1]\n#\n# pixel_to_wvl_map_data = sorted([each for each in os.listdir(pixel_to_wavelen_dir)\n# if each.endswith(wavelen_suffix)])\n#\n# gaussian_files = os.path.join(file_path, data_files)\n#\n# dframe = pd.read_csv(gaussian_files)\n# #dframe = dframe[['A1', 'A2', 'Sigma1', 'Sigma2']]\n# dframe = dframe[['A1', 'Sigma1']] # for Gaussian only\n# pixel_to_wav_map = os.path.join(pixel_to_wavelen_dir, pixel_to_wvl_map_data[0])\n# dframe1 = pd.read_csv(pixel_to_wav_map)\n# dframe['CW'] = dframe1['CW']\n# dframe = dframe.iloc[1400]\n# sample_data.append(dframe.values)\n # for flat top Gaussian\n# #gaussian_values = perform_spectral_interpolation(np.array(sample_data))\n\n# gaussian_values = perform_spectral_interpolation_only_gaussian(np.array(sample_data))\n#\n##\n## # Let us now create a spectral bandpass\n# #create_spectral_bandpass(gaussian_values, radiance_file, file_path) # flat top Gaussian\n# create_spectral_bandpass_only_gaussian(gaussian_values, radiance_file, file_path)\n#\n#\n## #Make sure that the center wavelength of Gaussians are the same\n## sample_val = []\n## data_names_interpol = sorted([each for each in os.listdir(file_path_2)\n## if each.endswith('csv')])\n## interpol_wavelen = []\n## interpol_rad = [ ]\n##\n## for i in range(0, 64):\n## sub_sample_wvl = []\n## sub_sample_rad = []\n##\n## for files in data_names_interpol[9:]:\n##\n## interpol_rsr = os.path.join(file_path_2, files)\n## dframe = pd.read_csv(interpol_rsr, usecols=[\"wavelength\", \"rad\"])\n##\n## wavelength = dframe['wavelength'][i]\n## rad = dframe['rad'][i]\n## sub_sample_wvl.append(wavelength)\n## sub_sample_rad.append(rad)\n## dframe = perform_point_interpolation(sub_sample_wvl, sub_sample_rad,\n #np.array(sample_data)[:,-1])\n## interpol_rad.append(dframe['rad'].values)\n## interpol_wavelen.append(dframe['wavelength'].values)\n## create_spectral_bandpass_interpol(np.array(interpol_wavelen),\n #np.array(interpol_rad),\n #np.array(sample_data)[:,-1], file_path_2)\n# cc\n##\n#\n##\n###\n## # let us now perfrom spectral convolution with high res. radiance data\n calculate_in_band_irradiance(file_path, file_path_2, radiance_file)",
"def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")",
"def sampler(QU, qu, x_mean, x_err, mask, data_mean,\\\n Nside=256, Niter=10000, R_Pp=None):\n \n print(data_mean)\n print(x_mean, np.shape(x_mean))\n print(x_err)\n N = len(mask)\n QU_model = np.full((2, hp.nside2npix(Nside)), np.nan)\n QU_star = np.full((2, hp.nside2npix(Nside)), np.nan)\n model_err = np.full((3, hp.nside2npix(Nside)), np.nan)\n star_err = np.full((3, hp.nside2npix(Nside)), np.nan)\n burnin = int(Niter/2)\n \n t0 = time.time()\n # Initialize function:\n log_like = partial(logLike, data=QU[:,mask])\n log_prior = partial(logPrior, mu=data_mean, sigma=x_err)\n func = partial(QU_func, qu=qu[:,mask])\n print(np.mean(data_mean[N:-1])) \n print(log_like(func(data_mean)))\n cov0 = Cov(x_mean)\n #print(cov0)\n\n # Initialize:\n params0, model0, loglike0, logprior0 = Initialize(log_like,\\\n log_prior,\\\n func, x_mean,\\\n cov0)\n #sys.exit()\n # Metropolis Hastrings:\n params_maxL, params = MH(log_like, log_prior, func, params0,\\\n model0, loglike0, logprior0, x_mean,\\\n cov0, burnin, Niter)\n\n t1 = time.time()\n print('Sampling time: {} s'.format(t1-t0))\n print(params_maxL[:len(mask)], params_maxL[len(mask):-1], params_maxL[-1])\n print_params(params_maxL, int((len(x_mean)-1)/2))\n \n #print(np.std(params[burnin:,:], axis=0))\n #print(np.shape(params))\n QU_model[:,mask] = QU_func(params_maxL, qu[:,mask])\n QU_star[:,mask] = QU_func(params_maxL, qu[:,mask], star=True)\n bkgr_model = background(params_maxL[len(mask):])\n sample_err = Error_estimation(params[burnin:, :], par=True) #!\n model_err[:,mask], star_err[:,mask], bkgr_err\\\n = Error_estimation(params[burnin:,:], qu[:,mask])\n\n mod0 = QU_func(data_mean, qu[:,mask])\n plt.figure()\n plt.plot(qu[0,mask], QU[0,mask], '.k')\n plt.plot(qu[1,mask], QU[1,mask], '.b')\n plt.scatter(qu[0,mask], QU_model[0,mask], marker='x', c='gray')\n plt.scatter(qu[1,mask], QU_model[1,mask], marker='x', c='skyblue')\n #plt.scatter(qu[0,mask], mod0[0,:], marker='^', c='orange')\n #plt.scatter(qu[1,mask], mod0[1,:], marker='^', c='g')\n #\"\"\"\n x = np.linspace(np.min(qu), np.max(qu), 10)\n b = np.mean(bkgr_model, axis=1)\n print(b, np.shape(model_err), '<-----------')\n print(np.corrcoef(params_maxL, data_mean))\n\n plt.plot(x, -np.mean(params_maxL[:N])*x, '-r')\n plt.plot(x, -np.mean(params_maxL[:N])*x + b[0], '-k')\n plt.plot(x, -np.mean(params_maxL[:N])*x + b[1], '-b')\n plt.grid(True)\n #\"\"\"\n \n compare_params(params_maxL, data_mean, N)\n \n plot_params(params_maxL, xlab='hei', ylab='hopp')\n #plot_params(data_mean, xlab='hei', ylab='hopp')\n plot_params(params[:,:], xlab='hei', ylab='hopp')\n #plot_params(params[burnin:,:], hist=True, xlab='hei', ylab='hopp')\n print(np.std(params[burnin:,:], axis=0))\n #plt.show()\n #sys.exit()\n return([QU_model, QU_star, bkgr_model, params_maxL, params[burnin:,:]],\\\n [model_err, star_err, bkgr_err, sample_err])\n # end",
"def metropolis_hastings(posterior_stats):\n\titerations = 5000\n\ttheta = np.array([[-0.05], [0.5]])\n\tproposal_stdev = np.array([[0.1], [0.1]])\n\tln_posterior = calculate_ln_posterior(theta, posterior_stats)\n\taccepts = 0\n\tmcmc_samples = theta \n\n\tfor i in range(iterations):\n\t\ttheta_proposed = generate_candidates(theta, proposal_stdev)\n\t\tln_posterior_proposed = calculate_ln_posterior(theta_proposed, posterior_stats)\n\t\t\n\t\thastings_ratio = calculate_hastings_ratio(ln_posterior_proposed, ln_posterior)\t\n\t\t\n\t\tacceptance_probability = min([1, hastings_ratio])\n\n\t\tif (random.uniform(0,1) < acceptance_probability):\n\t\t\t#Then accept proposed theta\n\t\t\ttheta = theta_proposed\n\t\t\tln_posterior = ln_posterior_proposed\n\t\t\taccepts += 1\n\t\tmcmc_samples = np.hstack((mcmc_samples, theta))\n\n\tmcmc_mean = np.array([ [np.mean(mcmc_samples[0])], [np.mean(mcmc_samples[1])] ])\n\tcovariance = np.cov(mcmc_samples)\n\tmcmc = {'samples': mcmc_samples.transpose(), 'mean': mcmc_mean, 'covar': covariance} \n\tprint('acceptance ratio init')\n\tacceptance_ratio = accepts / iterations\n\tprint(acceptance_ratio)\n\n\treturn mcmc",
"def resample_from_mf(self):\n raise NotImplementedError()\n self.g = np.zeros((self.K, self.K, self.B))\n for k1 in xrange(self.K):\n for k2 in xrange(self.K):\n self.g[k1,k2,:] = np.random.dirichlet(self.mf_gamma[k1,k2,:])",
"def sampling(self, args):\n self.z_mean, self.z_log_var = args\n batch = K.shape(self.z_mean)[0]\n dim = K.int_shape(self.z_mean)[1]\n epsilon = K.random_uniform(shape=(batch, dim))\n \n return self.z_mean + K.exp(0.5 * self.z_log_var) * epsilon"
]
| [
"0.598156",
"0.5978819",
"0.5822451",
"0.5607292",
"0.5564162",
"0.55608475",
"0.5511193",
"0.54922384",
"0.5477683",
"0.5445564",
"0.5433154",
"0.542647",
"0.54067206",
"0.5380345",
"0.53725094",
"0.53361946",
"0.53319806",
"0.5330416",
"0.53189874",
"0.5314222",
"0.5311392",
"0.5297106",
"0.5286653",
"0.5240146",
"0.52272207",
"0.52256715",
"0.52204114",
"0.5214797",
"0.5206796",
"0.5193724"
]
| 0.6618311 | 0 |
This function runs a Gibbs sampler over the posterior distribution to collect samples from it. | def gibbs_sampler(self, data=None, parameters=None, save_name='test', save_path='outfiles/', plot_status=False, log_file=False, **kwargs):
print('\n{}\n{}\n{}'.format('-'*len(save_name), save_name, '-'*len(save_name)))
# creates a log file if specified
if log_file:
log = save_name + '.log'
with open(log, 'w') as handle:
handle.write('[[[[{}]]]]\n'.format(save_name))
handle.write('starting Gibbs sampler\n')
# extract values
if parameters is None:
parameters = {}
parameters = {**PARAMETERS, **parameters, **kwargs}
# data should be a 2d array where each row is the brightness of a different ROI
data = np.atleast_2d(data)
self.data = data
# set variables for gibbs sampler
np.random.seed(parameters['seed']) # set RNG
val = self.initialize_variables(data, parameters)
num_iter = val.num_iter
# set history
self.history = HistoryH5(
save_name=save_name,
path=save_path,
variables=val,
num_iter=num_iter,
fields=[
'num_flor',
'mu_flor',
'mu_back',
'transitions',
'P',
],
)
# run the gibbs sampler
print('starting Gibbs sampler')
print('parameters:')
for key in parameters:
text = str(getattr(val, key)).replace('\n', ', ')
print('--{} = {}'.format(key, text))
if log_file:
with open(log, 'a') as handle:
handle.write('--{} = {}\n'.format(key, text))
for iter_num in range(num_iter):
print('iteration {} of {} ['.format(iter_num + 1, num_iter), end='')
t = time.time()
self.sample_states(val)
print('%', end='')
self.sample_mu(val)
print('%', end='')
self.sample_transitions(val)
print('%', end='')
if plot_status:
self.plot_variables(val)
print('%', end='')
self.history.checkpoint(val, iter_num)
print('%', end='')
print('] ({} s)'.format(round(time.time()-t, 2)))
print('num_flors=[{}]'.format(','.join(str(num_flor) for num_flor in val.num_flor)))
if log_file:
with open(log, 'a') as handle:
handle.write('iteration {} of {} ({}s)\n'.format(iter_num + 1, num_iter, round(time.time()-t, 2)))
handle.write('num_flors=[{}]\n'.format(','.join(str(num_flor) for num_flor in val.num_flor)))
print('sampling complete')
if log_file:
with open(log, 'a') as handle:
handle.write('sampling complete\n')
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gibbs_sample(self):\n # Initialize the initial state of Markov Chain.\n self.initialize()\n # Gibbs Sampling.\n for iteration_index in range(0, self.iteration_number, 1):\n for m in range(0,self.document_number,1):\n for n in range(0, len(self.documents[m]), 1):\n # Change the state of word_m_n according to it's full conditional probability.\n self.sample_by_full_condition(m=m,n=n)\n print 'iteration:', iteration_index,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if iteration_index > self.burn_in and iteration_index % self.update_cycle == 0:\n # Update the distribution after burn in.\n self.update_distribution()\n else:\n pass\n # calculate the final distribution.\n self.get_distribution()",
"def _gibbs_sampling_iteration(self):\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n k = self.z_mn[m, n]\n self.n_mk[m, k] -= 1\n self.n_m[m] -= 1\n self.n_kt[k, w_mn] -= 1\n self.n_k[k] -= 1\n k = self._conditional_z(\n self.n_components, self.alpha, self.beta,\n self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)\n self.z_mn[m, n] = k\n self.n_mk[m, k] += 1\n self.n_m[m] += 1\n self.n_kt[k, w_mn] += 1\n self.n_k[k] += 1",
"def gibbs_sampling(F, Y, gprior_params=(10, 0.1), verbose=True):\n\tlatent_dim = F.shape[1]\n\tmu_0 = np.repeat(0, latent_dim) # prior mean 0 for regression coefficients\n\tSigma_0 = np.diag(np.repeat(1, latent_dim)) # prior variance 1\n\ta_0 = 1 # Inverse-Gamma(1, 1) is fairly diffused\n\tb_0 = 1\n\tloading_matrix, Y_variance = blr_mv(Y, F, mu_0, Sigma_0, a_0, b_0)\n\tY_hat = np.matmul(F, loading_matrix)\n\tmse = np.mean((Y - Y_hat) ** 2)\n\tif verbose:\n\t\tprint(mse)\n\tcovs = []\n\tgp_traces = []\n\tfor j in range(latent_dim):\n\t\tcov, gp_trace = sample_gp_posterior(F[:, j], gprior_params=gprior_params, test=True)\n\t\tcovs.append(cov)\n\t\tgp_traces.append(gp_trace)\n\tS1, S2, S3, S4 = build_covariance_blocks(covs, loading_matrix, Y_variance)\n\tF = sample_conditional_F(Y, S1, S2, S3, S4)\n\treturn F, loading_matrix, Y_variance, gp_traces, mse",
"def gibbs_sample(G, M, num_iters):\n\n # number of games\n N = G.shape[0]\n # Array containing mean skills of each player, set to prior mean\n w = np.zeros((M, 1))\n # Array that will contain skill samples\n skill_samples = np.zeros((M, num_iters))\n # Array containing skill variance for each player, set to prior variance\n pv = 0.5 * np.ones(M)\n\n # number of iterations of Gibbs\n for i in tqdm(range(num_iters)):\n # sample performance given differences in skills and outcomes\n t = np.zeros((N, 1))\n for g in range(N):\n\n s = w[G[g, 0]] - w[G[g, 1]] # difference in skills\n t[g] = s + np.random.randn() # Sample performance\n while t[g] < 0: # rejection step\n t[g] = s + np.random.randn() # resample if rejected\n\n # Jointly sample skills given performance differences\n m = np.zeros((M, 1))\n for p in range(M):\n # fill in m[p] prediction (natural param conditional)\n wins_array = np.array(G[:, 0] == p).astype(int)\n loss_array = np.array(G[:, 1] == p).astype(int)\n m[p] = np.dot(t[:,0], (wins_array - loss_array))\n\n iS = np.zeros((M, M)) # Container for sum of precision matrices (likelihood terms)\n for g in range(N):\n # Build the iS matrix\n winner = G[g, 0]\n loser = G[g, 1]\n\n iS[winner, winner] += 1\n iS[winner, loser] -= 1\n iS[loser, winner] -= 1\n iS[loser, loser] += 1\n\n # Posterior precision matrix\n iSS = iS + np.diag(1. / pv)\n\n # Use Cholesky decomposition to sample from a multivariate Gaussian\n iR = scipy.linalg.cho_factor(iSS) # Cholesky decomposition of the posterior precision matrix\n mu = scipy.linalg.cho_solve(iR, m, check_finite=False) # uses cholesky factor to compute inv(iSS) @ m\n\n # sample from N(mu, inv(iSS))\n w = mu + scipy.linalg.solve_triangular(iR[0], np.random.randn(M, 1), check_finite=False)\n skill_samples[:, i] = w[:, 0]\n\n return skill_samples",
"def gibbs(train_data, train_label, w_init, num_iterations, display_frequency, collect_final_sample_frequency): \n dim = train_data.shape[1]\n w_covariance = np.eye(dim) + np.sum(np.matmul(train_data[:, :, np.newaxis], train_data[:, np.newaxis, :]), axis= 0)\n w_covariance = np.linalg.inv(w_covariance)\n \n w = w_init\n _, z = get_output(w, train_data)\n sigma = 1\n \n sampled = [] # Keeps track of all samples\n final = [] # Keeps track of final samples which are sampled in a cyclic manner\n\n for i in range(num_iterations):\n # Sample weight\n w_new_mean = np.matmul(w_covariance, np.sum(z*train_data, axis= 0)[:, np.newaxis]) # dim x 1\n w = np.random.multivariate_normal(w_new_mean[:, 0], w_covariance, 1).T # dim x 1\n \n # Now sample hidden variable\n _, z_new = get_output(w, train_data)\n lower= np.zeros((train_data.shape[0], 1))\n upper= INFINITY*np.ones ((train_data.shape[0], 1)) \n lower[train_label < 0.5, :] = -INFINITY\n upper[train_label < 0.5, :] = 0\n\n\n X = stats.truncnorm((lower - z_new) / sigma, (upper - z_new) / sigma, loc= z_new, scale= sigma) \n z_new = X.rvs((train_data.shape[0],1))\n \n z = copy.deepcopy(z_new)\n \n if i % collect_final_sample_frequency == 0:\n # Sample from the current parameters\n final.append(w)\n\n if (i+1) % display_frequency == 0 or i == num_iterations-1:\n print(\"Iter {:6d} done\".format(i+1))\n \n return np.array(final), w",
"def posterior_sample(self):\n pass",
"def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]",
"def gibbs_sample(self, trial_count):\n values = {}\n count = total_trials = 0\n\n # Initialize\n for letter in self.letters:\n if (letter in self.query.evidence):\n # Fix evidence variables\n values[letter] = self.query.evidence[letter]\n else:\n # Initialize non-evidence to True\n values[letter] = True\n\n # Collect non-evidence variables\n non_evidence_letters = []\n for letter in self.letters:\n if (letter not in self.query.evidence):\n non_evidence_letters.append(letter)\n\n for i in xrange(trial_count):\n for letter in non_evidence_letters:\n\n # Probability of x, given its parents\n pos_prob = self.variables[letter].get_prob(values)\n # Probability of x's children, given their parents\n values[letter] = True # FIX TO BE TRUE\n for child in self.variables[letter].children:\n child_prob = self.variables[child].get_prob(values)\n\n if (values[child]):\n pos_prob *= child_prob\n else:\n pos_prob *= (1 - child_prob)\n\n ### DO SAME THING FOR FALSE PROB\n\n # Probability of x, given its parents\n neg_prob = 1 - self.variables[letter].get_prob(values)\n # Probability of x's children, given their parents\n values[letter] = False # FIX TO BE FALSE\n for child in self.variables[letter].children:\n child_prob = self.variables[child].get_prob(values)\n\n if (values[child]):\n neg_prob *= child_prob\n else:\n neg_prob *= (1 - child_prob)\n\n ### NORMALIZE\n prob = pos_prob / (pos_prob + neg_prob)\n\n ### SAMPLE\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n count += 1\n\n total_trials += 1\n\n return float(count) / total_trials",
"def run_gibbs(expt, save=True, show_progress=False):\n if isinstance(expt, str):\n expt = get_experiment(expt)\n tr_expt = get_training_expt(expt)\n\n for it in tr_expt.save_after:\n for avg in AVG_VALS:\n print 'Iteration', it, avg\n try:\n rbm = load_rbm(expt, it, avg)\n except:\n continue\n log_Z = storage.load(expt.log_Z_file(it, avg)).as_numpy_array()\n final_states = storage.load(expt.final_states_file(it, avg))\n\n # sample the states proportionally to the Z estimates\n p = log_Z - np.logaddexp.reduce(log_Z)\n p /= p.sum() # not needed in theory, but numpy complains if it doesn't sum exactly to 1\n idxs = np.random.multinomial(1, p, size=expt.annealing.num_particles).argmax(1)\n states = binary_rbms.RBMState(final_states.v[idxs, :], final_states.h[idxs, :])\n\n if show_progress:\n pbar = misc.pbar(expt.gibbs_steps)\n\n for st in range(expt.gibbs_steps):\n states = rbm.step(states)\n\n if show_progress:\n pbar.update(st)\n\n if show_progress:\n pbar.finish()\n\n if save:\n storage.dump(states, expt.gibbs_states_file(it, avg))",
"def gibbs(params, sampler=Sampler.HMC, log_prob_func=None, jitter=None, normalizing_const=1., softabs_const=None, mass=None, metric=Metric.HESSIAN):\n\n if sampler == Sampler.RMHMC:\n dist = torch.distributions.MultivariateNormal(torch.zeros_like(params), fisher(params, log_prob_func, jitter, normalizing_const, softabs_const, metric)[0])\n elif mass is None:\n dist = torch.distributions.Normal(torch.zeros_like(params), torch.ones_like(params))\n else:\n if type(mass) is list:\n # block wise mass list of blocks\n samples = torch.zeros_like(params)\n i = 0\n for block in mass:\n it = block[0].shape[0]\n dist = torch.distributions.MultivariateNormal(torch.zeros_like(block[0]), block)\n samples[i:it+i] = dist.sample()\n i += it\n return samples\n elif len(mass.shape) == 2:\n dist = torch.distributions.MultivariateNormal(torch.zeros_like(params), mass)\n elif len(mass.shape) == 1:\n dist = torch.distributions.Normal(torch.zeros_like(params), mass ** 0.5) # Normal expects standard deviation so need sqrt\n return dist.sample()",
"def posterior_sample(self, burnin=0):\n self.__burnin = burnin\n sampler = self.sampler\n nwalkers = self.nwalkers\n if self.__sampler == \"EnsembleSampler\":\n chain = sampler.chain\n lnprob = sampler.lnprobability[:, -1]\n elif self.__sampler == \"PTSampler\":\n chain = np.squeeze(sampler.chain[0, ...])\n lnprob = np.squeeze(sampler.lnprobability[0, :, -1])\n samples = chain[:, burnin:, :].reshape((-1, self.ndim))\n return samples",
"def gibbs_sample(self, X):\n K = self.K # number of topics\n M, V = X.shape\n alpha = self.alpha\n lmda = self.lmda\n topics = np.arange(0, K)\n\n #initialize everything uniformly, sparse topics\n Beta = np.ones(shape=(K, V), dtype=float) / V\n Theta = np.ones(shape=(M, K), dtype=float)/ K\n\n #Running sum\n MC_z = np.array(range(M), dtype=object)\n MC_beta = np.zeros(shape=(K, V), dtype=float)\n MC_theta = np.zeros(shape=(M, K), dtype=float)\n\n for d in range(M):\n #allocate topics randomly -- this is really not needed in this case\n word_indices = X[d, :].nonzero()[1]\n random_ks = np.random.choice(topics, size = len(word_indices))\n Theta[d] = np.random.dirichlet(np.ones(K)*alpha)\n MC_z[d] = sp.coo_matrix((V, K), dtype=np.int8).tolil()\n for k in topics:\n Beta[k] = np.random.dirichlet(np.ones(V)*lmda)\n\n log_Xs = []\n perplexities = []\n t = 0\n for epoch in xrange(self.nr_em_epochs):\n print \"Epoch\", epoch\n t +=1\n C = np.zeros((K, V))\n for d in np.random.permutation(np.arange(M)):\n x = X[d]\n ixw = np.nonzero(x)[1]\n p_s = Beta[:, ixw].T * Theta[d, :]\n Z = [np.random.choice(topics, p=(p/np.sum(p))) for p in p_s]\n N_d = sp.coo_matrix((np.ones(len(ixw)), (ixw, Z)), shape=(V, K)).tolil()\n C = C + N_d.A.T\n # sample theta given z and beta\n c_theta = (np.sum(N_d.A, axis=0) + alpha)\n Theta[d, :] = np.random.dirichlet(c_theta)\n MC_z[d] += N_d\n\n # Sample beta given all z and thetas\n for k in topics:\n c_beta = C[k, :]\n Beta[k, :] = np.random.dirichlet(c_beta + lmda)\n\n MC_theta += Theta\n MC_beta += Beta\n\n log_X = 0\n Theta_hat = MC_theta / t\n Beta_hat = MC_beta / t\n\n for d in range(M):\n ixw = np.nonzero(X[d, :])[1]\n log_X += np.sum(_doc_probability_from_p_of_z(Theta_hat[d, :], Beta_hat[:, ixw]))\n\n log_Xs.append(log_X)\n print log_X\n perplexities.append(self._perplexity(X, log_X))\n return Theta_hat, Beta_hat, log_Xs, perplexities",
"def gibbs_init(self, sigma2_s_param=None, sigma2_g_param=None):\n #Gibbs : Initialization step\n self.gibbs_init_step(self.nb_days, self.nb_particles, sigma2_s_param, sigma2_g_param)\n\n #Gibbs : step t > 0\n for j in range(1, self.nb_particles):\n if(j%(self.nb_particles/10)==0 or j==1):\n print(\"Gibbs sampling for particle \" + str(j) + \"/\" + str(self.nb_particles))\n\n\n self.s[:,j] = self.s[:,j-1]\n self.g_heat[:,j] = self.g_heat[:,j-1]\n self.sigma_s_star_2[:,j] = self.sigma_s_star_2[:,j-1]\n self.sigma_g_star_2[:,j] = self.sigma_g_star_2[:,j-1]\n\n # Compute s[0] for particle j (j>0)\n self.compute_s_0(j)\n\n # Compute s[n] for particle j (n>0 and j>0)\n for i in range(1, self.nb_days):\n self.compute_s(i,j)\n\n # Compute g_heat[O] for particle j (and j>0)\n self.compute_g_0(j)\n\n # Compute g_heat[n] for particle j (n>0 and j>0)\n for i in range(1, self.nb_days):\n self.compute_g(i,j)\n\n shape = 0.01 + ((self.nb_days - 1)/2)\n # Compute the new sigma_s_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.s, j)\n\n # Compute the new sigma_g_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.g_heat, j)\n\n #Compute x\n self.compute_x()\n #Compute w\n self.compute_w()",
"def gibbs_sampler(dna, k, t, N):\n\n motifs = random_kmers(k, t, dna)\n best_motifs = motifs.copy()\n\n for j in range(N):\n random_dna_seq_index = random.randrange(t)\n random_dna_seq = dna[random_dna_seq_index, :]\n mask = np.ones(t, dtype=bool)\n mask[random_dna_seq_index] = False\n\n count_mat = count_nucleotides(motifs[mask, :])\n prof_mat = profile(count_mat + 1, t - 1 + 4)\n\n mpk = profile_most_probable_kmer(random_dna_seq, k, prof_mat)\n motifs[random_dna_seq_index, :] = mpk\n\n if score_motif(motifs) < score_motif(best_motifs):\n best_motifs = motifs.copy()\n\n return best_motifs, score_motif(best_motifs)",
"def sample_posterior(\n rng_key: random.PRNGKey,\n model,\n data: np.ndarray,\n Nsamples: int = 1000,\n alpha: float = 1,\n sigma: float = 0,\n T: int = 10,\n gibbs_fn=None,\n gibbs_sites=None):\n\n if gibbs_fn is None or gibbs_sites is None:\n return sample_posterior_with_predictive(rng_key, model, data, Nsamples,\n alpha=alpha, sigma=sigma, T=T)\n else:\n return sample_posterior_gibbs(rng_key, model, data, Nsamples,\n alpha=alpha, sigma=sigma, T=T,\n gibbs_fn=gibbs_fn, gibbs_sites=gibbs_sites)",
"def run_model_sampler(Y, latent_dim, n_iter):\n\tF_sample = []\n\tloading_sample = []\n\tvariance_sample = []\n\ttrace_sample = []\n\tmse_history = []\n\tF = initiate_factors(Y, latent_dim)\n\tfor i in tqdm(range(n_iter)):\n\t\tF, loading_matrix, Y_variance, gp_traces, mse = gibbs_sampling(F, Y)\n\t\tF_sample.append(F)\n\t\tloading_sample.append(loading_matrix)\n\t\tvariance_sample.append(Y_variance)\n\t\ttrace_sample.append(gp_traces)\n\t\tmse_history.append(mse)\n\treturn F_sample, loading_sample, variance_sample, trace_sample, mse_history",
"def sample_posterior(self):\n \n# print (\"SAMPLING FROM LINEAR SIMILARITY VB\")\n if (self.posterior_mean == False):\n self.weight = Vil.sample_posterior(self.mu_weight, Vil.softplus(self.rho_weight))\n self.bias = Vil.sample_posterior(self.mu_bias, Vil.softplus(self.rho_bias))\n# print (self.bias)\n else:\n self.weight.data = self.mu_weight.data\n self.bias.data = self.mu_bias.data",
"def sample_from_bm(self,\n num_chains, \n num_samples,\n num_steps,\n save_to_path,\n num_burn_in,\n test_inputs = None,\n print_p_tilda = False,\n print_gibbs = False):\n \n if type(test_inputs) is np.ndarray:\n \n print(\"Will initialize gibbs chains with dataset images\\n\")\n \n num_test_examples = test_inputs.shape[0]\n \n self.test_inputs = theano.shared(np.asarray(test_inputs,\n dtype=theano.config.floatX),\n borrow= True) \n \n select_examples = np.random.choice(num_test_examples, \n num_chains, \n replace=False)\n \n init_chains = np.asarray(\n self.test_inputs.get_value(borrow=True)[select_examples,:],\n dtype=theano.config.floatX)\n \n else:\n \n print(\"Will initialize gibbs chains with random images\\n\")\n init_chains = self.np_rand_gen.binomial(n=1,p=0.5, \n size = (num_chains, self.num_vars))\n \n images = np.zeros([num_chains*num_samples+num_chains, self.num_vars])\n \n images[0:num_chains,:] = init_chains\n \n theano.config.exception_verbosity = 'high'\n \n self.x_gibbs = theano.shared(init_chains, name= \"x_gibbs\")\n \n if self.num_hidden > 0:\n print(\"Running gibbs chains for RBM ...\\n\")\n \n (\n [ _,\n _,\n _,\n x_inputs,\n p_xi_given_x_,\n x_samples\n ],\n updates\n ) = theano.scan(\n self.gibbs_step_rbm_vis,\n outputs_info=[None, None, None, None, None, self.x_gibbs],\n n_steps= num_steps)\n \n output_vars = [p_xi_given_x_[-1], x_samples[-1]]\n \n updates.update({self.x_gibbs: x_samples[-1]})\n \n else:\n \n print(\"Running gibbs chains for BM ...\\n\")\n \n (p_xi_given_x_, x_samples), updates =\\\n theano.scan(self.gibbs_step_fully_visible, n_steps = num_steps)\n \n output_vars = [p_xi_given_x_[num_burn_in:],\n x_samples[num_burn_in:]]\n \n take_step = (num_steps - num_burn_in) // self.num_vars \n \n if take_step == 0:\n \n take_step = 1\n \n get_samples = theano.function(inputs = [],\n outputs = output_vars, \n updates = updates)\n \n for ind in range(num_samples):\n \n p_all, samples_all = get_samples()\n \n if num_steps != 1 and self.num_hidden == 0:\n \n p_out, samples_out = self.assemble_image(p_all, \n samples_all,\n num_chains,\n step = take_step)\n \n elif num_steps ==1 and self.num_hidden == 0:\n \n p_out = p_all[-1]\n \n samples_out = samples_all[-1]\n \n elif self.num_hidden > 0:\n \n p_out = p_all\n \n samples_out = samples_all\n \n if self.num_hidden == 0:\n \n p_out = np.transpose(p_out) \n \n # without resetting the chains are persistent for\n # fully visible Boltzmann Machines\n # (self.x_gibbs are modified continuously)\n # self.x_gibbs.set_value(init_chains)\n \n print(\"Sample %d -- max pixel activations for %d gibbs chains:\"%\n (ind, num_chains))\n print(np.max(p_out, axis= 1))\n print(\"\")\n \n if print_gibbs:\n self.print_gibbs_conditionals(p_vals = p_all)\n \n if print_p_tilda: \n is_samples = self.np_rand_gen.binomial(n=1, \n p=0.5, \n size =(10000, self.num_vars))\n \n gibbs_p_tilda, rand_p_tilda = \\\n self.test_p_tilda(np.transpose(samples_out), \n is_samples,\n training = False)\n \n print(\"p_tilda values for gibbs samples:\")\n print(gibbs_p_tilda)\n print(\"\")\n print(\"p_tilda values for randomly chosen importance samples:\")\n print(rand_p_tilda)\n print(\"\")\n \n images[num_chains*(ind+1):num_chains*(ind+2),:] = np.round(p_out)\n \n make_raster_plots(images, \n num_samples, \n num_chains, \n reshape_to = [self.side, self.side], \n save_to_path = save_to_path)",
"def gibbs_step(self, visible):\n hidden_prob = self.probabilities_hidden(visible)\n hidden_state = self.sample(hidden_prob)\n visible_prob = self.probabilities_visible(hidden_state)\n visible_state = visible_prob\n return hidden_prob, hidden_state, visible_prob, visible_state",
"def gibbs_segmentation(image, burn_in, collect_frequency, n_samples):\n (Nx, Ny, _) = image.shape\n\n distribution = np.zeros( (Nx, Ny) )\n\n # Initialize binary estimates at every pixel randomly. \n estimates = (np.random.random( (Nx, Ny) ) > .5).astype(int)\n\n total_iterations = burn_in + (collect_frequency * (n_samples - 1) + 1)\n pixel_indices = list(itertools.product(range(Nx),range(Ny)))\n\n for iteration in range(total_iterations):\n\n # Loop over entire grid, using a random order for faster convergence\n random.shuffle(pixel_indices)\n for (i,j) in pixel_indices:\n xf = observation_model(image[i,j,:], 0)\n xb = observation_model(image[i,j,:], 1)\n for neighbor in get_neighbors(estimates, i, j):\n xf *= edge_model(0, neighbor)\n xb *= edge_model(1, neighbor)\n pb = xb / (xf + xb)\n estimates[i,j] = (np.random.random() < pb).astype(int)\n if iteration > burn_in and (iteration - burn_in + collect_frequency)%collect_frequency == 1:\n distribution += estimates\n \n distribution /= n_samples\n\n return distribution",
"def run_sampler(iterations, X, alpha, epsilon, lamb, p, max_newK):\n \n Z, Y = initialize_ZY(X, alpha, max_newK)\n\n for iter in xrange(1, iterations+1):\n \n processing_time = time.time()\n \n Z, Y = sample_Z(X, Z, Y, alpha, epsilon, lamb, p, max_newK)\n Y = sample_Y(X, Z, Y, alpha, epsilon, lamb, p)\n Z, Y = sort_ZY(Z, Y)\n Z, Y = remove_empties_ZY(Z, Y)\n\n lhood = log_lhood(X, Z, Y, alpha, epsilon, lamb) \n \n processing_time = time.time() - processing_time \n\n print(\"iteration %d finished in %d seconds with log-likelihood %g\"\n % (iter, processing_time, lhood))\n \n return Z, Y",
"def gibbs(self, v):\n h_ = self.sample_h(v)\n v_ = self.sample_v(h_)\n \n return v_",
"def sample_b(self):\n # don't sample b-factors with em\n if not self.options.sample_bfactors or self.options.em:\n return\n\n new_coor = []\n new_bfactor = []\n multiplication_factors = [1.0, 1.3, 1.5, 0.9, 0.5]\n coor_b_pairs = zip(self._coor_set, self._bs)\n for (coor, b), multi in itertools.product(coor_b_pairs, multiplication_factors):\n new_coor.append(coor)\n new_bfactor.append(b * multi)\n self._coor_set = new_coor\n self._bs = new_bfactor",
"def gibbs_sample(self, visible_sample, steps=3):\n batch_size = visible_sample.shape[0]\n for step in range(steps):\n # Updating the hidden states\n # Section 3.1 PG_RBM\n hidden_activations = sigm(np.dot(visible_sample, self.weights))\n hidden_activations[:,0] = 1. #Biases are always 1\n hidden_sample = hidden_activations > np.random.rand(batch_size, self.num_hidden + 1)\n \n # Updating the visible states\n # Section 3.2 PG_RBM\n visible_activations = sigm(np.dot(hidden_sample, self.weights.T))\n visible_activations[:,0] = 1. #Biases are always 1\n visible_sample = visible_activations > np.random.rand(batch_size, self.num_visible + 1)\n return hidden_activations, hidden_sample, visible_activations, visible_sample",
"def sample_posterior(self):\n if(self.Bayesian):\n for i in range(self.num_layers):\n getattr(self, 'LSTMCell%i'%(i+1)).sample_posterior()",
"def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)",
"def gpbandits(model, data, iters=10, kernel='se', cl=0.1, v=0.0, num_samples=500, verbose=True, best_model_log=False):\n\n num_dims = model.num_dims # number of hyperparameter dimensions\n\n # initial model evaluation\n points = model.encode()[np.newaxis,:]\n scores = np.array([model.train_test_cv(data)])\n\n # best model and corresponding value at each iteration\n if best_model_log:\n best_point_tmp = []\n best_point_tmp.append(points[0,:])\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(0, scores[0]))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(0,points[0,:] scores[0]))\n\n # loop\n for i in range(iters):\n\n # sample num_Samples random points from [0,1)^num_dims\n candidates = sample(num_dims, num_samples)\n\n # find GP posterior\n A = formK(candidates, candidates, kernel, cl)\n B = formK(points, points, kernel, cl) + v*np.eye(points.shape[0])\n C = formK(candidates, points, kernel, cl)\n tmp = C.dot(np.linalg.inv(B))\n mu = tmp.dot(scores)\n Sigma = A - tmp.dot(C.T)\n var = np.diagonal(Sigma) + np.finfo(float).eps\n sig = np.sqrt(var)\n\n # choose new point with best expected improvement\n exp_imp = expected_improvement(scores.min(), mu, sig)\n best_idx = np.argmax(exp_imp)\n best_point = candidates[best_idx]\n\n # set hyperparameters with best sampled point\n model.decode(best_point)\n\n # return re-encoded point\n new_point = model.encode()\n\n # evaluate model\n new_score = model.train_test_cv(data)\n\n # append to points/scores lists\n points = np.vstack((points, best_point)) # use best_point, not re-encoded new_point to break discrete symmetries\n scores = np.append(scores, new_score)\n\n # save progress\n save_checkpoint(points, scores)\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(i+1, new_score))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(i+1, best_point, new_score))\n\n if best_model_log:\n ind = np.argmin(scores)\n best_point_tmp.append(points[ind])\n\n\n\n # return best model\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n if not best_model_log:\n return model\n else:\n return model, best_point_tmp",
"def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability",
"def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples",
"def test_m2b_via_uniform (self):\n nt = 5\n ns = 1\n num_giter = 100\n net = self.m2b\n\n tmr = mytime.timeit()\n\n # For this test, each sample is tested independently rather than aggregated\n for i in xrange(ns):\n arrv = net.sample (nt)\n print arrv\n obs = arrv.subset (lambda a,e: a.is_initial (e), copy_evt)\n gsmp = net.gibbs_resample (obs, 0, num_giter, sample_final=False)\n for tid in xrange(nt):\n # For each task, check that the Gibbs distribution is correctly uniform\n times = []\n for smp_id in xrange(1,len(gsmp)):\n byt = gsmp[smp_id].events_of_task (tid)\n self.assertEquals (3, len(byt))\n times.append (byt[1].d)\n \n # examine gibbs function\n e0 = arrv.events_of_task (tid)[1]\n e1 = arrv.events_of_task (tid)[2]\n L = e0.a\n U = e1.d\n cdist = net.gibbs_for_departure (obs, e0)\n xs = [ L+ i*(U-L)/10 for i in xrange(10) ]\n for x in xs: print \" x %.4f p(d = x | A) %.4f\" % (x, cdist(x))\n \n # generate true sample\n s = [ numpy.random.uniform (L, U) for i in xrange(num_giter) ] \n\n # now check the cdfs\n s.sort()\n times.sort()\n print summarize (times)\n netutils.check_quantiles (self, s, times, num_giter)\n\n elapsed = tmr.total() \n print \"Events resampled per sec = \", (nt * 2 * ns * num_giter) / elapsed"
]
| [
"0.7207733",
"0.6881152",
"0.66835207",
"0.6576732",
"0.64802915",
"0.64319247",
"0.6256353",
"0.6235843",
"0.6132594",
"0.6085217",
"0.6061682",
"0.6050526",
"0.5930381",
"0.5884115",
"0.58757025",
"0.58688474",
"0.58540446",
"0.58322746",
"0.5747067",
"0.5743532",
"0.5740403",
"0.5734549",
"0.568354",
"0.5675892",
"0.5670521",
"0.56528306",
"0.56516755",
"0.5592415",
"0.5572117",
"0.5545252"
]
| 0.6882195 | 1 |
load pretrained word embedding | def load_pretrained_embedding(self, pre_embeddings):
assert (pre_embeddings.size()[1] == self.embedding_dim)
self.word_embeds.weight = nn.Parameter(pre_embeddings) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300",
"def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)",
"def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim",
"def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)",
"def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings",
"def _load_word_embedding(self, lang):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if self.args.task == 'conneau' or self.args.task == 'xling':\n data_dir = os.path.join(self.args.data_dir, 'MUSE')\n lang_path = os.path.join(data_dir, 'wiki.' + lang + '.vec')\n elif self.args.task == 'dinu':\n data_dir = os.path.join(self.args.data_dir, 'dinu')\n lang_path = os.path.join(data_dir, 'embeddings', lang + '.emb.txt')\n elif self.args.task == 'zhang':\n order = [lang,trg]\n if lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(self.args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n lang_path = os.path.join(data_dir, 'word2vec.' + lang)\n\n langfile = open(lang_path, encoding=self.args.encoding, errors='surrogateescape')\n words, xs = embeddings.read(langfile, self.args.maxs)\n langfile.close()\n # Build word to index map\n word2ind = {word: i for i, word in enumerate(words)}\n\n return xs, words, word2ind",
"def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)",
"def build_pre_embedding(self, use_saved_embed=False):\n\n if use_saved_embed and\\\n self.config.parser['embedding_save_dir'] is not '':\n Print(\n f'reading saved embedding file from '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'information'\n )\n with open(self.config.parser['embedding_save_dir'], 'rb') as f:\n pretrain_embed = pickle.load(f)\n else:\n if self.config.parser['embed_dir'] is None:\n Print('Pre-trained embedding file not available.', 'error')\n return\n\n embed_file = self.config.parser['embed_dir']\n\n # load in pre-trained Glove model, save it as a dict\n pretrain_embed = {}\n with open(embed_file, 'r', encoding='utf-8') as f:\n tqdm_iter = tqdm.tqdm(f.readlines())\n tqdm_iter.set_description('read from pre-trained file', False)\n for line in tqdm_iter:\n embed_content = line.strip().split()\n word, embed_content = embed_content[0], embed_content[1:]\n if self.config.parser['word_embed_dim'] < 0:\n self.config.parser['word_embed_dim'] = len(embed_content)\n elif self.config.parser['word_embed_dim'] != len(embed_content):\n # invalid embedding word\n continue\n embed_content = np.array([float(x) for x in embed_content])\n pretrain_embed[word] = embed_content\n \n if self.config.parser['embedding_save_dir'] is not '':\n with open(self.config.parser['embedding_save_dir'], 'wb') as f:\n pickle.dump(pretrain_embed, f)\n Print(\n f'pre-trained embedding dictionary is saved at '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'success'\n )\n\n embed_dim = self.config.parser['word_embed_dim']\n\n # build embedding if find it in pre-trained model\n # else randomly generate one.\n self.embedding = np.empty([\n self.word_dict.word_size, embed_dim\n ])\n scale = np.sqrt(3 / embed_dim)\n perfect_match, case_match, not_match = 0, 0, 0\n for word, index in self.word_dict.word2idx.items():\n if word in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word]\n perfect_match += 1\n if word.lower() in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word.lower()]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word.lower()]\n case_match += 1\n else:\n # not found\n self.embedding[index,\n :] = np.random.uniform(-scale, scale, [embed_dim])\n not_match += 1\n Print(\n f'Pre-trained embedding loaded in from {self.config.parser[\"embed_dir\"]},\\n'\\\n f'pre-train words: {len(pretrain_embed)}, perfect match {perfect_match},\\n'\\\n f'case match {case_match}, not match {not_match},\\n'\\\n f'oov {not_match / self.word_dict.word_size}', 'success'\n )\n return self.embedding",
"def pretrained(name=\"glove_100d\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(WordEmbeddingsModel, name, lang, remote_loc)",
"def load_pretrained_embeddings(self, embedding_path):\n trained_embeddings = {}\n with open(embedding_path, 'r') as fin:\n for line in fin:\n contents = line.strip().split()\n token = contents[0]\n if token not in self.token2id:\n continue\n trained_embeddings[token] = list(map(float, contents[1:]))\n embed_size = len(contents) - 1\n # load embeddings\n self.embeddings = np.random.randn([self.size, embed_size])\n for token in self.id2token:\n if token in trained_embeddings:\n self.embeddings[self.token2id[token]] = trained_embeddings[token]",
"def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. 
compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer",
"def load_pretrain_embedding(vocab, embed_size, embedding_path):\n model = KeyedVectors.load_word2vec_format(embedding_path)\n\n print('{} {}'.format(vocab.size(), embed_size))\n for token, id in vocab.token2id.items():\n if token in model:\n print('{} {}'.format(token, ' '.join(map(str, model[token]))))\n else:\n emb = np.random.random((embed_size,)) - 0.5\n print('{} {}'.format(token, ' '.join(map(str, emb))))",
"def augment_with_pretrained(dictionary, ext_emb_path, words):\n print('Loading pretrained embeddings from %s...' % ext_emb_path)\n assert os.path.isfile(ext_emb_path)\n\n # Load pretrained embeddings from file\n pretrained = set([\n line.rstrip().split()[0].strip()\n for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n if len(ext_emb_path) > 0\n ])\n\n # We either add every word in the pretrained file,\n # or only words given in the `words` list to which\n # we can assign a pretrained embedding\n if words is None:\n for word in pretrained:\n if word not in dictionary:\n dictionary[word] = 0\n else:\n for word in words:\n if any(x in pretrained for x in [\n word,\n word.lower(),\n re.sub('\\d', '0', word.lower())\n ]) and word not in dictionary:\n dictionary[word] = 0\n\n word_to_id, id_to_word = create_mapping(dictionary)\n return dictionary, word_to_id, id_to_word",
"def load_word2vec(emb_path, id_to_word, word_dim, old_weights):\n new_weights = old_weights\n print('Loading pretrained embeddings from {}...'.format(emb_path))\n pre_trained = {}\n emb_invalid = 0\n for i, line in enumerate(codecs.open(emb_path, 'r', 'utf-8')):\n line = line.rstrip().split()\n if len(line) == word_dim + 1:\n pre_trained[line[0]] = np.array(\n [float(x) for x in line[1:]]\n ).astype(np.float32)\n else:\n emb_invalid += 1\n if emb_invalid > 0:\n print('WARNING: %i invalid lines' % emb_invalid)\n c_found = 0\n c_lower = 0\n c_zeros = 0\n n_words = len(id_to_word)\n # Lookup table initialization\n for i in range(n_words):\n word = id_to_word[i]\n if word in pre_trained:\n new_weights[i] = pre_trained[word]\n c_found += 1\n elif word.lower() in pre_trained:\n new_weights[i] = pre_trained[word.lower()]\n c_lower += 1\n elif re.sub('\\d', '0', word.lower()) in pre_trained:\n new_weights[i] = pre_trained[\n re.sub('\\d', '0', word.lower())\n ]\n c_zeros += 1\n print('Loaded %i pretrained embeddings.' % len(pre_trained))\n print('%i / %i (%.4f%%) words have been initialized with '\n 'pretrained embeddings.' % (\n c_found + c_lower + c_zeros, n_words,\n 100. * (c_found + c_lower + c_zeros) / n_words)\n )\n print('%i found directly, %i after lowercasing, '\n '%i after lowercasing + zero.' % (\n c_found, c_lower, c_zeros\n ))\n return new_weights",
"def load_embedding(path=PROJECT_DIR / \"outputs/models/embedding.pkl\"):\n try:\n with open(path, \"rb\") as inp:\n embedding = pickle.load(inp)\n return embedding\n\n except FileNotFoundError:\n logger.error(f\"There is no embedding to load at {path}\")",
"def load_pretrained_vectors(self, emb_file, fixed):\n if emb_file:\n pretrained = torch.load(emb_file)\n self.word_lut.weight.data.copy_(pretrained)\n if fixed:\n self.word_lut.weight.requires_grad = False",
"def load_pretrained_embeddings(self, embedding_path):\n trained_embeddings = {}\n with open(embedding_path, 'r', encoding='utf-8') as fin:\n for line in fin:\n contents = line.strip().split(\" \")\n term = contents[0]\n if term not in self.term2id:\n continue\n trained_embeddings[term] = list(map(float, contents[1:]))\n if self.embed_dim is None:\n self.embed_dim = len(contents) - 1\n filtered_terms = trained_embeddings.keys()\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)\n # load embeddings\n self.embeddings = np.zeros([self.size(), self.embed_dim])\n for term in self.term2id.keys():\n if term in trained_embeddings:\n self.embeddings[self.get_id(term)] = trained_embeddings[term]",
"def augment_with_pretrained(dictionary, ext_emb_path, chars):\n print('Loading pretrained embeddings from %s...' % ext_emb_path)\n assert os.path.isfile(ext_emb_path)\n\n # Load pretrained embeddings from file\n pretrained = set([\n line.rstrip().split()[0].strip()\n for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n if len(ext_emb_path) > 0\n ])\n\n # We either add every word in the pretrained file,\n # or only words given in the `words` list to which\n # we can assign a pretrained embedding\n if chars is None:\n for char in pretrained:\n if char not in dictionary:\n dictionary[char] = 0\n else:\n for char in chars:\n if any(x in pretrained for x in [\n char,\n char.lower(),\n re.sub('\\d', '0', char.lower())\n ]) and char not in dictionary:\n dictionary[char] = 0\n\n word_to_id, id_to_word = create_mapping(dictionary)\n return dictionary, word_to_id, id_to_word",
"def load_word_embed(path: str,\n dimension: int,\n *,\n skip_first: bool = False,\n freeze: bool = False,\n sep: str = ' '\n ) -> Tuple[nn.Embedding, Dict[str, int]]:\n vocab = {'$$$UNK$$$': 0}\n embed_matrix = [[0.0] * dimension]\n with open(path) as r:\n if skip_first:\n r.readline()\n for line in r:\n segments = line.rstrip('\\n').rstrip(' ').split(sep)\n word = segments[0]\n vocab[word] = len(vocab)\n embed = [float(x) for x in segments[1:]]\n embed_matrix.append(embed)\n print('Loaded %d word embeddings' % (len(embed_matrix) - 1))\n \n embed_matrix = torch.FloatTensor(embed_matrix)\n \n word_embed = nn.Embedding.from_pretrained(embed_matrix,\n freeze=freeze,\n padding_idx=0)\n return word_embed, vocab",
"def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict",
"def load_word2vec_en_pretrained():\r\n log.info(\"Load W2V Model\")\r\n model = api.load(\"glove-wiki-gigaword-50\")\r\n return PreTrainedGensimEN(model)",
"def load_pretrained_embeddings(vocabulary: dict, max_size: int):\n # get GloVe 6B pre-trained word embeddings, of dimension 100\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.Tensor.normal_)\n\n pretrained = []\n for k, _ in vocabulary.stoi.items():\n if k == \"<PAD>\":\n emb = torch.zeros([glove_vec.dim])\n elif k == \"<UNK>\":\n emb = torch.rand([glove_vec.dim])\n else:\n emb = glove_vec.get_vecs_by_tokens(k, lower_case_backup=True)\n pretrained.append(emb) \n\n # return a tensor of size [vocab_size, emb_dim]\n return torch.stack(pretrained, dim=0)",
"def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model",
"def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()",
"def pretrained(name=\"elmo\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(ElmoEmbeddings, name, lang, remote_loc)",
"def load_embeddings(emb_file, word_map):\n\n # Find embedding dimension\n with open(emb_file, 'r') as f:\n emb_dim = len(f.readline().split(' ')) - 1\n\n vocab = set(word_map.keys())\n\n # Create tensor to hold embeddings, initialize\n embeddings = torch.FloatTensor(len(vocab), emb_dim)\n init_embedding(embeddings)\n\n # Read embedding file\n print(\"\\nLoading embeddings...\")\n for line in open(emb_file, 'r'):\n line = line.split(' ')\n\n emb_word = line[0]\n embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))\n\n # Ignore word if not in train_vocab\n if emb_word not in vocab:\n continue\n\n embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)\n\n return embeddings, emb_dim",
"def init(self, preload_embeddings):\n\t\tself.__find_metadata()\n\t\tself.__parse_embedding_metadata()\n\t\tself.__parse_model_metadata()\n\t\t# should we load all of the word embeddings into memory now?\n\t\tif preload_embeddings:\n\t\t\tlog.info(\"Preloading word embeddings ...\")\n\t\t\tfor embed_id in self.embedding_meta:\n\t\t\t\tself.get_embedding(embed_id)\t\n\t\t\tlog.info(\"Preloaded %d word embeddings\" % len(self.embedding_cache))",
"def load_preprocessed(self):\n with open(self.words_vocab_file, 'rb') as f:\n self.word_to_id, self.unk_word_list = pickle.load(f)\n self.word_vocab_size = len(self.word_to_id)\n\n if self.unit != \"word\":\n with open(self.sub_vocab_file, 'rb') as f:\n if self.unit == \"char\":\n self.max_word_len = self.get_max_word_length(self.word_to_id) + 2\n self.char_to_id, self.unk_char_list, self.max_word_len = pickle.load(f)\n self.subword_vocab_size = len(self.char_to_id)\n elif self.unit == \"char-ngram\":\n self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, \\\n self.max_ngram_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.ngram_to_id)\n elif self.unit == \"morpheme\":\n self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, \\\n self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n elif self.unit == \"oracle\":\n self.morpheme_to_id, self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n else:\n sys.exit(\"Unknown unit\")",
"def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb",
"def load_data(self, debug=False):\n with open(self.config.vocab_file,'r') as f:\n self.vocab = pickle.load(f)\n self.char = Character()\n self.encoded_data = pickle.load(open(\"sp/encoded_test\", \"rb\"))\n self.data_chars = pickle.load(open(\"sp/test_chars\", \"rb\"))\n\n char_embedding = np.zeros([len(self.char), len(self.char)], dtype=np.int32)\n for i in range(len(self.char)):\n char_embedding[i][i] = 1\n self.char_embedding = tf.constant(char_embedding)"
]
| [
"0.81451064",
"0.78315616",
"0.7747075",
"0.7655655",
"0.76149315",
"0.76103157",
"0.7528007",
"0.74930793",
"0.7491543",
"0.74675125",
"0.7453024",
"0.7405483",
"0.73980623",
"0.7379225",
"0.73778546",
"0.73011637",
"0.7276894",
"0.7208229",
"0.7112189",
"0.71097624",
"0.7045951",
"0.701323",
"0.69768226",
"0.6971104",
"0.69562894",
"0.695343",
"0.69402957",
"0.69360584",
"0.6927151",
"0.6886277"
]
| 0.8000602 | 1 |
Handle the installation of the firmware file. | def install_firmware(self, firmware_file_path: str) -> None:
raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def install_firmware(self, image_path):\n \"\"\"Not Implement\"\"\"\n return False",
"def test_handle_install_file_present(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=\"1.0\"\n )\n mock_firmware_handler.install_firmware = MagicMock()\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n\n file_handle = open(\"test_file\", \"w\")\n firmware_update.handle_install(\"test_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )\n file_handle.close()\n os.remove(\"test_file\")\n os.remove(\"last_firmware_version.txt\")",
"def install_file(self, file_path):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def install_firmware(self, pbz_path, recovery=False):\n\n\t\tresources = None\n\t\twith zipfile.ZipFile(pbz_path) as pbz:\n\t\t\tbinary = pbz.read(\"tintin_fw.bin\")\n\t\t\tif not recovery:\n\t\t\t\tresources = pbz.read(\"system_resources.pbpack\")\n\n\t\tself.system_message(\"FIRMWARE_START\")\n\t\ttime.sleep(2)\n\n\t\tif resources:\n\t\t\tclient = PutBytesClient(self, 0, \"SYS_RESOURCES\", resources)\n\t\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\t\tclient.init()\n\t\t\twhile not client._done and not client._error:\n\t\t\t\tpass\n\t\t\tif client._error:\n\t\t\t\traise PebbleError(self.id, \"Failed to send firmware resources %s/system_resources.pbpack\" % pbz_path)\n\n\n\t\tclient = PutBytesClient(self, 0, \"RECOVERY\" if recovery else \"FIRMWARE\", binary)\n\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\tclient.init()\n\t\twhile not client._done and not client._error:\n\t\t\tpass\n\t\tif client._error:\n\t\t\traise PebbleError(self.id, \"Failed to send firmware binary %s/tintin_fw.bin\" % pbz_path)\n\n\t\tself.system_message(\"FIRMWARE_COMPLETE\")",
"def do_post_install(self, context):\n pass",
"def _install(self):\n\n pass",
"def assemble_firmware(self):\n\n # Check that the layout is available from the firmware configuration file\n if \"layout\" not in self.project.firmware_definition:\n self.project.logging.critical(\"The firmware layout is not defined in configuration file\")\n exit(1)\n\n # Check that the stacking method is available from the firmware configuration file\n if \"method\" not in self.project.firmware_definition[\"layout\"]:\n self.project.logging.critical(\"The firmware stacking method is not defined\")\n exit(1)\n\n # Ensure firmware generation path exists and is a dir\n if not os.path.isdir(self.project.firmware_directory):\n os.makedirs(self.project.firmware_directory)\n\n # Ensure firmware exists\n # TODO : iterate the list of squashfs files\n if not os.path.isfile(self.project.firmware_filename):\n logging.critical(\"The firmware does not exist (\" +\n self.project.firmware_filename + \")\")\n exit(1)\n\n # Remove existing initscript if needed\n if os.path.isfile(self.project.init_filename):\n os.remove(self.project.init_filename)\n\n # Copy the init script to the target directory\n\n # Generate the stacking script\n self.generate_stack_script()",
"def on_install(self, event):\n unit = self.model.unit\n\n # Install your software and its dependencies\n\n unit.status = ActiveStatus()",
"def __do_single_binary_install(item):\n\n name = item.name\n local_name = item.local_name\n install_name = item.install_name\n\n # First copy the new file.\n if copy_file(local_name, install_name, DTF_BINARIES_DIR) != 0:\n log.e(TAG, \"Error copying binary '%s'\" % (local_name))\n return -1\n\n # Update database\n if __update_binary(item) == 0:\n log.e(TAG, \"Failed to update binary '%s' details in database.\"\n % (name))\n return -2\n\n log.i(TAG, \"Binary '%s' installed successfully!\" % name)\n return 0",
"def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)",
"def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )",
"def post_installation(self, exc_value):\n pass",
"def test_handle_install_existing_version_file(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n file_handle = open(\"last_firmware_version.txt\", \"w\")\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )\n\n file_handle.close()\n os.remove(\"last_firmware_version.txt\")",
"def test_handle_install_file_not_present(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN_FILE,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def pack_firmware(self, work_dir, jobclient, version_string=\"\"):\n raise NotImplementedError(\"Abstract method not implemented\")",
"def install_component_firmware(self, component_name, image_path):\n raise NotImplementedError",
"def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)",
"def _create_initial_install_file():\n if not _are_components_installed():\n touch(INITIAL_INSTALL_FILE)",
"def update_firmware(self) -> str:",
"def install(self):\n if not self._is_installed():\n _logger.debug('Installing {name}...'.format(name=self.file_name))\n self._download_archive()\n self._unpack_archive()\n self._remove_archive()\n self._make_executable()\n else:\n _logger.debug('{name} is already installed.'.format(name=self.file_name))\n\n self._add_variables()",
"async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True",
"def post_install(self, installable_pkgs):\n pass",
"def software_load(self, filename: str) -> None:\n pass # Most boards can use serialboot.",
"def pre_installation(self):\n pass",
"def run(self):\n if pakit.conf.IDB.get(self.recipe.name, None) is None:\n print(self.recipe.name + ': Not Installed')\n return\n\n walk_and_unlink(self.recipe.install_dir, self.recipe.link_dir)\n try:\n shutil.rmtree(self.recipe.install_dir)\n except OSError: # pragma: no cover\n pass\n del pakit.conf.IDB[self.recipe.name]\n pakit.conf.IDB.write()",
"def with_firmware_update(self, firmware_handler: FirmwareHandler): # type: ignore\n self.logger.debug(f\"Firmware handler: {firmware_handler}\")\n if self.file_management is None:\n raise RuntimeError(\n \"File management must be enabled before firmware update\"\n )\n self.firmware_update = OSFirmwareUpdate(\n firmware_handler, self._on_firmware_update_status\n )\n\n return self",
"def post_install_pkg(self, installable_pkg):\n pass",
"def execute(self):\n\n super(BasicInstaller, self).execute()",
"def execute(self):\n\n super(BasicInstaller, self).execute()",
"def on_install(self, request, trigger_context):\n raise NotImplementedError"
]
| [
"0.68683445",
"0.6780528",
"0.6478671",
"0.64072794",
"0.62866247",
"0.6233456",
"0.6227008",
"0.61777616",
"0.6163451",
"0.6149231",
"0.6125689",
"0.61148953",
"0.60847825",
"0.6048365",
"0.5996529",
"0.5972869",
"0.59377974",
"0.59254813",
"0.5879711",
"0.58544075",
"0.5840503",
"0.5824839",
"0.5808386",
"0.5783343",
"0.57558775",
"0.5747489",
"0.57118356",
"0.5706367",
"0.5706367",
"0.56913954"
]
| 0.7618573 | 0 |
Test passing an invalid firmware handler. | def test_invalid_firmware_handler(self):
mock_status_callback = MagicMock()
self.assertRaises(
ValueError, OSFirmwareUpdate, 1, mock_status_callback
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_handle_install_file_not_present(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN_FILE,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"async def test_firmware_check_error(hass: HomeAssistant) -> None:\n\n config_entry = MockConfigEntry(\n domain=DOMAIN, unique_id=\"qsw_unique_id\", data=CONFIG\n )\n config_entry.add_to_hass(hass)\n\n with patch(\n \"homeassistant.components.qnap_qsw.QnapQswApi.check_firmware\",\n side_effect=APIError,\n ), patch(\n \"homeassistant.components.qnap_qsw.QnapQswApi.validate\",\n return_value=None,\n ), patch(\n \"homeassistant.components.qnap_qsw.QnapQswApi.update\",\n return_value=None,\n ):\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n assert config_entry.state is ConfigEntryState.LOADED",
"def test_handle_install_not_idle(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n firmware_update.logger.warning = MagicMock()\n\n firmware_update.current_status = 1 # Not None\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.logger.warning.assert_called_once()",
"def test_firmware_version(self):\n self._verify_firmware_version()",
"def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)",
"def test_on_invalid(self):\n assert self.route.on_invalid(str).route[\"on_invalid\"] == str",
"def test_invalid_signal(self):\n mediator = Mediator()\n\n try:\n mediator.signal(\"foo\")\n except:\n raise AssertionError()",
"def _verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)",
"async def test_bad_trigger_platform(hass):\n with pytest.raises(vol.Invalid) as ex:\n await async_validate_trigger_config(hass, [{\"platform\": \"not_a_platform\"}])\n assert \"Invalid platform 'not_a_platform' specified\" in str(ex)",
"def test_invalid_upload_mode(self):\n # verify mode doesn't exist\n\n mode = \"invalid_mode\"\n self.assertFalse(mode in UPLOAD_MODES)\n\n with self.assertRaises(Exception):\n upload_helpers.verify_upload_mode(mode)",
"def test_handle_abort_when_not_idle(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ABORTED\n )\n firmware_update.current_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n firmware_update.handle_abort()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_invalid_validation(self, mock_api_handler, mock_set_and_write):\n stub_validation_result = self.StubValidationResult()\n stub_validation_result.valid = False\n mock_api_handler.prepare_and_validate_for_upload.side_effect = [stub_validation_result]\n mock_set_and_write.side_effect = [True]\n\n with self.assertRaises(Exception):\n upload_helpers.irida_prep_and_validation(\"seqrun\", \"\")\n\n mock_api_handler.prepare_and_validate_for_upload.assert_called_with(\"seqrun\")\n mock_set_and_write.assert_called_with(\"\", DirectoryStatus.ERROR,\n 'Sequencing run can not be uploaded, Errors: []')",
"def test_raise_on_invalid(self):\n assert \"raise_on_invalid\" not in self.route.route\n assert self.route.raise_on_invalid().route[\"raise_on_invalid\"]",
"def test_handle_flag_error(self):\n pass",
"def test_handle_abort_when_not_idle_and_version_file(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n firmware_update.current_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n file_handle = open(\"last_firmware_version.txt\", \"w\")\n file_handle.close()\n firmware_update.handle_abort()\n\n self.assertFalse(os.path.exists(\"last_firmware_version.txt\"))",
"def test_not_implemented(self):\n\n test_handler = EventHandler(self.mock_interruption_event)\n\n with self.assertRaises(NotImplementedError):\n test_handler.handle()",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def test_validate_fake_wire(self, circuit):\n with pytest.raises(ValueError, match=r\"Wire ID 4 falls outside the range \\[0, 4\\).\"):\n circuit._validate_wire_ids(wire_ids=[4])",
"def test_no_response(self):\r\n self.assertRaises(ValueError, unpack_answ, {ERROR: 'Bad Request'})",
"def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)",
"def test_notRunning_pump_noValve(self):\n\t\tdef mock_def(uri, *args):\n\t\t\tif(uri == u\"ch.db.getdevicedata\"):\n\t\t\t\treturn {'name':'testPump', 'type':'pump', 'group':-1, 'id':1, 'description': ''}\n\t\t\telif(uri == u\"ch.gpio.isrunning\"):\n\t\t\t\treturn False\n\t\t\telif(uri == u\"ch.db.getdevicegroup\"):\n\t\t\t\treturn([])\n\t\t\telse:\n\t\t\t\traise ValueError(\"Given URI does not exist ({})\".format(uri))\n\n\t\tdm = DeviceManager()\n\t\tdm.sessionID = None\n\t\tdm.call=MagicMock(side_effect=mock_def)\n\t\tdm.publish=MagicMock()\n\t\tself.failureResultOf(dm.switchIfAllowed('testPump', None), ApplicationError)",
"def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def test_bad_signature(fail_on_mismatch, settings, rf):\n app_key = '123appkey'\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, fail_on_mismatch)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n GOOD_EVENT_LIST_JSON,\n content_type='application/json')\n\n response = view(request)\n if fail_on_mismatch:\n assert response.status_code == 400\n assert response.content == ErrorMessages.INVALID_SIGNATURE\n else:\n assert response.status_code == 200",
"def test_handler_fail_silent(self):\n\n @intent_handler\n def date_test(date: datetime.date):\n return date\n\n r = create_request(\"TEST_CONTEXT\", date=[\"not a date\"])\n result = date_test(r)\n self.assertIsInstance(result, EntityValueException)\n\n @intent_handler\n def int_test(integer: int):\n return integer\n\n r = create_request(\"TEST_CONTEXT\", integer=[\"not a number\"])\n result = int_test(r)\n self.assertIsInstance(result, EntityValueException)",
"def test_bad_integration(self, bad_provider):\n p = bad_provider()\n with pytest.raises(NotImplementedError):\n p.notify(**self.valid_data)",
"def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))",
"def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)",
"def test_invalid_action(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Ask for permission.\n state_changer = request.state_changer\n self.assertFalse(state_changer.can_perform(context, a.COMPLETE))\n\n # Beg for forgiveness.\n err = fysom.FysomError\n self.assertRaises(err, state_changer.perform, context, a.COMPLETE, None)",
"def test_tolerate_decorated_function_raise_if_switch_fail():\n def test_function():\n raise AttributeError()\n def test_switch(*args, **kwargs):\n return False, args, kwargs\n fn = tolerate(switch=test_switch)(test_function)\n fn()",
"def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)"
]
| [
"0.66562074",
"0.64156896",
"0.60923696",
"0.6086644",
"0.59546596",
"0.58879626",
"0.5856399",
"0.58014596",
"0.5759754",
"0.57540214",
"0.5742313",
"0.57324404",
"0.5703995",
"0.5692802",
"0.5641137",
"0.5623746",
"0.56231046",
"0.5616834",
"0.56164026",
"0.56005",
"0.5594427",
"0.5591971",
"0.55700624",
"0.5566553",
"0.5560758",
"0.55507255",
"0.5541598",
"0.5535439",
"0.5532251",
"0.552529"
]
| 0.8097426 | 0 |
Test getting firmware version from firmware handler. | def test_get_firmware_version(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
firmware_version = "1.0"
mock_firmware_handler.get_current_version = MagicMock(
return_value=firmware_version
)
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
self.assertEqual(
firmware_version, firmware_update.get_current_version()
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_firmware_version(self):\n self._verify_firmware_version()",
"def test_fw_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'FW version' in result.output\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0",
"def test_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0",
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def _verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)",
"def test_update_hyperflex_server_firmware_version(self):\n pass",
"def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass",
"def test_patch_hyperflex_server_firmware_version(self):\n pass",
"def get_firmware_version(self):\n cmd = protocol.GET_FIRMWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.firmware_version = value[0][1:]\n else:\n return False",
"def test_create_hyperflex_server_firmware_version(self):\n pass",
"async def test_get_fw_version(subject: Controller):\n _, fw_version = _find_smoothie_file()\n assert subject._cached_fw_version == fw_version",
"def firmware_version(self):\n return self.data.get('fw_ver')",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def test_get_version(self):\n pass",
"def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()",
"def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")",
"async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"async def get_firmware_version(self):\n if self.debug:\n print(\"Sending GET_FIRMWARE_VERSION\")\n\n response = await self.call_function(_COMMAND_GETFIRMWAREVERSION)\n if response is None:\n raise RuntimeError('Failed to detect the PN532')\n return tuple(response)",
"def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])",
"def fw_version(self):\n return self.capabilities.get(\"fw_ver\")",
"def get_firmware_version(self):\n request_command = self.parser_invoker.get_firmware_version_command_bytes(self.sequence_id, self.product_id)\n response_command_content = self.connectObj.send_receive_command(request_command)\n return response_command_content",
"def firmware_version(self):\n return self._read(MX_FIRMWARE_VERSION)",
"def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()",
"def test_report_result_changed_version(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n with open(\"last_firmware_version.txt\", \"w\") as file:\n file.write(firmware_update.firmware_handler.get_current_version())\n\n firmware_version = \"2.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.SUCCESS\n )\n\n firmware_update.report_result()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_invalid_firmware_handler(self):\n mock_status_callback = MagicMock()\n\n self.assertRaises(\n ValueError, OSFirmwareUpdate, 1, mock_status_callback\n )",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def get_firmware_version(self):\n response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)\n if response is None:\n raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')\n return (response[0], response[1], response[2], response[3])",
"def get_fw_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"async def get_firmware_version(self):\n current_time = time.time()\n #logstring(\"setting current time {}\".format(current_time))\n #logstring(\"1\")\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"2\")\n #logstring(\"checking time now 1 {}\".format(time.time()))\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n #logstring(\"checking time now 2 {}\".format(time.time()))\n #logstring(\"3\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!????\")\n return None\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"4\")\n elapsed_time = time.time()\n #logstring(\"setting elapsed time {}\".format(elapsed_time))\n #logstring(\"5\")\n if elapsed_time - current_time > 3:\n #logstring(\"really took too long: {} {} {}\".format(elapsed_time, current_time, elapsed_time - current_time))\n return None\n #logstring(\"7\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!\")\n return None\n await asyncio.sleep(self.sleep_tune)\n #logstring(\"8\")\n #logstring(\"Geez, that took: {} {} {} ??????????????????\".format(elapsed_time, current_time, elapsed_time - current_time))\n\n reply = ''\n #logstring(\"9\")\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n #logstring(\"10\")\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"def get_hardware_version(self):\n cmd = protocol.GET_HARDWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.hardware_version = value[0][1:]\n else:\n return False"
]
| [
"0.8165977",
"0.77345246",
"0.7501908",
"0.742611",
"0.7408754",
"0.73981726",
"0.7255102",
"0.72426367",
"0.7198219",
"0.71152556",
"0.71115816",
"0.7102065",
"0.70699406",
"0.70607394",
"0.69946396",
"0.6961693",
"0.6862018",
"0.68567574",
"0.6851694",
"0.6814915",
"0.66981435",
"0.6654387",
"0.66406965",
"0.66329765",
"0.6609573",
"0.65390164",
"0.6523319",
"0.6520274",
"0.6491164",
"0.6468635"
]
| 0.8004265 | 1 |
Test receiving install command and file exists on device. | def test_handle_install_file_present(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
mock_firmware_handler.get_current_version = MagicMock(
return_value="1.0"
)
mock_firmware_handler.install_firmware = MagicMock()
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
firmware_update.logger.setLevel(logging.CRITICAL)
expected_status = FirmwareUpdateStatus(
FirmwareUpdateStatusType.INSTALLING
)
file_handle = open("test_file", "w")
firmware_update.handle_install("test_file")
firmware_update.status_callback.assert_called_once_with(
expected_status
)
file_handle.close()
os.remove("test_file")
os.remove("last_firmware_version.txt") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verifyInstalled(cmd):\n\tprint \"Verifying %s works...\" % cmd\n\tif (sys.platform == 'win32'):\n\t\ttry:\n\t\t\tstatus = subprocess.call(shlex.split(cmd))\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True\n\t\texcept OSError as e:\n\t\t\tprint >>sys.stderr, \"Execution failed with verification: \",e\n\t\t\tprint cmd + \" was not installed correctly.\"\n\t\t\treturn False\n\telse:\n\t\tstatus = os.system(cmd)\n\t\tif (status == NOT_INSTALLED):\n\t\t\tprint status\n\t\t\tprint \"An error occured with installation/environment variables. %s is still not installed.\" % cmd\n\t\t\treturn False\n\t\telse:\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True",
"def installer_exists(self, platform):\n \n validations.validate_platform(platform)\n \n installer_filename = os.path.join(\n settings.CUSTOM_INSTALLER_ROOT,\n self.build_id,\n constants.PLATFORM_BUNDLES[platform]\n )\n\n if os.path.isfile(installer_filename):\n return True\n \n return False",
"def _is_installed(self) -> bool:\n try:\n sh.Command(self._file_full_path)\n return True\n except sh.CommandNotFound:\n return False",
"def test_azurecli_binary_exists(host):\n host.file(PACKAGE_BINARY).exists",
"def test_installed(self):\n self.assertTrue(self.qi.isProductInstalled(PROJECTNAME))",
"def test_local_install(self):\n\t\tfile = ffmpeg.install_local(local_dir=self.dir, verbose=False, force_download=True)\n\t\tself.assertTrue(file, msg='FFmpeg was not downloaded!')\n\t\tself.assertIn('ffmpeg', file, msg='FFmpeg filename was invalid!')\n\t\tself.assertTrue(os.path.exists(file), msg='FFmpeg binary does not exist!')\n\t\toutput = subprocess.check_output([file, '-version']).decode('utf-8')\n\t\tself.assertIn('ffmpeg version', output, msg='Failed to execute ffmpeg version check!')",
"def test_installed(self):\n # OSA script should have been installed in setUp function\n self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))",
"def test_install(self):\n pass",
"def test_pydroid_pip_install_cmd_line(self):\n\n module_name = 'simplekv'\n package_dir = os.path.join(site_packages_dir(), module_name)\n self.assertFalse(os.path.exists(package_dir))\n cmd = ['pydroid', 'pip', 'install', module_name]\n subprocess.call(cmd)\n self.assertTrue(os.path.exists(package_dir))",
"def test_handle_install_file_not_present(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN_FILE,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_verify_installation(self):\n self.installer._pretty_print = Mock()\n self.installer._run_command = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stderr.read().splitlines.return_value = []\n self.assertEqual(\"Success\", self.installer.verify_installation())\n stderr.read().splitlines.return_value = [\"error\"]\n self.assertEqual(\"Fail\", self.installer.verify_installation())",
"def test_install(self):\n self.installer._run_command = Mock()\n self.installer._pretty_print = Mock()\n self.installer.verify_installation = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stdout.channel.recv_exit_status.return_value = 0\n self.installer.verify_installation.return_value = \"Success\"\n self.installer.install()\n # self.installer.verify_installation.return_value = \"Fail\"\n # with self.assertRaises(Exception):\n # self.installer.install()\n self.installer.verify_installation.return_value = \"Success\"\n stdout.channel.recv_exit_status.return_value = -1\n with self.assertRaises(Exception):\n self.installer.install()\n self.installer._run_command.side_effect = SSHException\n with self.assertRaises(SSHException):\n self.installer.install()",
"def is_installed(cmd):\n rc, _, _ = execute(\"which %s\" % cmd, die=False)\n if rc:\n return False\n else:\n return True",
"def check_install():\n if platform.dist()[0] not in ['fedora', 'redhat', 'centos']:\n print \"{} not supported\".format(platform.dist()[0])\n sys.exit(1)\n print \"\\ndetected {} {} ...\".format(platform.dist()[0], platform.dist()[1])\n\n import yum\n # Remove loggin. Taken from: https://stackoverflow.com/a/46716482\n from yum.logginglevels import __NO_LOGGING\n yumloggers = [\n 'yum.filelogging.RPMInstallCallback', 'yum.verbose.Repos',\n 'yum.verbose.plugin', 'yum.Depsolve', 'yum.verbose', 'yum.plugin',\n 'yum.Repos', 'yum', 'yum.verbose.YumBase', 'yum.filelogging',\n 'yum.verbose.YumPlugins', 'yum.RepoStorage', 'yum.YumBase',\n 'yum.filelogging.YumBase', 'yum.verbose.Depsolve'\n ]\n for loggername in yumloggers:\n logger = logging.getLogger(loggername)\n logger.setLevel(__NO_LOGGING)\n\n yumbase = yum.YumBase()\n pkg = 'Percona-XtraDB-Cluster-server-<%= @percona_major_version %>'\n if yumbase.rpmdb.searchNevra(name=pkg):\n pkg_list = yumbase.rpmdb.searchNevra(name=pkg)\n print 'detected {} ...'.format(pkg_list[0])\n else:\n print \"{}{} not installed{}\".format(RED, pkg, WHITE)\n sys.exit(1)\n return 'percona'",
"def test_install_should_call_subprocess_run(self, mock_subprocess):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.INSTALL: {\n EXTCFG_OPTION.EXEC_EXT_CMD: ['command'],\n }\n })\n ext_manager = PkgInstExtrasManager(manifest)\n ext_manager.handle_install_extras()\n mock_subprocess.assert_called_with(\n 'command',\n check=True,\n stderr=-1,\n stdout=-1,\n timeout=90,\n universal_newlines=True)",
"def test_scripts_are_installed(self):\n fits_file = os.path.join(self.datadir, 'monol_testA.evt')\n command = 'HENreadfile {0}'.format(fits_file)\n sp.check_call(command.split())",
"def test_product_is_installed(self):\n try:\n result = self.installer.is_product_installed(PROJECT_NAME)\n except AttributeError:\n result = self.installer.isProductInstalled(PROJECT_NAME)\n self.assertTrue(result)",
"def test_is_package_installed(self, mock_run):\n\n build_cmake_project.is_package_installed('tmux')\n mock_run.assert_called_once_with(\n args='dpkg-query -l tmux', check=True, shell=True)",
"def check_installation():\n print(\n 'Hooray! CCurl is installed correctly!'\n if is_installed()\n else 'Aww, man! CCurl is NOT installed correctly!'\n )\n print('For support, visit the #iota-libs-pyota channel on the IOTA Slack.')\n print('https://slack.iota.org/')",
"def is_installed(programme: str) -> bool:\n res = shutil.which(programme)\n return res is not None",
"def _check_install(self):\n\n try:\n # Get chrome version\n chrome_version = subprocess.run(\"google-chrome --version\", shell=True, capture_output=True, text=True, check=True).stdout\n chrome_version_number = chrome_version.split(' ')[2]\n chrome_version_number = '.'.join(chrome_version_number.split('.')[0:3])\n\n # Get driver version\n driver_version = subprocess.run(\"chromedriver --version\", shell=True, capture_output=True, text=True, check=True).stdout\n driver_version_number = driver_version.split(' ')[1]\n driver_version_number = '.'.join(driver_version_number.split('.')[0:3])\n\n # https://chromedriver.chromium.org/downloads/version-selection\n return True if chrome_version_number == driver_version_number else False\n\n # If there is an exception, that means the install is missing\n except subprocess.CalledProcessError:\n return False",
"def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass",
"def test_azurecli_binary_isfile(host):\n assert host.file(PACKAGE_BINARY).is_file",
"def test_installed(self):\n script = which('parsefin')\n if not script:\n raise SkipTest(\"Not installed\")\n script = script[0]\n\n return self.runScript(script)",
"def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())",
"def test_call(self):\n output, _error = self.executor.pip('install', 'attrs').batch()\n self.assertEqual(output, 'attrs installed')",
"def check_install(self, gppkg_filename):\n cmd = \"gppkg -q %s\" % gppkg_filename\n results = run_command(cmd)\n test_str = ''.join(gppkg_filename.split('-')[:1]) + \" is installed\"\n is_installed = test_str in results\n return is_installed and CheckFile(os.path.join(ARCHIVE_PATH, gppkg_filename)).run()",
"def test_install(self):\n # This call should not throw an exception\n checked_subprocess_run(f\"{self.python} -m pip install .\")\n\n # Check the version number from `pip info`\n info, _ = checked_subprocess_run(f\"{self.python} -m pip show {PACKAGE_NAME}\")\n\n # The info section from pip is formatted as a RFC 2882 mail header.\n parser = HeaderParser()\n data = parser.parsestr(info)\n version = data[\"version\"]\n\n # Version should be set, should not be the default 0.0.0, and should\n # match __version__ set in the package.\n self.assertTrue(version)\n self.assertNotEqual(version, \"0.0.0\")\n self.assertEqual(version, __version__)",
"def test_product_is_installed(self):\n qi_tool = getToolByName(self.portal, 'portal_quickinstaller')\n pid = 'imio.media'\n installed = [p['id'] for p in qi_tool.listInstalledProducts()]\n self.assertTrue(pid in installed,\n 'package appears not to have been installed')",
"def test_subversion_binary_exists(host):\n assert host.file(PACKAGE_BINARY).exists"
]
| [
"0.67005706",
"0.6697824",
"0.66566634",
"0.653048",
"0.65262645",
"0.64361185",
"0.6433776",
"0.64265186",
"0.64114606",
"0.638161",
"0.63736176",
"0.6365181",
"0.6316361",
"0.6308801",
"0.6239025",
"0.62156874",
"0.62092596",
"0.6183086",
"0.6174404",
"0.6156247",
"0.6145892",
"0.6115465",
"0.61061925",
"0.6099411",
"0.6088555",
"0.6066645",
"0.60642874",
"0.605324",
"0.605111",
"0.60497034"
]
| 0.68001217 | 0 |
Test receiving the abort command when module not idle. | def test_handle_abort_when_not_idle(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
firmware_update.logger.setLevel(logging.CRITICAL)
expected_status = FirmwareUpdateStatus(
FirmwareUpdateStatusType.ABORTED
)
firmware_update.current_status = FirmwareUpdateStatus(
FirmwareUpdateStatusType.INSTALLING
)
firmware_update.handle_abort()
firmware_update.status_callback.assert_called_once_with(
expected_status
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abort() -> NoReturn:\n raise AbortSignal",
"def abort(self):\n print(\"abort\")",
"def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')",
"def aborting(self):\n \n pass",
"def _thread_check_abort_event(self):\n self._require_controller_modes('thread_initialized')\n return self.thread.check_abort_event()",
"def abortEvent(self,event):\n # TODO: make interactorObserverTags a map to we can\n # explicitly abort just the event we handled - it will\n # be slightly more efficient\n for tag in self.interactorObserverTags:\n cmd = self.interactor.GetCommand(tag)\n if cmd is not None:\n cmd.SetAbortFlag(1)",
"def do_abort(self):\n self.abort = True\n if self.monitor: self.monitor.stop( )",
"def halt_cmd(ctx):\n pass",
"def _doAbort(self):\n self._cmdAbort()",
"def abort(self):\n raise NotImplementedError",
"def abort(self):\n self.trig_mode |= 1 << 3",
"def Abort(self):\n handler = self.get_command_object(\"Abort\")\n handler()",
"def test_abort(exception_app):\n request, response = exception_app.test_client.get('/abort/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/abort')\n assert response.status == 500",
"def abort_command(pnd):\n return _nfc.abort_command(pnd)",
"def abort(self):\r\n LOG(\"Aborting execution\")\r\n self.controller.abort()",
"def abortConnection():\n pass",
"def _abort(self, context):\n if self._on_fail is not None:\n context.change_state(self._on_fail)\n else:\n context.revert_state()\n\n arrive_msg = Telegram(context.agent_id, None, MessageTypes.MSG_PATH_FAIL, context.location)\n context.world.dispatch(arrive_msg)",
"def test_terminate_run(self):\n pass",
"def abort_requested():\n if KODI_VERSION_MAJOR > 13:\n return MONITOR.abortRequested()\n\n return xbmc.abortRequested",
"def deny():\n raise InterruptEvent",
"def abort(self):\n self.write(\":ABORT\")",
"def ctxAbort(*args, **kwargs)->None:\n pass",
"def test_abort():\n\n code, response = abort(404)\n assert code == 404\n assert 'errors' in response\n assert response['errors'][0]['message'] == '404 Not Found'",
"def set_abort_flag(self):\r\n self.abort_flag = True",
"def set_abort_flag(self):\r\n self.abort_flag = True",
"def halt(*_, **kwargs):\n raise ExecutionFinished(\"Reached halt\")",
"def GET_kill(self):\n sys.exit(0)",
"def __abort_script(message):\n print(message)\n sys.exit()",
"def interrupt_kernel(self):",
"def abortAndBrake(self):\n return self.set_command(\"B\")"
]
| [
"0.686748",
"0.6848484",
"0.6717514",
"0.6581567",
"0.65720046",
"0.6555663",
"0.6496909",
"0.645464",
"0.6448417",
"0.64369285",
"0.6343544",
"0.6331832",
"0.63009477",
"0.6273014",
"0.6263834",
"0.62392724",
"0.62390983",
"0.62250054",
"0.6198961",
"0.6179422",
"0.61697525",
"0.61688685",
"0.6129191",
"0.60817045",
"0.60817045",
"0.6074457",
"0.60682285",
"0.6067448",
"0.60622364",
"0.6061558"
]
| 0.6867068 | 1 |
Test the abort command when not idle and version file exists. | def test_handle_abort_when_not_idle_and_version_file(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
firmware_update.logger.setLevel(logging.CRITICAL)
firmware_update.current_status = FirmwareUpdateStatus(
FirmwareUpdateStatusType.INSTALLING
)
file_handle = open("last_firmware_version.txt", "w")
file_handle.close()
firmware_update.handle_abort()
self.assertFalse(os.path.exists("last_firmware_version.txt")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_handle_abort_when_not_idle(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ABORTED\n )\n firmware_update.current_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n firmware_update.handle_abort()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def aborting(self):\n \n pass",
"def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')",
"def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)",
"def abort() -> NoReturn:\n raise AbortSignal",
"def abort(self):\n print(\"abort\")",
"def abort_requested():\n if KODI_VERSION_MAJOR > 13:\n return MONITOR.abortRequested()\n\n return xbmc.abortRequested",
"def state_failsafe_exit(cfg, app, win):",
"def __abort_script(message):\n print(message)\n sys.exit()",
"def exit_openbox(self):\n if self.are_you_sure(\"Bist du sicher, dass du zur Konsole zurück willst?\")=='yes':\n ret = os.system(\"openbox --exit\")",
"def test_readiness(self):\n self.command.package = self.input_ovf\n ready, reason = self.command.ready_to_run()\n self.assertFalse(ready)\n self.assertRegex(reason, \"No file information\")\n self.assertRaises(InvalidInputError, self.command.run)\n\n self.command.file_path = \"input.vmdk\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)\n\n self.command.file_path = None\n self.command.file_id = \"file1\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)",
"def test_deletes_lockfile_on_exit(self):\n self.lock.__enter__()\n self.assertTrue(os.path.exists(self.lock.lockfile_path))\n self.lock.__exit__(None, None, None)\n self.assertFalse(os.path.exists(self.lock.lockfile_path))",
"def test_finished_no_vm(self):\n self.command.finished()",
"def test_exit_on_wrong_extension(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=WRONG_EXT_FILE)",
"def CheckForUnknownFiles(self):\r\n unknown_files = self.GetUnknownFiles()\r\n if unknown_files:\r\n print \"The following files are not added to version control:\"\r\n for line in unknown_files:\r\n print line\r\n prompt = \"Are you sure to continue?(y/N) \"\r\n answer = raw_input(prompt).strip()\r\n if answer != \"y\":\r\n ErrorExit(\"User aborted\")",
"def hgHisteditAbort(self, name):\n # find the root of the repo\n repodir = self.vcs.splitPath(name)[0]\n while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):\n repodir = os.path.dirname(repodir)\n if os.path.splitdrive(repodir)[1] == os.sep:\n return False\n \n args = self.vcs.initCommand(\"histedit\")\n args.append(\"--abort\")\n args.append(\"-v\")\n \n editor = os.path.join(\n os.path.dirname(__file__), \"HgHisteditEditor.py\")\n env = {\"HGEDITOR\": \"{0} {1}\".format(sys.executable, editor)}\n \n dia = HgDialog(\n self.tr(\"Abort histedit session\"),\n self.vcs,\n useClient=False)\n res = dia.startProcess(args, repodir, environment=env)\n if res:\n dia.exec_()\n res = dia.hasAddOrDelete()\n self.vcs.checkVCSStatus()\n return res",
"def confirm_file(self,fn):\n\t\t\n\t\tif self.autoreload: \n\t\t\tprint '[STATUS] autoreloading %s'%fn\n\t\t\treturn True\n\t\telse:\n\t\t\tcaller = sys._getframe().f_back.f_code.co_name\n\t\t\tprint \"[STATUS] function %s found %s\"%(caller,fn)\n\t\t\tans = raw_input('[QUESTION] is this file valid else quit (y/N)? ')\n\t\t\tif re.match('^(y|Y)',ans): return True\n\t\t\telse: raise Exception('\\n[ERROR] file was invalid and must be deleted manually:\\n%s'%fn)\n\t\t\t#---! may want to later allow a file deletion if the user says the file is invalid\t\t",
"def CheckForUnknownFiles(self):\n unknown_files = self.GetUnknownFiles()\n if unknown_files:\n print \"The following files are not added to version control:\"\n for line in unknown_files:\n print line\n prompt = \"Are you sure to continue?(y/N) \"\n answer = raw_input(prompt).strip()\n if answer != \"y\":\n ErrorExit(\"User aborted\")",
"def abort(msg=''):\n if msg:\n print >> sys.stderr, msg\n sys.exit(1)",
"def test_bad_file() -> None:\n\n bad = random_string()\n rv, out = getstatusoutput(f'{RUN} {bad}')\n assert rv != 0\n assert out.lower().startswith('usage:')\n assert re.search(f\"No such file or directory: '{bad}'\", out)",
"def msg_app_exitcheck(self,msg):\r\n #check for unsaved files\r\n res= self.frame.notebook.CheckClose()\r\n if res is False:\r\n self.app.VetoExit()",
"def abort(self):\n raise NotImplementedError",
"def state_wait_exit(cfg, app, win):",
"def abort(message):\n\n sys.stderr.write(message + '\\n')\n sys.exit(1)",
"def test_not_ready_if_insufficient_working_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available for\"\n \" temporary file storage\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)",
"def quit(self):\n if askokcancel(\"Verify Exit\", \"Really quit?\"):\n sys.exit(0)",
"def abort(self):\r\n if self.nib and isinstance(self.nib, NIB):\r\n status = self.nib.abort()\r\n self.log.debug('aborting... status: %s' % status)\r\n else:\r\n self.log.debug('No active Nova Image Builder instance found, nothing to abort.')",
"def check_abort(*args):\n if getAbortState(args[0]) == 1:\n args[0].Controls.ValveState.valve_state = [0] * args[0].Controls.valve_number\n args[0].Controls.IgnitorState.ignitor_state = 0\n return args[0].Controls.ValveState.valve_state, args[0].Controls.IgnitorState.ignitor_state",
"def _doAbort(self):\n self._cmdAbort()",
"def test_flag_aborted(self):\n container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',\n 'data')\n fs.mkdir_safe(container_dir)\n\n app_abort.flag_aborted(container_dir,\n why=app_abort.AbortedReason.INVALID_TYPE,\n payload='test')\n\n aborted_file = os.path.join(container_dir, 'aborted')\n with io.open(aborted_file) as f:\n aborted = json.load(f)\n\n self.assertEqual('invalid_type', aborted.get('why'))\n self.assertEqual('test', aborted.get('payload'))"
]
| [
"0.65638506",
"0.62183833",
"0.6206509",
"0.6062271",
"0.60525274",
"0.60190946",
"0.59591633",
"0.5886924",
"0.5834233",
"0.5819208",
"0.5789556",
"0.5770664",
"0.5753073",
"0.5743728",
"0.57052815",
"0.56777036",
"0.56483227",
"0.5645267",
"0.56043744",
"0.56035477",
"0.5601301",
"0.5584454",
"0.55786806",
"0.5560491",
"0.5555281",
"0.55530673",
"0.55529493",
"0.5552303",
"0.5551738",
"0.5544259"
]
| 0.7593396 | 0 |
Test reporting result with no stored firmware version. | def test_report_result_no_stored_file(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
firmware_update.logger.setLevel(logging.CRITICAL)
firmware_update.logger.debug = MagicMock()
firmware_update.report_result()
self.assertEqual(2, firmware_update.logger.debug.call_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_firmware_version(self):\n self._verify_firmware_version()",
"def test_report_result_unchanged_version(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n with open(\"last_firmware_version.txt\", \"w\") as file:\n file.write(firmware_update.firmware_handler.get_current_version())\n\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.INSTALLATION_FAILED,\n )\n\n firmware_update.report_result()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def _verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)",
"def test_update_hyperflex_server_firmware_version(self):\n pass",
"def test_patch_hyperflex_server_firmware_version(self):\n pass",
"def test_create_hyperflex_server_firmware_version(self):\n pass",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def check_fw_versions(self, sys_info, api_results):\n if not api_results.get(\"latest_efi_version\"):\n # Call the API to see what the latest version of EFI you are\n # expected to be running given OS ver and mac model\n api_results[\n self.current_endpoint][\"latest_efi_version\"] = self.__make_api_get(\n '/apple/latest_efi_firmware/%s/%s' %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\")))\n\n self.message(\"\\n\\tEFI firmware version check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_efi_version\"]):\n # Valid response from API - now interpret it\n\n # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions\n # the way that EFI versions are denoted by Apple makes this more of\n # a pain thatit really needs to be quite honestly\n api_efi_str = api_results[\"latest_efi_version\"][\"msg\"].split(\".\")\n my_efi_str = sys_info.get(\"rom_ver\").split(\".\")\n\n api_efi_ver = int(api_efi_str[1], 16)\n api_efi_build = int(api_efi_str[2].replace(\"B\", \"\"), 16)\n\n if all([x.isdigit() for x in my_efi_str]):\n # Newer EFI versions do not include a build number\n # or the Mac model code. The output will be something\n # like 256.0.0, whereas with the old format it would\n # be MBP133.0256.B00.\n my_efi_ver = int(my_efi_str[0], 16)\n my_efi_build = 0\n else:\n my_efi_ver = int(my_efi_str[1], 16)\n my_efi_build = int(my_efi_str[2].replace(\"B\", \"\"), 16)\n\n if api_efi_str == my_efi_str:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n\n elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build):\n # Looks like you're running a beta or a dev build - pretty much\n # all bets are off here as the dataset doens't cover dev builds\n # but a nicer message makes sense\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))\n\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). Your firmware is %s, the firmware we expected to see is %s.\\n\" %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))",
"def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass",
"def test_report_result_changed_version(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n with open(\"last_firmware_version.txt\", \"w\") as file:\n file.write(firmware_update.firmware_handler.get_current_version())\n\n firmware_version = \"2.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.SUCCESS\n )\n\n firmware_update.report_result()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_no_unlisted(self):\n Version.objects.get(pk=self.version_1_2_2).update(\n channel=amo.RELEASE_CHANNEL_UNLISTED)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"async def get_firmware_version(self):\n current_time = time.time()\n #logstring(\"setting current time {}\".format(current_time))\n #logstring(\"1\")\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"2\")\n #logstring(\"checking time now 1 {}\".format(time.time()))\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n #logstring(\"checking time now 2 {}\".format(time.time()))\n #logstring(\"3\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!????\")\n return None\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"4\")\n elapsed_time = time.time()\n #logstring(\"setting elapsed time {}\".format(elapsed_time))\n #logstring(\"5\")\n if elapsed_time - current_time > 3:\n #logstring(\"really took too long: {} {} {}\".format(elapsed_time, current_time, elapsed_time - current_time))\n return None\n #logstring(\"7\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!\")\n return None\n await asyncio.sleep(self.sleep_tune)\n #logstring(\"8\")\n #logstring(\"Geez, that took: {} {} {} ??????????????????\".format(elapsed_time, current_time, elapsed_time - current_time))\n\n reply = ''\n #logstring(\"9\")\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n #logstring(\"10\")\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def test_handle_install_file_not_present(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN_FILE,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_installed_beta_no_newer_stable(self):\n self.change_version(self.version_1_2_2, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def test_fw_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'FW version' in result.output\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0",
"def test_delete_hyperflex_server_firmware_version(self):\n pass",
"def test_not_public(self):\n self.change_status(self.version_1_2_2, amo.STATUS_NULL)\n self.addon.update(status=amo.STATUS_NULL)\n version, file = self.get('1.2.1', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))",
"def test_missing_version(self):\n\n params_82 = {'ReQuEsT': \"DescribeCoverage\", 'SeRvIcE': \"WCS\", \"BOGUS\": \"SSS\"}\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n soup.find('ServiceExceptionReport'),\n msg=\"The server should return an exception if the version is not included in a DescribeCoverage request.\")",
"def test_get_software(self):\n pass",
"def test_low_client(self):\n version, file = self.get('', '3000000001100',\n self.app, self.platform)\n assert version == self.version_1_0_2",
"def test_get_version(self):\n pass",
"def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)",
"def test_10_16_first_hotfix(self):\n self.data['version'] = ''\n self.data['appVersion'] = '16.0.1'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20121019.01') > -1",
"def test_version_do_not_exist(self, caplog, mock_database):\n create_experiment(\n \"a\", space={\"x\": \"uniform(0, 10)\"}, storage=mock_database.storage\n )\n\n experiment = get_experiment(\"a\", 2, storage=mock_database.storage)\n\n assert experiment.version == 1\n assert (\n \"Version 2 was specified but most recent version is only 1. Using 1.\"\n in caplog.text\n )"
]
| [
"0.7455106",
"0.71597755",
"0.69234776",
"0.68397397",
"0.65961516",
"0.6509185",
"0.6464707",
"0.6347834",
"0.63399565",
"0.62981254",
"0.6261175",
"0.62452495",
"0.622332",
"0.62195474",
"0.6128874",
"0.6124425",
"0.6118026",
"0.6103822",
"0.6097841",
"0.6080674",
"0.6051037",
"0.60387635",
"0.60330063",
"0.60178804",
"0.6005658",
"0.598935",
"0.59572816",
"0.594908",
"0.59410065",
"0.59295374"
]
| 0.7216541 | 1 |
Test reporting result with unchanged version. | def test_report_result_unchanged_version(self):
mock_status_callback = MagicMock()
mock_firmware_handler = self.MockFirmwareHandler()
firmware_version = "1.0"
mock_firmware_handler.get_current_version = MagicMock(
return_value=firmware_version
)
firmware_update = OSFirmwareUpdate(
mock_firmware_handler, mock_status_callback
)
firmware_update.logger.setLevel(logging.CRITICAL)
with open("last_firmware_version.txt", "w") as file:
file.write(firmware_update.firmware_handler.get_current_version())
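        # the stored version now matches the current one, so report_result
        # is expected to report the installation as failed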
expected_status = FirmwareUpdateStatus(
FirmwareUpdateStatusType.ERROR,
FirmwareUpdateErrorType.INSTALLATION_FAILED,
)
firmware_update.report_result()
firmware_update.status_callback.assert_called_once_with(
expected_status
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_version(self):\n pass",
"def test_report_result_changed_version(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n with open(\"last_firmware_version.txt\", \"w\") as file:\n file.write(firmware_update.firmware_handler.get_current_version())\n\n firmware_version = \"2.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.SUCCESS\n )\n\n firmware_update.report_result()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )",
"def test_get_version(self):\n pass",
"def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)",
"def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )",
"def test_upgrade_to_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\"upgrade\", *self.LOCAL, self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def test_version(self):\n result = check_output([b\"flocker-changestate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def test_update9(self):\n pass",
"def test_version_1(self):\r\n self.assertEqual(1, self._version_test(self.version1_nodrafts))",
"def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )",
"def test_d(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n\n self.assertTrue(v1 != '3.4')",
"def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()",
"def test_a(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)",
"def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)",
"def test_updateVersion(self):\n project = self.makeProject(Version(\"bar\", 2, 1, 0))\n newVersion = Version(\"bar\", 3, 2, 9)\n project.updateVersion(newVersion)\n self.assertEquals(project.getVersion(), newVersion)\n self.assertEquals(\n project.directory.child(\"topfiles\").child(\"README\").getContent(),\n \"3.2.9\")",
"def test_can_downgrade(self):\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n for v in Version.objects.filter(pk__gte=self.version_1_2_1):\n v.delete()\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n\n assert version == self.version_1_1_3",
"def test_patch(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[2] = int(new_version_parts[2]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True",
"def test_implementation_version(self):\n\n one = '1\\n'\n two = '2\\n'\n target, task = self._fixture(incremental=True)\n\n # Run twice, with a different implementation version the second time.\n DummyTask._implementation_version = 0\n self._create_clean_file(target, one)\n vtA = task.execute()\n self.assertContent(vtA, one)\n DummyTask._implementation_version = 1\n self._create_clean_file(target, two)\n vtB = task.execute()\n\n # No incrementalism.\n self.assertFalse(vtA.is_incremental)\n self.assertFalse(vtB.is_incremental)\n\n # Confirm two unassociated current directories, and unassociated stable directories.\n self.assertContent(vtA, one)\n self.assertContent(vtB, two)\n self.assertNotEqual(vtA.current_results_dir, vtB.current_results_dir)\n self.assertNotEqual(vtA.results_dir, vtB.results_dir)",
"def test_version_rename_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version rename 1.0 9.9')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_upgrade_to_latest_but_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)",
"def test_update_no_version_change(\n dbbackup, update, version_file=None, orig_version=None\n):\n version_file = cli.version_file()\n open(version_file, \"w\").write(orig_version)\n cli.initialize()\n update.assert_not_called()\n dbbackup.assert_not_called()",
"def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)",
"def test_prefer_successful_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(0, actual)",
"def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)",
"def test_recheck_fails(self):\n raise NotImplementedError",
"def test_version_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version remove 1.0')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_new_version_no_op(mocker, state, slack, clusters):\n state.exists.return_value = True\n state.get.return_value = upgrade_version # same version, already notified\n ouw.notify_cluster_new_version(clusters, state=state, slack=slack)\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0"
]
| [
"0.72260493",
"0.70254093",
"0.69303596",
"0.6838647",
"0.6824102",
"0.67868227",
"0.66719574",
"0.6652942",
"0.66527915",
"0.6641951",
"0.6631518",
"0.6617154",
"0.65960985",
"0.653902",
"0.6528611",
"0.6521069",
"0.6505069",
"0.64921314",
"0.64686817",
"0.6458891",
"0.6457613",
"0.644471",
"0.6444459",
"0.6438997",
"0.6438816",
"0.6432372",
"0.64299554",
"0.6424522",
"0.6418757",
"0.6409573"
]
| 0.73677313 | 0 |
Build a heap from ``data`` inplace. Returns a sequence of swaps performed by the algorithm. | def build_heap(data):
    # Sift every internal node down, starting from the last parent and
    # moving towards the root, so that ``data`` satisfies the min-heap
    # property in place. Each swap made while restoring the heap property
    # is recorded; building the heap this way needs only O(n) swaps.
swaps = []
for position in range(len(data)//2, 0, -1):
curr = position - 1
while curr < len(data):
# print("curr:", curr, data[curr])
left = 2*curr + 1
right = 2*curr + 2
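            # choose the smallest of curr and its two children to restore the min-heap property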
min_index = curr
if (left<len(data)) and (data[min_index] > data[left]):
min_index = left
if (right<len(data)) and (data[min_index] > data[right]):
min_index = right
if min_index != curr:
swaps.append((curr, min_index))
data[curr], data[min_index] = data[min_index], data[curr]
curr = min_index
                # print(data)
else:
# print("break==>", data)
break
# print(data)
return swaps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_heap(data):\n # The following naive implementation just sorts the given sequence\n # using selection sort algorithm and saves the resulting sequence\n # of swaps. This turns the given array into a heap, but in the worst\n # case gives a quadratic number of swaps.\n #\n data_ = [0] * (len(data) + 1)\n data_[1:] = data\n n = len(data)\n swaps = []\n for i in reversed(range(n // 2 + 1)):\n if i == 0:\n break\n sift_down(data_, i, swaps)\n\n return swaps",
"def build_heap(data: List[int]) -> List[Tuple[int, int]]:\n swaps: List[Tuple[int, int]] = []\n\n n = len(data)\n start = ceil(n/2) - 1\n for i in range(start, -1, -1):\n swaps = sink_down(i, data, swaps)\n\n return swaps",
"def build_heap(data):\n size = len(data)\n for i in range(size//2, -1,-1):\n shiftDown(data, i)",
"def build_heap(data):\n n = len(data) # elements 0 .. n-1\n swaps = []\n def swap(i, j):\n t = data[i]\n data[i] = data[j]\n data[j] = t\n swaps.append((i,j))\n def sift_down(i):\n # 3-way comparison to restore heap property to i\n new_i = i\n l = left(i); r = right(i)\n if l < n and data[l] < data[new_i]: new_i = l\n if r < n and data[r] < data[new_i]: new_i = r\n if not i == new_i:\n # i did not satsify heap property, swap and carry on down\n swap(i, new_i)\n sift_down(new_i)\n # starting from end, parent of n-1 is first that may break heap condition\n for i in range(parent(n - 1), -1, -1):\n sift_down(i)\n return swaps",
"def heap_sort(self, data, draw, speed):\n \n # building max-heap\n # first index of a non-leaf node → len(data)//2 - 1 \n for i in range(len(data) // 2 - 1, -1, -1):\n self.heapify(data, len(data), i)\n \n # extract elements (remove root and heapify)\n for i in range(len(data)-1, 0, -1):\n \n # swap root with last element\n data[i], data[0] = data[0], data[i]\n \n # heapify root\n self.heapify(data, i, 0)\n draw(data, [\"Orange\" if x == i or x == self.largest else \"#a871e3\" for x in range(len(data))])\n time.sleep(speed)",
"def sink_down(index: int, data: List[int], swaps: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n while index * 2 + 1 < len(data):\n j = index * 2 + 1\n # the other child exist and is smaller than the current one.\n if (j+1 < len(data)) and data[j+1] < data[j]:\n j += 1\n # heap order already satisfied.\n if data[index] <= data[j]:\n return swaps\n else:\n swap(index, j, data)\n swaps.append((index, j))\n index = j\n return swaps",
"def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)",
"def insert(self, data):\n # add data to list'end\n self.heap_list.append(data)\n # adjust max-heap from bottom to top\n self.sift_up(len(self.heap_list)-1)",
"def build_heap(self, arr):\n i = len(arr) // 2\n self.size = len(arr)\n self.heap_list = [-1] + arr[:]\n while i > 0:\n self.percolate_down(i)\n i = i - 1",
"def heapify(self):\r\n if self._size:\r\n start = self._parent(len(self._data)-1) # who'se the last parent?\r\n for index in range(start, -1, -1): # for all parents\r\n self.down_heap(index) # fix your heap\r",
"def heapify(seq):\n minheap = [0] + seq\n for i in range(len(seq)//2, 0, -1): #len(seq)//2 -= 1 to index 1\n minHeapify(minheap, i, seq)\n seq[:] = minheap[1:]\n return seq",
"def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)",
"def buildHeap(A):\n n = len(A)\n for i in range(n/2-1, -1, -1):\n heapify(A, i, n)",
"def heapify(data_list, size, root_index, draw_data, time_value):\n\n # declare and locate the largest index and the children of the root\n largest_index = root_index\n left_index = (2 * root_index) + 1\n right_index = (2 * root_index) + 2\n\n # change the largest if the root is smaller than the left child\n if (left_index < size) and (data_list[root_index] < data_list[left_index]):\n largest_index = left_index\n\n # change the largest if the largest is smaller than the right child\n if (right_index < size) and (data_list[largest_index] < data_list[right_index]):\n largest_index = right_index\n\n # only changes if either the left or right child is larger than the root\n if largest_index != root_index:\n _swap(data_list, root_index, largest_index)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the two elements being swapped as blue\n for x in range(len(color_list)):\n if (x == root_index) or (x == largest_index):\n color_list[x] = \"blue\"\n\n # visualize the step and wait for the specified amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n # recurse again so that it is a complete heap\n heapify(data_list, size, largest_index, draw_data, time_value)",
"def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)",
"def build_heap(self):\n n = int((len(self.array) / 2) - 1)\n\n while n >= 0:\n self.heapify_top_down(n)\n n -= 1",
"def heapSort(sequence):\n _buildHeap(sequence)\n for i in range(len(sequence) - 1, 0, -1):\n sequence[0], sequence[i] = sequence[i], sequence[0]\n _shiftDown(sequence, i - 1, 0)",
"def heapify(x):\n pass",
"def heap_sort(data_list, draw_data, time_value):\n\n # heapifies the list\n for i in range((len(data_list) // 2) - 1, -1, -1):\n heapify(data_list, len(data_list), i, draw_data, time_value)\n\n # draw the heapified list as blue before starting the popping from the heap\n draw_data(data_list, [\"blue\" for i in range(len(data_list))])\n time.sleep(time_value)\n\n for i in range(len(data_list) - 1, 0, -1):\n _swap(data_list, i, 0)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the two elements being swapped green\n for x in range(len(color_list)):\n if (x == i) or (x == 0):\n color_list[x] = \"green\"\n\n # visualize the swap and wait the specified amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n # heapify the remaining portion of the list\n heapify(data_list, i, 0, draw_data, time_value)\n\n # color the whole list as green after the sort\n draw_data(data_list, [\"green\" for i in range(len(data_list))])",
"def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]",
"def build_heap(self, A: list):\n self.size = len(A)\n med = (self.size // 2) - 1 #Mid point of array\n for i in range(0, med + 1): #Reverse iteration\n self.heapify(A, med - i) #Reverse iteration",
"def heapsort(A):\n \n buildHeap(A)\n for i in range(len(A)-1, 0, -1):\n A[0],A[i] = A[i],A[0]\n heapify(A, 0, i)",
"def heap_sort(array):\n highest_index = len(array)-1\n Heap.heapify(array, highest_index)\n for end in range(highest_index, 0, -1):\n array[end], array[0] = array[0], array[end]\n Heap.sift_down(array, 0, end-1)",
"def __init__(self, data, draw, speed):\n self.heap_sort(data, draw, speed)",
"def build_heap(self, da: DynamicArray) -> None:\n # clear current content\n for _ in range(self.heap.length()):\n self.heap.pop()\n\n # create copy array\n for i in range(da.length()):\n self.heap.append(da.get_at_index(i))\n\n # arr = copy array\n arr = self.heap\n\n # get first non leaf element and save index\n end = arr.length() - 1\n\n # parent of first non leaf element to work back in heap\n track_index = (end - 1) // 2\n\n while track_index >= 0:\n # value at tracking index\n value = arr.get_at_index(track_index)\n\n # child indices/nodes\n left_i = (2 * track_index) + 1\n right_i = (2 * track_index) + 2\n left_child = arr.get_at_index(left_i)\n right_child = arr.get_at_index(right_i)\n\n # index to percolate down\n index = track_index\n\n # while there is a replacement node\n while self.find_replacement(left_i, right_i, left_child, right_child, value):\n node = self.find_replacement(\n left_i, right_i, left_child, right_child, value)\n\n # swap nodes, set new parent/child indices\n arr.swap(node, index)\n index = node\n left_i = (node * 2) + 1\n right_i = (node * 2) + 2\n\n # decrement track index\n track_index -= 1\n\n return",
"def sift_down(heap, start, end):\n # Swap first node with children until no longer smaller.\n i = start\n heaped = False\n while not heaped:\n left = i * 2 + 1\n right = i * 2 + 2\n largest = i\n\n # Find largest of i, left and right\n if left < end and compare(heap[left], heap[largest]) > 0:\n largest = left\n if right < end and compare(heap[right], heap[largest]) > 0:\n largest = right\n\n # If left or right is larger than i, swap and repeat\n if largest == i:\n heaped = True\n else:\n heap[i], heap[largest] = heap[largest], heap[i]\n i = largest",
"def restructureHeap(self):\n\n self.i = 1\n # Storing the elements that already exist in a temporary list\n tempList = []\n for heapElement in self.heap:\n if heapElement != \"NaN\" :\n tempList.append( heapElement )\n\n # Initializing new heap\n self.heap = [\"NaN\"] * self.noOfElements\n\n # Storing all the elements in the temporary list in a continuous fashion in the new heap\n for element in tempList:\n self.insertElement(element, self.i)",
"def heappop(heap):\n pass",
"def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])",
"def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])"
]
| [
"0.8641238",
"0.8321778",
"0.8191848",
"0.7983638",
"0.6863657",
"0.6691737",
"0.64889646",
"0.64148825",
"0.63708085",
"0.6335683",
"0.62689",
"0.6231121",
"0.6208545",
"0.6204041",
"0.61974216",
"0.6182186",
"0.6120005",
"0.6103361",
"0.6030695",
"0.600491",
"0.5994693",
"0.59075993",
"0.58861524",
"0.58757603",
"0.58114094",
"0.57940197",
"0.5741872",
"0.5716863",
"0.5714753",
"0.5714753"
]
| 0.85100245 | 1 |
Reads waypoints from a csv file. | def read_from_csv_file(cls, csv_file_name: str, target_speed):
csv_file = open(csv_file_name)
csv_reader = csv.reader(csv_file)
waypoints = []
for row in csv_reader:
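            # each row is expected to hold the x, y, z coordinates of a single waypoint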
x = float(row[0])
y = float(row[1])
z = float(row[2])
waypoint = pylot.utils.Transform(pylot.utils.Location(x, y, z),
pylot.utils.Rotation(0, 0, 0))
waypoints.append(waypoint)
target_speeds = deque([target_speed for _ in range(len(waypoints))])
return cls(deque(waypoints), target_speeds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_csv():",
"def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row",
"def loadCSV(input_file):",
"def read_csv():\n points = []\n with open(sys.argv[1], \"rU\") as f:\n reader = csv.reader(f)\n for row in reader:\n if len(row) > 3:\n print(\"Points in CSV file are greater than 3 dimensions\")\n sys.exit(0)\n # If set of points is 2 dimensional, autogenerate the 3rd dimension\n elif len(row) == 2:\n row.append(['0'])\n points.append(tuple(map(float, row)))\n return points",
"def read_csv_file(self):\n pass",
"def parse_places(filename):\n places = defaultdict(list)\n with open(filename, 'r') as fn:\n reader = csv.reader(fn)\n skip_rows(reader, 2)\n for row in reader:\n places[int(row[0])].append(Point(float(row[1]), float(row[2])))\n \n return places",
"def _xy_from_csv(file_path):\n\n def pt_from_line(line):\n return [float(x) for x in line.split(',')]\n\n with open(file_path) as csv:\n return [pt_from_line(x) for x in csv]",
"def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)",
"def load_csv(path):\n points = []\n with open(path, 'r') as infile:\n for line in infile:\n line = line.strip().split(',')\n entry = [int(line[0]), int(line[1]), int(line[2]), int(line[3])]\n points.append(entry)\n points = np.array(points)\n return points",
"def get_data(self, csv_file):\n pass",
"def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header",
"def read(self, args):\n\t\twith open(self.filename, 'rb') as csvfile:\n\t\t\tfilereader = csv.reader(csvfile)\n\t\t\tfor row in filereader:\t\t\t#reads the csv line by line\n\t\t\t\tfor num in row:\t\t\t\t#reads each entry in the csv\n\t\t\t\t\tif num != 'NA' and not num.startswith('V'): \t#cuts out the crap we don't care about\n\t\t\t\t\t\tself.all_likes.add((row[0],num))\t\t\t#adds a tuple to the set 'all_likes' with (<IDnum>, <likedIDnum>)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def load_pointing(name):\r\n pointing_data = np.loadtxt(os.path.join(data_dir, '{}.csv'.format(name)), delimiter=',')[:, 1:3]\r\n return pointing_data",
"def read_waypoints():\n\tfilename = \"waypoints.txt\"\n\tfile = open(filename, \"r\")\n\twp_list = []\n\n\tfor line in file:\n\t\t# Get the individual elements, splitting by whitespace\n\t\tdata_list = line.split()\n\t\tcoordinate = {'x': data_list[0], 'y': data_list[1], 'z': data_list[2]}\n\t\twaypoint = {'radius': data_list[3], 'point': coordinate}\n\n\t\twp_list.append (waypoint)\n\n\treturn wp_list",
"def load_map(input_filename: str) -> \"Route\":\n with open(input_filename) as file:\n reader = csv.DictReader(file)\n\n route = Route()\n for row in reader:\n try:\n point = Point(\n id = row[\"index\"],\n x = row[\"x_coord\"],\n y = row[\"y_coord\"],\n )\n route.add(point)\n except ValueError as e:\n # Produce a better error\n row = \"{},{},{}\".format(row[\"index\"],row[\"x_coord\"], row[\"y_coord\"])\n raise ValueError(\"Mapfile contained invalid row \\\"%s\\\"\",row) from e\n return route",
"def _init_goals_from_csv(self, goals_file_path):\n data_array = np.genfromtxt(goals_file_path, dtype=float, delimiter=\",\", skip_header=1)[:, 0:3]\n for i in range(data_array.shape[0]):\n self._goals.append(data_array[i])\n #\n # with open(goals_file_path) as csv_file:\n # reader = csv.reader(csv_file, delimiter=',')\n # line_count = 0\n # for row in reader:\n # if line_count > 0:\n # goal = []\n # for point in row:\n # goal.append(float(point))\n # self._goals.append(np.array(goal))\n # line_count += 1\n self._num_goals = len(self._goals)\n rospy.logwarn(\"[BeliefTracker] Read {} goals from file {} !\".format(self._num_goals, goals_file_path))",
"def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels",
"def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data",
"def read_csv(csv_file):\n LOGGER.debug(\"Opening file: %s\", csv_file)\n with open(csv_file) as f:\n for line in f:\n yield line",
"def from_csv(cls, path: typing.Union[str, Path]) -> \"VLEPoints\":\n frame = pandas.read_csv(path)\n\n if list(frame.columns) != VLE_COLUMNS:\n raise ValueError(\"Incorrect columns: %s\" % list(frame.columns))\n\n points = []\n\n d = frame.iloc[0].to_dict()\n components = [getattr(Components, d[\"first_component\"]),\n getattr(Components, d[\"second_component\"])]\n\n for _, row in frame.iterrows():\n d = row.to_dict()\n points.append(VLEPoint.from_dict(d))\n\n return VLEPoints(components=components, data=points)",
"def read_map_file(mapfile_path):\n\n assert (os.path.isfile(mapfile_path)), \"Cannot find file:\\t\" + mapfile_path\n\n map_reader = csv.reader(open(mapfile_path, 'r'))\n map_reader.next() # Skip the header\n\n # Open the mapping file and fill list\n maplist = list()\n\n for rowitem in map_reader:\n maplist.append(\n {\n 'from_field': rowitem[0],\n 'from_units': rowitem[1],\n 'to_table_name': rowitem[2],\n 'to_field': rowitem[3],\n }\n )\n\n return maplist",
"def read_file(filename):\n reader = csv.reader(open(filename))\n names, distances = [], []\n for row in reader:\n names.append(row[0].strip())\n distances.append(tuple(int(value) for value in row[1:]))\n return names, distances",
"def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row",
"def read_map_file(mapfile_path):\n\n mapfile_path = Path(mapfile_path)\n assert mapfile_path.exists(), f\"Cannot find file: {str(mapfile_path)}\"\n\n map_reader = csv.reader(open(mapfile_path, 'r'))\n map_reader.__next__() # Skip the header\n\n # Open the mapping file and fill list\n maplist = list()\n\n for rowitem in map_reader:\n maplist.append(\n {\n 'from_field': rowitem[0],\n 'from_units': rowitem[1],\n 'to_table_name': rowitem[2],\n 'to_field': rowitem[3],\n }\n )\n\n return maplist",
"def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data",
"def loadCalibrationPoints(self):\n\n with open('cali_points.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n i = 0\n\n for row in csvreader:\n j = 0\n for col in row:\n \n if i < 5:\n self.rgb_click_points[i][j] = int(col)\n j += 1\n if j == 2:\n j = 0\n elif i > 4 :\n self.depth_click_points[i-5][j] = int(col)\n j += 1\n if j ==2:\n j = 0\n i+=1\n self.cameraCalibration()\n pass",
"def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output",
"def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr",
"def read_csv(path):\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output"
]
| [
"0.676114",
"0.6605252",
"0.6556858",
"0.64591825",
"0.6443623",
"0.626401",
"0.6216206",
"0.62039834",
"0.62009734",
"0.6184218",
"0.6182193",
"0.6118941",
"0.60933214",
"0.6092776",
"0.6089563",
"0.60847956",
"0.6071656",
"0.6070581",
"0.60534495",
"0.60489684",
"0.6046502",
"0.60212725",
"0.602074",
"0.60133797",
"0.60013884",
"0.59963965",
"0.59664756",
"0.5966465",
"0.5965842",
"0.59655684"
]
| 0.72340965 | 0 |
Returns the waypoints as a numpy array of lists of x and y. | def as_numpy_array_2D(self):
wx = []
wy = []
for wp in self.waypoints:
wx.append(wp.location.x)
wy.append(wp.location.y)
return np.array([wx, wy]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def points(self):\n return np.vstack((self.x(), self.y()))",
"def points(self):\n p = []\n for v in self.iter():\n p.append((v.x, v.y))\n return p",
"def points(self):\n return self._arr.T.ravel().view(\n dtype=[('x', self.dtype), ('y', self.dtype), ('z', self.dtype)])",
"def coordinates(self):\n return np.array([[f.x, f.y] for f in self])",
"def to_list(self):\n path = []\n for point in self.points:\n path.append(point.to_dict())\n\n return path",
"def path_response_to_array(get_path_response):\n point_list = list()\n for pose in get_path_response.path.poses:\n arr = np.array([pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z])\n\n point_list.append(arr)\n return np.array(point_list)",
"def np(self):\n return np.array([self.x, self.y])",
"def coordinates(self):\n return np.array([self.x, self.y])",
"def dlib_point_to_np_array(point: dlib.point):\n return np.array([point.x, point.y])",
"def _vertex_arrays_to_list(x_coords_metres, y_coords_metres):\n\n _check_polyline(\n x_coords_metres=x_coords_metres, y_coords_metres=y_coords_metres)\n\n num_vertices = len(x_coords_metres)\n vertex_list_xy_metres = []\n for i in range(num_vertices):\n vertex_list_xy_metres.append((x_coords_metres[i], y_coords_metres[i]))\n\n return vertex_list_xy_metres",
"def get_coordinates(self):\n return np.array([(n.x, n.y) for n in self.nodes])",
"def to_list(self):\n return [self.x, self.y, self.z]",
"def xy(self):\n xcol = self.xx.reshape(-1, 1)\n ycol = self.yy.reshape(-1, 1)\n return np.column_stack([xcol, ycol])",
"def get_all_coordinates(self):\n coordinates = []\n\n for relative_coordinate in self.shape:\n co = [self.coordinate[0] + relative_coordinate[0], self.coordinate[1] + relative_coordinate[1]]\n coordinates.append(co)\n return coordinates",
"def getRuptureAsArrays(self):\n return (np.array(self._lon),\n np.array(self._lat),\n np.array(self._depth))",
"def get_coordinates(self):\r\n coordinates_list = []\r\n for i in range(self.__length):\r\n if self.__orientation == Direction.VERTICAL:\r\n temp = (self.__location[0] + i, self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n temp = (self.__location[0], self.__location[1] + i)\r\n coordinates_list.append(temp)\r\n return coordinates_list",
"def to_list(self):\n return [self._position, self._focal_point, self._viewup]",
"def get_coords(self):\n return [self.x,self.y,self.w,self.h]",
"def obtain_points(self):\n # Swapaxes makes the output a column rather than a row\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"GridCoordinates\"][\"CoordinateZ\"][\" data\"][:, :, :])]), 0, 1)",
"def get_coordinates_list(self):\n return [tweet['coordinates'][::-1] for tweet in self.tweets_data]",
"def coords(self):\n return np.column_stack((self.x_coord_list, self.y_coord_list, self.z_coord_list))",
"def get_features(self):\n x,y = self.agent\n return np.array([x,y])",
"def _build_points(self, xs, ys):\n return np.hstack([xs, ys])",
"def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]",
"def getDoubleArray2D(self) -> typing.List[typing.List[float]]:\n ...",
"def vertices(self) -> list[Point]:\n a = Point(self.array[..., 0, :], copy=False)\n b = Point(self.array[..., 1, :], copy=False)\n return [a, b]",
"def points(self):\n angles_positions = [a.position[0] for a in self.angles]\n x, y = 0, 0\n points = [[x, y]]\n for angle, length in zip(angles_positions, self.lengths):\n x += length * math.sin(angle)\n y += -length * math.cos(angle)\n points.append([x, y])\n return points",
"def xy_coordinates(self):\n\n return np.meshgrid(self.x_coord, self.y_coord)",
"def to_y(self):\n return [self.x, self.y, self.vx, self.vy]",
"def convert(points):\n distance = []\n for i in points:\n x = int(i[0])\n y = int(i[1])\n distance.append([x,y])\n return distance"
]
| [
"0.74419016",
"0.71239907",
"0.69828236",
"0.6943635",
"0.69008195",
"0.67944545",
"0.6707484",
"0.6685052",
"0.66268176",
"0.65956616",
"0.65912074",
"0.6582299",
"0.65334374",
"0.65076655",
"0.65031314",
"0.64859104",
"0.6408881",
"0.63892144",
"0.63890857",
"0.63744074",
"0.63551766",
"0.62981355",
"0.62740076",
"0.6233475",
"0.62121415",
"0.6187566",
"0.6164635",
"0.61526865",
"0.6125741",
"0.6117667"
]
| 0.8419584 | 0 |
Finds the closest waypoint to the location. | def closest_waypoint(self, location: pylot.utils.Location):
min_dist = np.infty
min_index = 0
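        # linear scan over the waypoints, keeping the index of the smallest distance seen so far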
for index, waypoint in enumerate(self.waypoints):
dist = waypoint.location.distance(location)
if dist < min_dist:
min_dist = dist
min_index = index
return min_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_closest_waypoint(self, pose):\n # Find the nearest waypoint\n closest_distance = float('inf')\n closest_waypoint = 0\n for i in range(len(self.waypoints)):\n this_distance = self.distance_to_position(self.waypoints, i, pose.position)\n if this_distance < closest_distance:\n closest_distance = this_distance\n closest_waypoint = i\n return closest_waypoint",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0",
"def get_closest_waypoint(self, x, y):\n # TODO implement\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx",
"def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx",
"def _get_closest_waypoint(self, pose):\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = self.waypoints_tree.query([x,y],1)[1]\n\n return closest_idx",
"def get_closest_lane_waypoint(self, location):\n loc = to_carla_location(location)\n waypoint = self._map.get_waypoint(loc,\n project_to_road=True,\n lane_type=carla.LaneType.Any)\n return waypoint",
"def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx",
"def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx",
"def get_closest_waypoint(self, pose):\n wpclosestDist = sys.maxint\n for index in range(len(self.waypoints.waypoints)):\n wp = self.waypoints.waypoints[index]\n wpdist = self.calcDistance_PoseStamped(pose, wp.pose)\n if(wpclosestDist > wpdist):\n wpclosestDist = wpdist\n wpindex = index\n return wpindex",
"def get_closest_waypoint_idx(self):\n\n # Position\n x = self.car_pose.pose.position.x\n y = self.car_pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Coordinates\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Hyper Plane\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx",
"def FindClosestPoint(self, ):\n ...",
"def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)",
"def get_closest_waypoint_idx(self, pose):\n d, kdwp = self.kdtree.query((pose.position.x, pose.position.y))\n return kdwp",
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path",
"def closest_other_location(state):\n locations = others_locations(state)\n target = closest_other(state)\n return locations[target]",
"def getClosestLocation(self, locations):\n obs = self.observation\n if len(locations) > 0:\n min_i = 0\n min_dist = point_dist(obs.loc, locations[0][0:2])\n for i in range(1, len(locations)):\n dist = point_dist(obs.loc, locations[i][0:2])\n if dist < min_dist:\n min_i = i\n min_dist = dist\n return locations[min_i][0:2]\n else:\n return None",
"def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance",
"def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1",
"def get_next_destination(self) -> Location:\n # Find index of current location in route\n i = 0\n while i < len(self.route):\n this_loc = self.route[i]\n #\n if i > 0 and this_loc == self.route[-1]:\n return None\n if this_loc == self.current_location:\n return self.route[i + 1]\n i += 1",
"def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint",
"def test_find_closest_waypoints_no_position(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n self.assertIsNone(planner.find_closest_waypoints(1))",
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def get_waypoint(self, location):\n try:\n response = self._get_waypoint_client(location)\n return response.waypoint\n except (rospy.ServiceException, rospy.ROSInterruptException) as e:\n if not rospy.is_shutdown:\n rospy.logwarn(\"Service call failed: {}\".format(e))",
"def closest_other(state):\n locations = others_locations(state)\n distances_ = distances(my_location(state), list(locations.values()))\n dist_dict = {key: dist for key, dist in zip(locations, distances_)}\n target = util.argmin_dict(dist_dict)\n return target",
"def _find_closest_epw(lat, lon, df):\n from shapely.ops import nearest_points\n\n # find the nearest point and return the corresponding Place value\n pts = df.unary_union\n nearest = df.geometry == nearest_points(Point(lon, lat), pts)[1]\n\n return df.loc[nearest, [\"url\", \"title\"]].iloc[0]",
"def closest_point(point, points):\n return points[cdist([point], points).argmin()]"
]
| [
"0.80371714",
"0.77580124",
"0.7752407",
"0.7701288",
"0.7662863",
"0.7629899",
"0.7607416",
"0.7569968",
"0.7476396",
"0.74598616",
"0.72960234",
"0.7292274",
"0.7150513",
"0.7143684",
"0.7138067",
"0.7007108",
"0.69992584",
"0.69735056",
"0.682834",
"0.66521144",
"0.66409045",
"0.65152925",
"0.64989334",
"0.64467067",
"0.63704336",
"0.6291384",
"0.6269494",
"0.622321",
"0.6196694",
"0.61938804"
]
| 0.783909 | 1 |
Removes the first waypoint if it is less than distance m away. | def remove_waypoint_if_close(self,
location: pylot.utils.Location,
distance: float = 5) -> bool:
if self.waypoints is None or len(self.waypoints) == 0:
return False
if location.distance(self.waypoints[0].location) < distance:
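            # drop the reached waypoint together with its matching target speed and road option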
self.waypoints.popleft()
if self.target_speeds:
self.target_speeds.popleft()
if self.road_options:
self.road_options.popleft()
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)",
"def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)",
"def truncate(self, distance):\n position = np.searchsorted(self._cum_norm, distance)\n offset = distance - self._cum_norm[position - 1]\n\n if offset < constants.tol_path.merge:\n truncated = self._points[:position + 1]\n else:\n vector = util.unitize(np.diff(\n self._points[np.arange(2) + position],\n axis=0).reshape(-1))\n vector *= offset\n endpoint = self._points[position] + vector\n truncated = np.vstack((self._points[:position + 1],\n endpoint))\n assert (util.row_norm(np.diff(\n truncated, axis=0)).sum() -\n distance) < constants.tol_path.merge\n\n return truncated",
"def removeNextMove(self):\n self.path.remove(self.path[self.index])",
"def prune_path(self, path):\n while len(path.poses) >= 2:\n pose0 = pose322(r2g(path.poses[0].pose))\n pose1 = pose322(r2g(path.poses[1].pose))\n d0 = np.linalg.norm(g2n(pose0.between(self.current_frame.pose)))\n d1 = np.linalg.norm(g2n(pose1.between(self.current_frame.pose)))\n if d1 < d0:\n path.poses.pop(0)\n else:\n break\n return path",
"def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance",
"def get_closest_waypoint(self, pose):\n wpclosestDist = sys.maxint\n for index in range(len(self.waypoints.waypoints)):\n wp = self.waypoints.waypoints[index]\n wpdist = self.calcDistance_PoseStamped(pose, wp.pose)\n if(wpclosestDist > wpdist):\n wpclosestDist = wpdist\n wpindex = index\n return wpindex",
"def undeletepoints(self, x, y=None):\n if len(self.dxp)<1: return\n if len(self.dxp)==1: \n self.xp.append(self.dxp[0])\n self.wp.append(self.dwp[0])\n self.dxp.__delitem__(0)\n self.dwp.__delitem__(0)\n return\n\n dist=(self.dxp-x)**2\n if y is not None: \n w=self.ws.value(np.array(self.dxp))\n #dist += (self.dwp-w-y)**2\n in_minw=dist.argmin()\n\n self.xp.append(self.dxp[in_minw])\n self.wp.append(self.dwp[in_minw])\n self.dxp.__delitem__(in_minw)\n self.dwp.__delitem__(in_minw)\n\n return",
"def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def removeIfDead(self):\n if self.y < 0:\n del projectiles[findPlace(self, projectiles)]",
"def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)",
"def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n for i in range(self.nstrats):\n self.sampling_strategies[i].remove_point(x)\n return True\n return False",
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def move_distance(self, distance, speed=1.0):\n distance = random.normalvariate(distance, self.standard_deviation)\n\n start_point = self.get_position()\n traveled_distance = 0\n while traveled_distance < distance:\n self.forward(speed)\n current_point = self.get_position()\n traveled_distance = math.sqrt(\n math.pow((start_point[0] - current_point[0]), 2) + math.pow((start_point[1] - current_point[1]), 2))\n self.stop()",
"def free_spot(self, start, distance, p1):\n free = False\n spot = 25 - start - distance\n #do we have a valid position to consider?\n if (spot > 0):\n #which player are we?\n if (p1):\n if (self.p2vec[spot] < 2):\n free = True\n else:\n if (self.p1vec[spot] < 2):\n free = True\n if (spot == 0):\n free = True\n return free",
"def update_wp_position(self, event):\n wp = -1\n cur_pos = np.array(\n [self.global_pose.latitude, self.global_pose.longitude])\n for idx, waypoint in enumerate(self.waypoints):\n temp = np.array([waypoint['lat'], waypoint['long']])\n alt_diff = abs(self._rel_alt[-1] - waypoint['rel_alt'])\n if idx == 0 and (np.linalg.norm(cur_pos - temp) < self._radius):\n wp = idx\n break\n elif (np.linalg.norm(cur_pos - temp) <\n self._radius) and (alt_diff < self._alt_radius):\n wp = idx\n break\n self._current_wp = wp",
"def remove_completed(self,\n location: pylot.utils.Location,\n ego_transform: pylot.utils.Transform = None):\n min_index = self.closest_waypoint(location)\n # Remove waypoints that are before the closest waypoint. The ego\n # vehicle already completed them.\n while min_index > 0:\n self.waypoints.popleft()\n if self.target_speeds:\n self.target_speeds.popleft()\n if self.road_options:\n self.road_options.popleft()\n min_index -= 1\n\n if self.waypoints is None or len(self.waypoints) == 0:\n if ego_transform is not None:\n self.waypoints = deque([ego_transform])\n self.target_speeds = deque([0])\n self.road_options = deque([pylot.utils.RoadOption.LANE_FOLLOW])\n else:\n raise ValueError('No more waypoints to follow')",
"def remove_self(self):\n if self.game.rules[\"trapping\"]:\n [neighbor.untrap() for neighbor in self.get_neighbors() if neighbor.trapped and self in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]\n self.game.empty_square(self.position)\n self.position = None",
"def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a",
"def get_closest_waypoint(self, pose):\n # Find the nearest waypoint\n closest_distance = float('inf')\n closest_waypoint = 0\n for i in range(len(self.waypoints)):\n this_distance = self.distance_to_position(self.waypoints, i, pose.position)\n if this_distance < closest_distance:\n closest_distance = this_distance\n closest_waypoint = i\n return closest_waypoint",
"def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index",
"def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False",
"def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False",
"def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False",
"def deletepoints(self, x, y=None, save=False):\n dist=(np.array(self.xp)-x)**2\n\n #assumes you are using the error plot\n if y is not None: \n w=self.ws.value(np.array(self.xp))\n norm=self.xarr.max()/abs(self.wp-w).max()\n dist += norm*(self.wp-w-y)**2\n #print y, norm, dist.min()\n #print y, dist.min()\n in_minw=dist.argmin()\n\n if save:\n self.dxp.append(self.xp[in_minw])\n self.dwp.append(self.wp[in_minw])\n self.xp.__delitem__(in_minw)\n self.wp.__delitem__(in_minw)",
"def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx",
"def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min",
"def next_gps(self):\n \n return Waypoint(0.0, 0.0)"
]
| [
"0.5926494",
"0.58535093",
"0.57727855",
"0.56410366",
"0.5638744",
"0.56016695",
"0.5599047",
"0.557397",
"0.5569027",
"0.55392486",
"0.55283374",
"0.5494964",
"0.5493939",
"0.5467968",
"0.54155433",
"0.5393493",
"0.5383858",
"0.53701687",
"0.53489786",
"0.5347584",
"0.5343529",
"0.53366977",
"0.53067243",
"0.5273255",
"0.52376884",
"0.52376884",
"0.5193455",
"0.5186026",
"0.516807",
"0.51581556"
]
| 0.7161757 | 0 |
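The record above captures a common route-tracking idiom: keep the route as a deque and pop the head waypoint once the vehicle comes within a small radius of it. A minimal sketch of that idiom with plain (x, y) tuples, assuming nothing about pylot's types beyond the 5 m default shown in the snippet (every name below is an illustrative stand-in, not pylot's API):

```python
import math
from collections import deque

def pop_if_reached(route, ego_xy, threshold=5.0):
    """Drop the head waypoint once the vehicle is within `threshold` metres of it."""
    if route and math.dist(route[0], ego_xy) < threshold:
        route.popleft()
        return True
    return False

route = deque([(0.0, 0.0), (10.0, 0.0), (20.0, 0.0)])
print(pop_if_reached(route, ego_xy=(1.0, 0.5)))  # True: head (0, 0) is ~1.1 m away
print(pop_if_reached(route, ego_xy=(1.0, 0.5)))  # False: new head (10, 0) is ~9 m away
```

In the full class the parallel target_speeds and road_options deques are popped in lockstep, which keeps the three sequences aligned by index.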
Removes waypoints that the ego vehicle has already completed. The method first finds the closest waypoint to the location, and then removes all waypoints that are before the closest waypoint. | def remove_completed(self,
location: pylot.utils.Location,
ego_transform: pylot.utils.Transform = None):
min_index = self.closest_waypoint(location)
# Remove waypoints that are before the closest waypoint. The ego
# vehicle already completed them.
while min_index > 0:
self.waypoints.popleft()
if self.target_speeds:
self.target_speeds.popleft()
if self.road_options:
self.road_options.popleft()
min_index -= 1
if self.waypoints is None or len(self.waypoints) == 0:
if ego_transform is not None:
self.waypoints = deque([ego_transform])
self.target_speeds = deque([0])
self.road_options = deque([pylot.utils.RoadOption.LANE_FOLLOW])
else:
raise ValueError('No more waypoints to follow') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_waypoint_if_close(self,\n location: pylot.utils.Location,\n distance: float = 5) -> bool:\n if self.waypoints is None or len(self.waypoints) == 0:\n return False\n if location.distance(self.waypoints[0].location) < distance:\n self.waypoints.popleft()\n if self.target_speeds:\n self.target_speeds.popleft()\n if self.road_options:\n self.road_options.popleft()\n return True\n return False",
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)",
"def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def get_closest_waypoint(self, pose):\n wpclosestDist = sys.maxint\n for index in range(len(self.waypoints.waypoints)):\n wp = self.waypoints.waypoints[index]\n wpdist = self.calcDistance_PoseStamped(pose, wp.pose)\n if(wpclosestDist > wpdist):\n wpclosestDist = wpdist\n wpindex = index\n return wpindex",
"def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx",
"def test_find_closest_waypoints_no_position(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n self.assertIsNone(planner.find_closest_waypoints(1))",
"def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index",
"def get_closest_waypoint(self, pose):\n # Find the nearest waypoint\n closest_distance = float('inf')\n closest_waypoint = 0\n for i in range(len(self.waypoints)):\n this_distance = self.distance_to_position(self.waypoints, i, pose.position)\n if this_distance < closest_distance:\n closest_distance = this_distance\n closest_waypoint = i\n return closest_waypoint",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n\n # ignore the first entry, just remember it for further compares\n if not self.prev_point:\n self.prev_point = waypoint\n return None\n\n if self.is_driving(self.prev_point, waypoint):\n if not self.start_point:\n # indicates trip start\n self.start_point = self.prev_point\n else:\n # indicates trip finish\n if self.start_point:\n d = self.calc_distance(self.start_point, self.prev_point)\n trip = Trip(d, self.start_point, self.prev_point)\n self.start_point = None\n return trip\n self.prev_point = waypoint\n return None",
"def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)",
"def update_wp_position(self, event):\n wp = -1\n cur_pos = np.array(\n [self.global_pose.latitude, self.global_pose.longitude])\n for idx, waypoint in enumerate(self.waypoints):\n temp = np.array([waypoint['lat'], waypoint['long']])\n alt_diff = abs(self._rel_alt[-1] - waypoint['rel_alt'])\n if idx == 0 and (np.linalg.norm(cur_pos - temp) < self._radius):\n wp = idx\n break\n elif (np.linalg.norm(cur_pos - temp) <\n self._radius) and (alt_diff < self._alt_radius):\n wp = idx\n break\n self._current_wp = wp",
"def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)",
"def remove_premature_departures(trips):\n\t# sort ascending by arrival \n\t# then iteratively remove trips not also sorted by departure\n\tstarting_length = len(trips) # for logging\n\t#\n\ttrips.sort(key = lambda x: x.arrive_ts) # arrival, first to last\n\ti = 1\n\twhile i < len(trips):\n\t\t# if departure is before that of earlier-arriving trip\n\t\tif trips[i].depart_ts <= trips[i-1].depart_ts: \n\t\t\ttrips.pop(i)\n\t\t\tcontinue\n\t\ti+=1\n\t# there should be no simultaneous departures\n\tassert len(set([t.depart_ts for t in trips])) == len(trips)",
"def _remove_points(self, points_to_remove, teams_population):\n for team in teams_population:\n for point in points_to_remove:\n if point.point_id_ in team.results_per_points_:\n team.results_per_points_.pop(point.point_id_)",
"def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx",
"def removeNextMove(self):\n self.path.remove(self.path[self.index])",
"def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path",
"def satisfied_waypoints(cls, home_pos, waypoints, uas_telemetry_logs):\n # Form utm for use as projection in distance calcluations.\n zone, north = distance.utm_zone(home_pos.latitude, home_pos.longitude)\n utm = distance.proj_utm(zone, north)\n\n # Reduce telemetry from telemetry to waypoint hits.\n # This will make future processing more efficient via data reduction.\n # While iterating, compute the best distance seen for feedback.\n best = {}\n hits = []\n for iu, start_log in enumerate(uas_telemetry_logs):\n end_log = None\n if iu + 1 < len(uas_telemetry_logs):\n end_log = uas_telemetry_logs[iu + 1]\n for iw, waypoint in enumerate(waypoints):\n dist = cls.closest_interpolated_distance(\n start_log, end_log, waypoint, utm)\n best[iw] = min(best.get(iw, dist), dist)\n score = cls.score_waypoint(dist)\n if score > 0:\n hits.append((iw, dist, score))\n # Remove redundant hits which wouldn't be part of best sequence.\n # This will make future processing more efficient via data reduction.\n hits = [\n max(g, key=lambda x: x[2])\n for _, g in itertools.groupby(hits, lambda x: x[0])\n ]\n\n # Find highest scoring sequence via dynamic programming.\n # Implement recurrence relation:\n # S(iw, ih) = s[iw, ih] + max_{k=[0,ih)} S(iw-1, k)\n dp = defaultdict(lambda: defaultdict(lambda: (0, None, None)))\n highest_total = None\n highest_total_pos = (None, None)\n for iw in xrange(len(waypoints)):\n for ih, (hiw, hdist, hscore) in enumerate(hits):\n # Compute score for assigning current hit to current waypoint.\n score = hscore if iw == hiw else 0.0\n # Compute best total score, which includes this match score and\n # best of all which could come before it.\n prev_iw = iw - 1\n total_score = score\n total_score_back = (None, None)\n if prev_iw >= 0:\n for prev_ih in xrange(ih + 1):\n (prev_total_score, _) = dp[prev_iw][prev_ih]\n new_total_score = prev_total_score + score\n if new_total_score > total_score:\n total_score = new_total_score\n total_score_back = (prev_iw, prev_ih)\n dp[iw][ih] = (total_score, total_score_back)\n # Track highest score seen.\n if total_score > highest_total:\n highest_total = total_score\n highest_total_pos = (iw, ih)\n # Traceback sequence to get scores and distance for score.\n scores = defaultdict(lambda: (0, None))\n cur_pos = highest_total_pos\n while cur_pos != (None, None):\n cur_iw, cur_ih = cur_pos\n hiw, hdist, hscore = hits[cur_ih]\n if cur_iw == hiw:\n scores[cur_iw] = (hscore, hdist)\n _, cur_pos = dp[cur_iw][cur_ih]\n\n # Convert to evaluation.\n waypoint_evals = []\n for iw, waypoint in enumerate(waypoints):\n score, dist = scores[iw]\n waypoint_eval = mission_pb2.WaypointEvaluation()\n waypoint_eval.id = iw\n waypoint_eval.score_ratio = score\n if dist is not None:\n waypoint_eval.closest_for_scored_approach_ft = dist\n if iw in best:\n waypoint_eval.closest_for_mission_ft = best[iw]\n waypoint_evals.append(waypoint_eval)\n return waypoint_evals",
"def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx",
"def loop_for_treasure_locations_array(locations):\n for index in range(len(locations)):\n if turt.distance(locations[index][0], locations[index][1]) <= 20:\n locations.remove(locations[index])\n locations = loop_for_treasure_locations_array(locations)\n break\n return locations",
"def detour(src, dst, pitstop):\n options = on_path([src, dst],query='shell gas station', size=10,urgency=0)\n ret = []\n for place in options:\n title = place['title']\n x = place['latlon']\n addr = place['address']\n A_X = dist(src, x); X_B = dist(x, dst)\n consumer_dist = A_X['distance'] + X_B['distance']\n tour_time = A_X['trafficTime']+X_B['trafficTime']\n last_mile_dist = 2*dist(pitstop, x)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n ret.append({\"distance\" : consumer_dist,\n \"latlon\" : x,\n \"title\" : title,\n \"time\" : tour_time,\n \"address\" : addr,\n \"carbon\" : carbon_print})\n ret = sorted(ret, key=lambda loc: loc.get('distance'))\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst carbon\n consumer_dist = dist(src, dst)['distance']\n last_mile_dist = 2*dist(pitstop, dst)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst case time A - C - B\n A_C = dist(src, pitstop)\n C_B = dist(pitstop, dst)\n total_time = A_C['trafficTime'] + C_B['trafficTime']\n return {\"meetpoints\" : ret, 'worst_time' : total_time, \"worst_carbon\" : carbon_print}",
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def remove_route(g, origin, destination, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n \n # Removes both directions and returns \n if(choice_dir == \"y\"):\n \n \n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != destination_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != origin_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n \n # Removes one direction and returns\n if(choice_dir == \"n\"):\n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n g.city_dict[key].set_flights_in(new_flights_in)\n \n return g",
"def check_directions_find_waypoint(current_point, current_segment,\n delta_before_after, segmented_points):\n\n delta_lat_before_current = delta_before_after[0]\n delta_lng_before_current = delta_before_after[1]\n\n delta_lat_after_current = delta_before_after[2]\n delta_lng_after_current = delta_before_after[3]\n\n # check to see if the delta x's in both directions are longer\n # than the delta y's in both directions\n if (delta_lat_before_current > delta_lng_before_current) and \\\n (delta_lat_after_current > delta_lng_after_current):\n print \"inside first if\"\n # the latitudes are longer than the longitudes, get waypoints\n # in the longitude direction\n\n # don't forget to generate waypoints\n waypoint_e_w = inspect_waypoints(current_point, \"lngwise\")\n try_waypoints(waypoint_e_w, current_segment, segmented_points)\n elif (delta_lng_before_current > delta_lat_before_current) and \\\n (delta_lng_after_current > delta_lat_after_current):\n print \"inside elif, checks the north and south creation\"\n # the longitudes are longer than the latitudes, get waypoints\n # in the latitude direction\n\n # don't forget to generate waypoints\n waypoint_n_s = inspect_waypoints(current_point, \"latwise\")\n try_waypoints(waypoint_n_s, current_segment, segmented_points)\n else:\n print \"inside else, checks all directions NS-EW\"\n\n # don't forget to generate waypoints\n waypoint_all = inspect_waypoints(current_point, \"all\")\n try_waypoints(waypoint_all, current_segment, segmented_points)\n\n # return only the waypoints and start/end lat,lngs\n return segmented_points",
"def path_correction(data, user_coords):\n # Return list if it only has the destination\n if len(data) == 1:\n return data\n\n # Calculate distance from user to second waypoint\n second_coords = (data[1][\"lat\"], data[1][\"lon\"])\n user_second_dist = geopy.distance.distance(user_coords, second_coords).miles\n\n # Calculate distance from user to first waypoint\n first_coords = (data[0][\"lat\"], data[0][\"lon\"])\n user_first_dist = geopy.distance.distance(user_coords, first_coords).km\n\n # Calculate distance from first waypoint to second waypoint\n first_second_dist = geopy.distance.distance(first_coords, second_coords).miles\n\n # Determine if path correction is applicable\n if user_second_dist < first_second_dist or user_first_dist < 0.01:\n # Delete first element of list so that user doesn't backtrack\n return data[1:]\n else:\n # No path correction needed\n return data",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n ...",
"def updatePositionAndClean(self):\n if self.speed < 1:\n possible_path = self.position.getNewPosition(self.direction, self.speed)\n while not self.room.isPositionInRoom(possible_path):\n self.setRobotDirection(random.randrange(359))\n possible_path = self.position.getNewPosition(self.direction, self.speed)\n self.setRobotPosition(possible_path)\n self.room.cleanTileAtPosition(self.position)\n else:\n possible_moves_on_one_clock = int(self.speed)\n for i in range(possible_moves_on_one_clock):\n possible_path = self.position.getNewPosition(self.direction, 1)\n while not self.room.isPositionInRoom(possible_path):\n self.setRobotDirection(random.randrange(359))\n possible_path = self.position.getNewPosition(self.direction, 1)\n self.setRobotPosition(possible_path)\n self.room.cleanTileAtPosition(self.position)\n possible_path = self.position.getNewPosition(self.direction, (self.speed - possible_moves_on_one_clock))\n while not self.room.isPositionInRoom(possible_path):\n self.setRobotDirection(random.randrange(359))\n possible_path = self.position.getNewPosition(self.direction, (self.speed - possible_moves_on_one_clock))\n self.setRobotPosition(possible_path)\n self.room.cleanTileAtPosition(self.position)",
"def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint"
]
| [
"0.66268104",
"0.61959696",
"0.584867",
"0.57383746",
"0.56080455",
"0.5463144",
"0.5447195",
"0.5391344",
"0.53584206",
"0.5355911",
"0.531375",
"0.5229986",
"0.5179607",
"0.5134207",
"0.51061606",
"0.50994265",
"0.50901127",
"0.5024311",
"0.50144416",
"0.50069064",
"0.49792922",
"0.4975681",
"0.49746335",
"0.49493155",
"0.49456364",
"0.49381438",
"0.4933992",
"0.49052748",
"0.48939678",
"0.48880577"
]
| 0.76615846 | 0 |
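The `remove_completed` document above follows a two-step pruning pattern: find the index of the closest waypoint, then pop everything before it (together with the matching entries of the parallel speed/road-option deques). A self-contained sketch of the same pattern with plain (x, y) tuples; the helper names and data below are stand-ins for illustration, not pylot's API:

```python
import math
from collections import deque

def closest_index(waypoints, ego_xy):
    """Index of the waypoint nearest to the ego position."""
    return min(range(len(waypoints)), key=lambda i: math.dist(waypoints[i], ego_xy))

def prune_completed(waypoints, target_speeds, ego_xy):
    """Drop waypoints (and their speeds) that lie before the closest one."""
    for _ in range(closest_index(waypoints, ego_xy)):
        waypoints.popleft()
        target_speeds.popleft()

waypoints = deque((float(i), 0.0) for i in range(10))
speeds = deque(10.0 for _ in range(10))
prune_completed(waypoints, speeds, ego_xy=(3.4, 0.2))
print(waypoints[0], speeds[0])  # (3.0, 0.0) 10.0 -- the closest waypoint is now the head
```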
Returns the angle between the transform and the first waypoint that is at least min_distance away. | def get_angle(self, transform: pylot.utils.Transform,
min_distance: float) -> float:
wp_index = self._get_index(transform, min_distance)
angle, _ = transform.get_angle_and_magnitude(
self.waypoints[wp_index].location)
return angle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)",
"def angle(self):\n return math.degrees(math.atan2(self[1], self[0]))",
"def find_allowable_angle(self, dist: float) -> float:\n angle = math.atan(self.TRUE_TARGET_RADIUS / dist)\n # print(f\"angle tolerance +- {angle} true target radius{self.TRUE_TARGET_RADIUS}\")\n return angle",
"def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)",
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta",
"def get_angle_to_face_point(self, point):\n start = self.swarmie.get_odom_location().get_pose()\n return angles.shortest_angular_distance(\n start.theta,\n math.atan2(point.y - start.y, point.x - start.x)\n )",
"def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)",
"def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance",
"def angle(self) -> float:\n ...",
"def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)",
"def angle_to(self, latlng):\n y_node, x_node = latlng.lat, latlng.lng\n y_self, x_self = self.lat, self.lng\n return atan2(y_node - y_self, x_node - x_self)",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def angle(self):\n return atan2(self.v.y, self.v.x)",
"def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang",
"def get_shortest_angle(target_angle, current_angle):\n a1 = target_angle\n a2 = current_angle\n return math.atan2(math.sin(a1-a2), math.cos(a1-a2))",
"def get_abs_dist(self, pos1, pos2):\n\t\treturn min(abs(pos1 - pos2), abs(pos1 - pos2 + 360))",
"def convergence_angle(self):\n return np.arctan2(self.radius, self.focal_length)",
"def rotation_error(self, goal):\n angerr1 = normalize(goal.theta - self.theta)\n angerr2 = normalize(goal.theta - (self.theta-180))\n if abs(angerr1) < abs(angerr2):\n return angerr1\n else:\n return angerr2",
"def closer_angle(x, a, dir=0):\n if dir == 0:\n return a + smaller_angle(x-a)\n elif dir == 1:\n return a + (x-a)%(2*np.pi)\n elif dir == -1:\n return a + (x-a)%(2*np.pi) - 2*np.pi",
"def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])",
"def angle(self):\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm,self.scalar))",
"def _angle_between(self, point_1, point_2):\n angle_1 = math.atan2(point_1.y, point_1.x)\n angle_2 = math.atan2(point_2.y, point_2.x)\n return angles.shortest_angular_distance(angle_1, angle_2)",
"def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))",
"def get_angle(self, pos, nextpos):\n delta = nextpos - pos\n theta = np.arctan(delta[1]/delta[0]) * 180 / np.pi\n if delta[0] < 0:\n return theta + 180\n return theta",
"def angle(self) -> int:",
"def angle(self):\n return 0",
"def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))",
"def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi",
"def get_angle_degrees_between(self, other):\n return math.degrees(self.get_angle_between(other))"
]
| [
"0.64367676",
"0.63680017",
"0.6264427",
"0.6071864",
"0.6054758",
"0.60400385",
"0.6021209",
"0.6013427",
"0.6007506",
"0.59982485",
"0.5993357",
"0.5977916",
"0.5957898",
"0.5948635",
"0.59239256",
"0.59089994",
"0.5888465",
"0.58789474",
"0.5874491",
"0.5872852",
"0.58443975",
"0.5823292",
"0.58009136",
"0.5794767",
"0.5785637",
"0.57697535",
"0.5768415",
"0.57509124",
"0.57490885",
"0.57384145"
]
| 0.8047479 | 0 |
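The `get_angle` document above encodes a look-ahead rule used by many steering controllers: skip to the first waypoint at least `min_distance` away, then measure how far its bearing deviates from the current heading. A minimal sketch with plain coordinates, assuming an ego pose of (x, y, yaw) in radians; none of the names below are pylot's API:

```python
import math

def lookahead_heading_error(waypoints, ego_x, ego_y, ego_yaw, min_distance):
    """Angle between the ego heading and the first waypoint >= min_distance away."""
    index = len(waypoints) - 1  # fall back to the last waypoint
    for i, (wx, wy) in enumerate(waypoints):
        if math.hypot(wx - ego_x, wy - ego_y) >= min_distance:
            index = i
            break
    wx, wy = waypoints[index]
    error = math.atan2(wy - ego_y, wx - ego_x) - ego_yaw
    # Wrap into [-pi, pi] so the sign gives the turn direction.
    return math.atan2(math.sin(error), math.cos(error))

route = [(1.0, 0.0), (3.0, 0.5), (6.0, 2.0), (9.0, 4.0)]
print(lookahead_heading_error(route, 0.0, 0.0, 0.0, min_distance=5.0))  # ~0.32 rad
```

Feeding this error into a PID or pure-pursuit controller is the usual next step; the companion `get_vector` record below returns the raw 2D offset rather than the angle.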
Returns the vector between the transform and the first waypoint that is at least min_distance away. | def get_vector(self, transform: pylot.utils.Transform,
min_distance: float):
wp_index = self._get_index(transform, min_distance)
return self.waypoints[wp_index].location.as_vector_2D() - \
transform.location.as_vector_2D() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)",
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)",
"def find_min_distance():\n return np.argmin(d)",
"def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0",
"def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0",
"def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1",
"def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance",
"def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx",
"def getOriginVector(self):\n return self.dest - self.orig",
"def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index",
"def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])",
"def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)",
"def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint",
"def get_waypoint_pos(pos: list, waypoints: numpy.ndarray):\n dist = np.sum((pos - waypoints)**2, axis=1)\n wp_id = np.argmin(dist)\n return waypoints[wp_id], wp_id",
"def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0",
"def distance_vehicle(waypoint, vehicle_transform):\n loc = vehicle_transform.location\n x = waypoint.transform.location.x - loc.x\n y = waypoint.transform.location.y - loc.y\n\n return math.sqrt(x * x + y * y)",
"def distance_vehicle(waypoint, vehicle_transform):\n loc = vehicle_transform.location\n x = waypoint.transform.location.x - loc.x\n y = waypoint.transform.location.y - loc.y\n\n return math.sqrt(x * x + y * y)",
"def FindClosestPoint(self, ):\n ...",
"def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2",
"def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)",
"def get_closest_waypoint(self, pose):\n # Find the nearest waypoint\n closest_distance = float('inf')\n closest_waypoint = 0\n for i in range(len(self.waypoints)):\n this_distance = self.distance_to_position(self.waypoints, i, pose.position)\n if this_distance < closest_distance:\n closest_distance = this_distance\n closest_waypoint = i\n return closest_waypoint",
"def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))",
"def closest_point(p1: Vector3, p2: Vector3, p3: Vector3) -> Vector3:\n k = ((p2.y - p1.y) * (p3.x - p1.x) - (p2.x - p1.x) * (p3.y - p1.y)) / ((p2.y - p1.y) ** 2 + (p2.x - p1.x) ** 2)\n x4 = p3.x - k * (p2.y - p1.y)\n y4 = p3.y + k * (p2.x - p1.x)\n\n return Vector3(x4, y4, 0)",
"def shortest_distance_to(self, pt):\n return self._nearest_to_point(pt)[0]",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz"
]
| [
"0.6716802",
"0.66186583",
"0.65528345",
"0.64339",
"0.6338922",
"0.6274757",
"0.62589484",
"0.62559295",
"0.62322426",
"0.6176124",
"0.61617625",
"0.61426383",
"0.61366576",
"0.6133993",
"0.60911924",
"0.6088077",
"0.6038042",
"0.6036377",
"0.6024675",
"0.5993986",
"0.5993986",
"0.5989941",
"0.59808254",
"0.59686327",
"0.5934694",
"0.5924156",
"0.5895615",
"0.58897656",
"0.5889045",
"0.5885907"
]
| 0.7559864 | 0 |
Gets the target speed at the first waypoint that is at least min_distance away. | def get_target_speed(self, transform: pylot.utils.Transform,
min_distance: float) -> float:
wp_index = self._get_index(transform, min_distance)
return self.target_speeds[wp_index] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint",
"def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance",
"def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint",
"def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index",
"def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)",
"def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed",
"def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance",
"def get_min_distance(self, node):\r\n if self.have_min_distance(node):\r\n return self.table[node][\"dist\"]\r\n return None",
"def heuristic(current, goal):\r\n distance = getDistance(current, goal)\r\n return distance",
"def get_closest_waypoint(self, pose):\n # Find the nearest waypoint\n closest_distance = float('inf')\n closest_waypoint = 0\n for i in range(len(self.waypoints)):\n this_distance = self.distance_to_position(self.waypoints, i, pose.position)\n if this_distance < closest_distance:\n closest_distance = this_distance\n closest_waypoint = i\n return closest_waypoint",
"def shortest_distance_to(self, pt):\n return self._nearest_to_point(pt)[0]",
"def calc_speed(self):\n if self.vars['step'] > 0:\n target_tensor = torch.abs(self.state - self.state_prev)\n speed = torch.max(target_tensor)\n else: # this is the first step, no calculation is possible\n speed = float('NaN')\n return speed",
"def target_speed(self):\n return self._target_speed.value",
"def GetDistanceFromTarget(self):\n return abs(int(self.position[0] - self.target_position[0])) + abs(int(self.position[1] - self.target_position[1]))",
"def get_movement_speed(\n self, pipette_id: str, requested_speed: Optional[float] = None\n ) -> Optional[float]:\n return requested_speed or self._state.movement_speed_by_id[pipette_id]",
"def closest_interpolated_distance(start_log, end_log, waypoint, utm):\n # If no provided end, use start distance.\n if end_log is None:\n return start_log.uas_position.distance_to(waypoint.position)\n\n # Verify that aircraft velocity is within bounds. Don't interpolate if\n # it isn't because the data is probably erroneous.\n d = start_log.uas_position.distance_to(end_log.uas_position)\n t = (end_log.timestamp - start_log.timestamp).total_seconds()\n if (t > settings.MAX_TELMETRY_INTERPOLATE_INTERVAL_SEC or\n (d / t) > settings.MAX_AIRSPEED_FT_PER_SEC):\n return start_log.uas_position.distance_to(waypoint.position)\n\n def uas_position_to_tuple(pos):\n return (pos.gps_position.latitude, pos.gps_position.longitude,\n pos.altitude_msl)\n\n # Linearly interpolate between start and end telemetry and find the\n # closest distance to the waypoint.\n start = uas_position_to_tuple(start_log.uas_position)\n end = uas_position_to_tuple(end_log.uas_position)\n point = uas_position_to_tuple(waypoint.position)\n return distance.distance_to_line(start, end, point, utm)",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def get_distance(self, source, target, distance):\n return self.graph.edges[source, target][distance]",
"def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx",
"def get_distance(self) -> int:\n return self.get_measurement_data().distance",
"def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0",
"def estimated_speed(self):\n return self._estimates[3].item(0)",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0",
"def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance",
"def calc_speed2(self):\n if self.vars['step'] > 0:\n target_tensor = torch.abs(self.state - self.state_prev)\n speed = torch.max(target_tensor) / self.vars['dt']\n else: # this is the first step, no calculation is possible\n speed = float('NaN')\n return speed",
"def next_move(self):\n\n # Calculate all paths to destination from current location and time.\n solution = self.calculate_best_solution((None, None), self.currentTurn, [self.character.path[-1]],\n self.character.spent)\n\n # Add travel weight to spent.\n if solution[1] is not None and solution[1][0] != solution[1][1]:\n self.character.spent += self.pekingMap.get_vertex(solution[1][0]).weight(solution[1][1])\n\n # Return next point in shortest path to location.\n if solution[1] is not None:\n return solution[1][1]\n\n return None",
"def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))",
"def player_goal_distance(self) -> float:\n route = self.best_route\n return sum(route.values())",
"def calc_time_from_waypoints(self, initial_wp, final_wp, max_speed):\n joint_dist_to_cover = np.absolute(\n np.asarray(final_wp) - np.asarray(initial_wp))\n max_joint_dist_to_cover = np.max(joint_dist_to_cover)\n T = max_joint_dist_to_cover / max_speed\n return T\n # pass"
]
| [
"0.73220944",
"0.7246206",
"0.66607845",
"0.6493784",
"0.64006424",
"0.63146424",
"0.6259501",
"0.6218519",
"0.6188682",
"0.6162573",
"0.6152221",
"0.61325127",
"0.60963005",
"0.609057",
"0.6078571",
"0.6012068",
"0.6007863",
"0.600322",
"0.5992872",
"0.5969203",
"0.594895",
"0.5943102",
"0.59340715",
"0.5915076",
"0.59113634",
"0.5907807",
"0.58664584",
"0.58661336",
"0.5853547",
"0.5821032"
]
| 0.7603735 | 0 |
Draw waypoints on a frame. | def draw_on_frame(self, bgr_frame, inverse_transform=None):
extrinsic_matrix = bgr_frame.camera_setup.get_extrinsic_matrix()
intrinsic_matrix = bgr_frame.camera_setup.get_intrinsic_matrix()
for index, wp in enumerate(self.waypoints):
if inverse_transform:
wp = inverse_transform * wp
pixel_location = wp.location.to_camera_view(
extrinsic_matrix, intrinsic_matrix)
bgr_frame.draw_point(pixel_location, [255, 255, 255])
waypoint_txt = ''
if self.target_speeds:
waypoint_txt = '{:.1f}m/s'.format(self.target_speeds[index])
# if self.road_options:
# waypoint_txt = '{} {}'.format(waypoint_txt,
# self.road_options[index])
if waypoint_txt != '':
bgr_frame.draw_text(pixel_location, waypoint_txt,
[255, 255, 255]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_on_world(self, world):\n for index, wp in enumerate(self.waypoints):\n # Adds 0.5 to z to ensure that the point is above the road surface.\n loc = (wp.location +\n pylot.utils.Location(0, 0, 0.5)).as_simulator_location()\n world.debug.draw_point(loc, size=0.1, life_time=DEFAULT_VIS_TIME)\n # if self.road_options and index < len(self.road_options):\n # world.debug.draw_string(loc,\n # str(self.road_options[index]),\n # life_time=DEFAULT_VIS_TIME)",
"def draw(self, frame):\n self.block_bot.draw(frame, self.structure_offset, self.invert_y)\n self.block_mid.draw(frame, self.structure_offset, self.invert_y)\n self.block_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw bars\n self.bars_bot.draw(frame, self.structure_offset, self.invert_y)\n self.bars_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw spring\n self.spring_bot.draw(frame, self.structure_offset, self.invert_y)\n self.spring_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw point C\n self.draw_C(frame)",
"def draw(self, view):\n self._wall.draw(view)\n self._paddle.draw(view)\n self._ball.draw(view)",
"def draw_waypoints(world, waypoints, z=0.5):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.3, life_time=1.0)",
"def draw_waypoints(world, waypoints, z=0.01):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.1, life_time=1.0)",
"def draw(self, frame):\n for prediction in self.predictions:\n prediction.draw(frame)",
"def draw():",
"def draw_legs(self, frame, location_x, location_y, touching):\n legs_thickness = 3\n\n frame = cv2.line(\n frame,\n (\n Utils.ConvertX_location(self.A[-1].x, location_x),\n Utils.ConvertY_location(self.A[-1].z, location_y)\n ),\n (\n Utils.ConvertX_location(self.C[-1].x, location_x),\n Utils.ConvertY_location(self.C[-1].z, location_y)\n ),\n self.top_color,\n thickness=legs_thickness\n )\n\n frame = cv2.line(\n frame,\n (\n Utils.ConvertX_location(self.B[-1].x, location_x),\n Utils.ConvertY_location(self.B[-1].z, location_y)\n ),\n (\n Utils.ConvertX_location(self.C[-1].x, location_x),\n Utils.ConvertY_location(self.C[-1].z, location_y)\n ),\n (0, 0, 0),\n thickness=legs_thickness\n )\n\n position_bot_left = (\n int(Utils.ConvertX_location(0, location_x)),\n int(Utils.ConvertY_location(-0.01, location_y))\n )\n frame = cv2.putText(\n frame,\n self.name,\n position_bot_left,\n Utils.font,\n Utils.fontScale,\n Utils.gray,\n Utils.text_thickness,\n cv2.LINE_AA\n )\n\n # Draw Ground\n if touching:\n ground_color = Utils.red\n else:\n ground_color = Utils.black\n frame = cv2.line(\n frame,\n (\n Utils.ConvertX_location(-0.1, location_x),\n Utils.ConvertY_location(self.ground_distance, location_y)\n ),\n (\n Utils.ConvertX_location(0.1, location_x),\n Utils.ConvertY_location(self.ground_distance, location_y)\n ),\n color=ground_color,\n thickness=legs_thickness\n )\n return frame",
"def draw_routes(self):\n self.vis.draw_routes()",
"def draw(self):\n self._view.clear()\n for h in range(len(self._bricks)):\n self._bricks[h].draw(self._view)\n \n self._paddle.draw(self._view)\n \n for w in self._FP_list:\n w.draw(self._view)\n \n # draw ball if not None\n if not self._ball is None:\n self._ball.draw(self._view)",
"def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()",
"def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)",
"def draw(self):\n if len(self.__points) >= 2:\n self._total_length = 0\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n coords = self.__line_segment(p1, p2)\n if not coords is None:\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 1, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )\n coords = self.__line_cap(p2)\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 0, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )",
"def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image",
"def drawWarpLines(self):\n # draw warp lines\n for item in self.game.warpLines:\n anwp.sl.engine.drawLine(item[0]+self.bufferX, item[1]+self.bufferY, item[2]+self.bufferX, item[3]+self.bufferY, pyui.colors.blue)",
"def draw(self, frame):\n left, right, top, bottom = self.box['left'], self.box['right'], self.box['top'], self.box['bottom']\n text = '{}: {:.2f} ({:.2f} m)'.format(self.label, self.confidence, self.distance)\n\n # Draw label\n text_size, baseline = cv.getTextSize(text, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n top = max(top, text_size[1])\n\n cv.rectangle(frame, (left, top - text_size[1]), (left + text_size[0], top + baseline), (255, 255, 255), cv.FILLED)\n cv.putText(frame, text, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n\n # Draw bounding box\n cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))",
"def draw(self, view):\n for i in self.get_aliens():\n for n in i:\n if not n is None:\n n.draw(view)\n self.get_ship().draw(view)\n self.get_ship().get_linevalue().draw(view)\n for i in self.get_PU():\n i.draw(view)\n for i in self.get_bolts():\n i.draw(view)",
"def draw_keypoints(frame, keypoints, analysis_dict=None, reference=None):\n for key in keypoints.keys():\n position = tuple([int(keypoints[key][0]), int(keypoints[key][1])])\n if (key == \"left_shoulder\" or key == \"right_shoulder\") and reference is not None:\n cv2.circle(frame, position, INDICATOR_RADIUS, calculate_heatmap_colour(reference[\"shoulders\"],\n analysis_dict[\"shoulders\"]),\n cv2.FILLED)\n elif (key == \"left_hip\" or key == \"right_hip\") and reference is not None:\n cv2.circle(frame, position, INDICATOR_RADIUS, calculate_heatmap_colour(reference[\"hips\"],\n analysis_dict[\"hips\"]), cv2.FILLED)\n elif reference is not None and key in analysis_dict.keys() and key in reference.keys():\n cv2.circle(frame, position, INDICATOR_RADIUS, calculate_heatmap_colour(reference[key], analysis_dict[key]),\n cv2.FILLED)\n return frame",
"def draw(self):",
"def draw(self):\n self.bufferX = (self.appWidth/2) - self.viewX\n self.bufferY = (self.appHeight/2) - self.viewY\n anwp.sl.engine.clear()\n anwp.sl.engine.drawImage(0, 0, self.appWidth, self.appHeight, self.backgroundImage)\n self.drawWarpLines()\n \n # render engine\n anwp.sl.engine.render()\n self.drawSystemInfo()\n self.drawWarpGateInfo()\n self.drawWarpTradeInfo()",
"def draw_features(self, display):\n ball_pos = self.ball_pos_stamps[-1][0]\n c = COLORS[self.client_id - 1]\n\n if ARGS.hv_dists:\n # dist up\n self.__draw_line(display, c, ball_pos, 0, -self.dist_features[0])\n # dist right\n self.__draw_line(display, c, ball_pos, self.dist_features[2], 0)\n # dist down\n self.__draw_line(display, c, ball_pos, 0, self.dist_features[4])\n # dist left\n self.__draw_line(display, c, ball_pos, -self.dist_features[6], 0)\n\n if ARGS.diag_dists:\n # dist up right\n self.__draw_line(display, c, ball_pos, self.dist_features[1][0], -self.dist_features[1][1])\n # dist down right\n self.__draw_line(display, c, ball_pos, self.dist_features[3][0], self.dist_features[3][1])\n # dist down left\n self.__draw_line(display, c, ball_pos, -self.dist_features[5][0], self.dist_features[5][1])\n # dist up left\n self.__draw_line(display, c, ball_pos, -self.dist_features[7][0], -self.dist_features[7][1])\n\n if ARGS.direc_dist:\n self.__draw_line(display, (100, 100, 255), ball_pos, self.direc_dist[0], self.direc_dist[1])\n\n if ARGS.speed:\n # speed\n self.__draw_line(display, (200, 255, 200), ball_pos, self.speed_features[0], self.speed_features[1])\n\n # checkpoints\n for c in self.checkpoints:\n pygame.draw.circle(display, (200, 200, 200), c, 3)\n\n pygame.draw.circle(display, (200, 200, 0), self.checkpoints[self.current_section_id], 3)\n for i in ARGS.cp_ids:\n pygame.draw.circle(display, (200, 0, 200),\n self.checkpoints[(self.current_section_id + i) % self.num_cps], 3)",
"def draw(self, view):\n for r in self._aliens:\n for alien in r:\n if alien != None:\n alien.draw(view)\n if self._ship != None:\n self._ship.draw(view)\n self._dline.draw(view)\n for bolt in self._bolts:\n bolt.draw(view)",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def preview_ways(geodataframe):\n\n # Map tiles from contextily are provided in the Web Mercator coordinate reference system (EPSG:3857).\n gdf_wm = geodataframe.to_crs(epsg='3857')\n # Add a column for the centre of each geometry\n gdf_wm['centroid'] = gdf_wm.geometry.centroid\n # Create plot using matplotlib functionality\n ax = gdf_wm.plot(figsize=(10, 6), color='blue', linewidth=2)\n gdf_wm.centroid.plot(ax=ax, marker='o', color='red', alpha=0.5, markersize=40)\n # Add a basemap from contextily. This map should look a lot like Overpass Turbo!\n ctx.add_basemap(ax, source=ctx.providers.OpenStreetMap.Mapnik)",
"def draw(self, x, y):\r\n for w in self.widgets:\r\n if w.visible:\r\n w.draw()\r\n self.pointer.position(x + self.p_dx, y + self.p_dy, 0.5)\r\n self.pointer.draw()",
"def render(self):\n\n theta = self.angle*math.pi/180.0\n cth = math.cos(theta)\n sth = math.sin(theta)\n pts = []\n cornerpts = []\n\n for vertex in self.points:\n x = vertex[0] + self.pos[0] - self.anchor[0]\n y = vertex[1] + self.pos[1] - self.anchor[1]\n\n xt = x * cth - y * sth\n yt = x * sth + y * cth\n\n x = xt + self.anchor[0]\n y = yt + self.anchor[1]\n\n cornerpts.append([x,y])\n pts.append(gr.Point(self.scale * x, self.win.getHeight() - self.scale*y))\n\n self.corners = cornerpts\n self.vis = [gr.Polygon(pts)]",
"def draw( self ):\n\t\t\t\n\t\ttransposition = lambda point: (point[0] + WINDOW_X, WINDOW_Y - point[1])\n\t\t\t \n\t\tx, y = transposition( self.position.xy )\n\t\tpygame.draw.circle(self.screen, self.color, ( int(x + 0.5), int(y + 0.5) ), self.r)"
]
| [
"0.675669",
"0.65576106",
"0.6354201",
"0.62983316",
"0.62944514",
"0.618031",
"0.61121047",
"0.60885686",
"0.597918",
"0.59416425",
"0.5930113",
"0.59220034",
"0.59125364",
"0.5894867",
"0.5890554",
"0.5888072",
"0.5857224",
"0.5854743",
"0.58358055",
"0.5806921",
"0.57946265",
"0.5757499",
"0.57565796",
"0.57565796",
"0.57565796",
"0.57565796",
"0.5755443",
"0.57320726",
"0.5726691",
"0.5724074"
]
| 0.7305699 | 0 |
Draw waypoints on the simulator world. | def draw_on_world(self, world):
for index, wp in enumerate(self.waypoints):
# Adds 0.5 to z to ensure that the point is above the road surface.
loc = (wp.location +
pylot.utils.Location(0, 0, 0.5)).as_simulator_location()
world.debug.draw_point(loc, size=0.1, life_time=DEFAULT_VIS_TIME)
# if self.road_options and index < len(self.road_options):
# world.debug.draw_string(loc,
# str(self.road_options[index]),
# life_time=DEFAULT_VIS_TIME) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_waypoints(world, waypoints, z=0.01):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.1, life_time=1.0)",
"def draw_waypoints(world, waypoints, z=0.5):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.3, life_time=1.0)",
"def draw(self, view):\n self._wall.draw(view)\n self._paddle.draw(view)\n self._ball.draw(view)",
"def draw_routes(self):\n self.vis.draw_routes()",
"def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()",
"def draw():",
"def _draw(self):\n display.draw_maze(self._screen, self._maze, self._settings)\n pygame.display.flip()",
"def draw_world(world: World):\r\n draw_option(1 / 4 * WINDOW_WIDTH, 1 / 4 * WINDOW_HEIGHT, \"santa\")\r\n draw_option(1 / 2 * WINDOW_WIDTH, 1 / 4 * WINDOW_HEIGHT, \"snowman\")\r\n draw_option(3 / 4 * WINDOW_WIDTH, 1 / 4 * WINDOW_HEIGHT, \"reindeer\")\r\n draw_option(3 / 4 * WINDOW_WIDTH, 3 / 4 * WINDOW_HEIGHT, world[\"user choice\"])\r\n draw_option(1 / 4 * WINDOW_WIDTH, 3 / 4 * WINDOW_HEIGHT, world[\"cpu choice\"])\r\n draw_instruction()\r\n draw_score(world[\"score\"])\r\n draw_round_result(world[\"Won?\"])\r\n draw_attempts(world[\"attempts\"])",
"def draw(self, state):\n if state is None:\n state = self.model.current_state\n for row in range(len(self.model.maze.walls)):\n self.__draw_row_division()\n print(\" {0:2d} \".format(row), end='') # Imprime número da linha\n\n for col in range(len(self.model.maze.walls[0])):\n if self.model.maze.walls[row][col] == 1:\n print(\"|XXX\", end='') # Desenha parede\n elif self.model.goal_state.get_element(Coordinate(row, col)):\n if state.player.row == row and state.player.col == col:\n print(\"|G-P\", end='') # Desenha objetivo e jogador.\n elif state.get_element(Coordinate(row, col)):\n print(\"|G-B\", end='') # Desenha objetivo e caixa.\n else:\n print(\"| G\", end='') # Desenha objetivo\n elif state.player.row == row and state.player.col == col:\n print(\"| P\", end='') # Desenha jogador\n elif state.get_element(Coordinate(row, col)):\n print(\"| B\", end='') # Desenha caixa.\n else:\n print(\"| \", end='') # Desenha vazio\n print(\"|\")\n if row == (len(self.model.maze.walls) - 1):\n self.__draw_row_division()",
"def draw(self, screen):\n self.draw_left_zone(screen)\n self.draw_middle_zone(screen)\n self.draw_right_zone(screen)",
"def draw(self, verbosity=0):\n\n # Calculate overall scale and position of the map\n self.update_bounds()\n # Draw the dungeon background (everything behind the grid)\n self.draw_background(verbosity)\n # Draw the grid\n self.draw_grid(verbosity)\n # Draw the dungeon foreground (everything in front of the grid)\n self.draw_foreground(verbosity)\n\n pygame.display.flip()",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()",
"def draw(self):\n pt = self.getPoint() # Centre of prism\n\n # Form top,left,right corners\n top = Vector2d(pt.z, pt.y + self.height/2)\n d = self.height*math.tan(self.angle/2)\n left = Vector2d(pt.z - d , pt.y - self.height/2)\n right = Vector2d(pt.z + d, pt.y - self.height/2)\n\n\n top.rotate(self.tilt)\n left.rotate(self.tilt)\n right.rotate(self.tilt)\n\n # Plot them out with plt.plot\n plot([top[0],left[0],right[0],top[0]],[top[1],left[1],right[1],top[1]],\"k\",lw=2.0)",
"def visualize_routes(self):\n visualize_tsp.plotTSP([self.best_solution], self.coords)",
"def drawSAT(self):\r\n\t\tif self.show_weather == True:\r\n\t\t\timport weather\r\n\t\t\tweatherdata = weather.get_weather(self)\r\n\t\t\tweatherdata.start()\r\n\t\tsat = draw_sat(self,self.satBlocks)\r\n\t\tsat.start()\r\n\t\tmap = draw_map(self,self.mapBlocks)\r\n\t\tmap.start()\r\n\t\tself.redraw_markers()\r\n\t\tvirtualEarth = draw_virtualearth(self,self.satBlocks)\r\n\t\tvirtualEarth.start()\r\n\t\tsat.join()\r\n\t\tmap.join()\r\n\t\tvirtualEarth.join()\r\n\t\tif self.routecontainer['enable'] == 1:\r\n\t\t\tself.makeRoute(self.routecontainer['linestring'])\r\n\t\t\tself.route_pic.setVisible(True)\r\n\t\telse:\r\n\t\t\tself.route_pic.setVisible(False)\r\n\t\tif self.hybrid == 1:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"Google Hybrid\"), \"/xbmc_earth/browse/Google/Hybrid_act.html\",self)\r\n\t\telif self.hybrid == 2:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"Google Map\"), \"/xbmc_earth/browse/Google/Map_act.html\",self)\r\n\t\telif self.hybrid == 3:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"Google Area\"), \"/xbmc_earth/browse/Google/Area_act.html\",self)\r\n\t\telif self.hybrid == 0:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"Google Satelite\"), \"/xbmc_earth/browse/Google/Satelite_act.html\",self)\r\n\t\telif self.hybrid == 4:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"VirtualEarth Satelite\"), \"/xbmc_earth/browse/VirtualEarth/Satelite_act.html\",self)\r\n\t\telif self.hybrid == 5:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"VirtualEarth Hybrid\"), \"/xbmc_earth/browse/VirtualEarth/Hybrid_act.html\",self)\r\n\t\telif self.hybrid == 6:\r\n\t\t\tself.xbmcearth_communication.get_Google_Analytics( referer_url, urllib.quote(\"VirtualEarth Map\"), \"/xbmc_earth/browse/VirtualEarth/Map_act.html\",self)",
"def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)",
"def draw(self, view):\n for i in self.get_aliens():\n for n in i:\n if not n is None:\n n.draw(view)\n self.get_ship().draw(view)\n self.get_ship().get_linevalue().draw(view)\n for i in self.get_PU():\n i.draw(view)\n for i in self.get_bolts():\n i.draw(view)",
"def refresh_view(self):\n if self._step_number % 2 == 0:\n self._view.draw_enemies(self._game.enemies)\n self._view.draw_towers(self._game.towers)\n self._view.draw_obstacles(self._game.obstacles)",
"def main():\n maze = Vines()\n maze.drawSolu()\n maze.drawDead()",
"def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)",
"def draw(self):\n self.bufferX = (self.appWidth/2) - self.viewX\n self.bufferY = (self.appHeight/2) - self.viewY\n anwp.sl.engine.clear()\n anwp.sl.engine.drawImage(0, 0, self.appWidth, self.appHeight, self.backgroundImage)\n self.drawWarpLines()\n \n # render engine\n anwp.sl.engine.render()\n self.drawSystemInfo()\n self.drawWarpGateInfo()\n self.drawWarpTradeInfo()",
"def Draw(self):\n\t\tGameImage.Draw(self, self.coords)",
"def game_draw(self):\n pass",
"def draw(self):\n if (libt.map_is_in_fov(self.handler.fov_map, self.x, self.y) or \n self.handler.world.map[self.x][self.y].seen and self.visible_in_fog):\n libt.console_set_default_foreground(self.handler.game_map, self.colour)\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n self.char, libt.BKGND_NONE)",
"def drawWarpLines(self):\n # draw warp lines\n for item in self.game.warpLines:\n anwp.sl.engine.drawLine(item[0]+self.bufferX, item[1]+self.bufferY, item[2]+self.bufferX, item[3]+self.bufferY, pyui.colors.blue)",
"def drawPoles(wn):\n wn.setworldcoordinates(-1, -5, 3, 20)\n t = turtle.Turtle()\n t.speed(0)\n t.pensize(3)\n t.up()\n t.goto(-.5, 0)\n t.down()\n t.goto(2.5, 0)\n t.up()\n for i in range(3):\n t.goto(i, 0)\n t.down()\n t.goto(i, 10)\n t.up()\n t.hideturtle()",
"def draw_world_state(self):\n self.fig.clf()\n t = time.time()\n subplot = self.fig.add_subplot(2,1,1)\n for w in self.world_model.walls:\n subplot.plot([w,w],[0,1],'b-')\n subplot.hold(True)\n subplot.set_xlim([min(self.walls)-0.2,max(self.walls)+.2])\n subplot.set_ylim([0,1])\n subplot.scatter([p.position for p in self.pf.particles],\n [0.5]*len(self.pf.particles),\n c='r',\n s=[p.weight**0.5*1000 for p in self.pf.particles])\n\n subplot.scatter([p.position for p in self.pf.particles],\n [0.2]*len(self.pf.particles),\n c='k',\n s=[10]*len(self.pf.particles))\n\n if self.true_position != None:\n subplot.scatter([self.true_position], [0.8], c='g', s=[100])\n\n histogram = self.fig.add_subplot(2,1,2)\n\n histogram.hist([p.position for p in self.pf.particles],\n weights=[p.weight for p in self.pf.particles],\n bins=np.arange(-0.5+min(self.walls),0.5+max(self.walls),.02))\n\n histogram.set_xlim([-.2+min(self.walls),0.2+max(self.walls)])\n histogram.set_ylim([0,1])\n plt.draw()\n plt.pause(.01)",
"def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)",
"def draw(self, view):\n for r in self._aliens:\n for alien in r:\n if alien != None:\n alien.draw(view)\n if self._ship != None:\n self._ship.draw(view)\n self._dline.draw(view)\n for bolt in self._bolts:\n bolt.draw(view)",
"def draw(self):\n draw(self.graph)"
]
| [
"0.66840714",
"0.66795164",
"0.6600115",
"0.6580789",
"0.6300364",
"0.62473875",
"0.6230317",
"0.62090623",
"0.6182935",
"0.6165176",
"0.61582327",
"0.61322266",
"0.6123043",
"0.6115214",
"0.6110336",
"0.61087155",
"0.6090101",
"0.6089843",
"0.6085808",
"0.6067363",
"0.6062344",
"0.6055954",
"0.60517496",
"0.60503465",
"0.6042484",
"0.604112",
"0.6036267",
"0.60284746",
"0.60177684",
"0.600784"
]
| 0.8217965 | 0 |
params: table_name (string) - dynamodb table name; params: key_name (string) - dynamodb table primary key | def __init__(self, table_name, key_name) -> None:
if not DB_ENDPOINT:
self.client = boto3.resource("dynamodb")
else:
self.client = boto3.resource("dynamodb", endpoint_url=DB_ENDPOINT)
# for testing -> was not working with env variable for some reason.
# need to investigate further
# self.client = boto3.resource("dynamodb", endpoint_url="http://localhost:8000")
self.table_name = table_name
self.table_connector = self.client.Table(self.table_name)
self.primary_key = key_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_table(table_name, filter_key=None, filter_value=None):\n table = dynamodb.Table(table_name)\n\n if filter_key and filter_value:\n filtering_exp = Key(filter_key).eq(filter_value)\n response = table.query(KeyConditionExpression=filtering_exp)\n else:\n response = table.query()\n\n return response",
"def define_table(\n transaction: VersionedTransaction, table: TableNameOrResource, *key_attributes: str,\n) -> VersionedTransaction:\n assert len(key_attributes) > 0 and len(key_attributes) <= 2\n if _table_name(table) in transaction.tables:\n return transaction\n return VersionedTransaction(\n tables={\n **transaction.tables,\n _table_name(table): _TableData(\n items=dict(),\n effects=dict(),\n key_attributes=standard_key_attributes(*key_attributes),\n ),\n }\n )",
"def get_table( db, table_name, key_name, suffix=None, columns='*'):\n \n if suffix == None:\n query = 'SELECT ' + columns + ' FROM ' + table_name + ';'\n else:\n query = 'SELECT ' + columns + ' FROM ' + table_name + ' ' + suffix + ';'\n \n #print query\n rowList = [ x for x in table_generator(db, query)]\n table = {}\n for r in rowList:\n table[r.__getattribute__(key_name)] = r\n \n return table",
"def primary_key(table_name: str) -> str:\n\n return f\"\"\"\n SELECT\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type\n FROM\n pg_index i\n JOIN\n pg_attribute a\n ON\n a.attrelid = i.indrelid AND\n a.attnum = ANY(i.indkey)\n WHERE\n i.indrelid = '{table_name}'::regclass AND\n i.indisprimary\n \"\"\"",
"def _update_table(table_name: str, **kwargs) -> dict:\n return _execute_boto3_dynamodb(\n delegate=lambda x: x.update_table(TableName=table_name, **kwargs))",
"def create_table(table_name: str, **db_kwargs) -> str:\n\n # Check if the table exists.\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n resp = client.describe_table(TableName=table_name)\n log.info(\"Table %s exists\", table_name)\n return resp[\"Table\"][\"TableArn\"]\n except Exception as err: # pylint:disable=broad-except\n pass\n\n # Key attributes in the table.\n attrs = [\n {\"AttributeName\": \"Target\", \"AttributeType\": \"S\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"AttributeType\": \"S\"},\n {\"AttributeName\": \"TargetIDKeys\", \"AttributeType\": \"S\"},\n ]\n\n key_schema = [\n {\"AttributeName\": \"Target\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"KeyType\": \"RANGE\"},\n ]\n\n global_secondary_indexes = [\n {\n \"IndexName\": \"TargetIDKeysIndex\",\n \"KeySchema\": [\n {\"AttributeName\": \"TargetIDKeys\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"PrimaryRangeKey\", \"KeyType\": \"RANGE\"},\n ],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n \"ProvisionedThroughput\": {\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 10},\n }\n ]\n\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n resp = client.create_table(\n TableName=table_name,\n AttributeDefinitions=attrs,\n KeySchema=key_schema,\n GlobalSecondaryIndexes=global_secondary_indexes,\n ProvisionedThroughput={\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 10},\n )\n log.info(\"Table %s created successfully\", table_name)\n return resp[\"TableDescription\"][\"TableArn\"]\n except Exception as err: # pylint:disable=broad-except\n raise RuntimeError(\"Error creating table %s: %s\" % (table_name, str(err)))",
"def __init__(self, tablename):\n dynamodb = boto3.resource(\"dynamodb\")\n self._table = dynamodb.Table(table_name)\n super().__init__()",
"def htable_get(table, key):",
"def update_row(table_str, attribute_value_dict, keys_dict): #works\n sql = make_update_row(table_str, attribute_value_dict, keys_dict)\n #print sql\n execute_edit_queries(sql)",
"def __init__(self, table_name='casbin_rule', **kwargs):\n self.table_name = table_name\n self.dynamodb = boto3.client('dynamodb', **kwargs)\n try:\n\n self.dynamodb.create_table(\n TableName=self.table_name,\n\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n except self.dynamodb.exceptions.ResourceInUseException:\n pass",
"def _describe_table(table_name: str) -> dict:\n return _execute_boto3_dynamodb(\n delegate=lambda x: x.describe_table(TableName=table_name))",
"def getTableByName(self, tablename):\n pass",
"def make_insert_row(table_str, attribute_value_dict): #works\n#Aanpassen zodat query niet uitgevoerd wordt als pk al bestaat\n #initialize input for string formatting\n attributes_string = \"(\"\n values_list = []\n #retrieve attributes and values from dictionary and add them to the string\n for key in attribute_value_dict:\n values_list += [attribute_value_dict[key]]\n attributes_string += \"%s, \" % key\n attributes_string = attributes_string[:(len(attributes_string)-2)]\n attributes_string += ')'\n values = str(tuple(values_list))\n sql = \"\"\"INSERT INTO `%s` %s VALUES %s \"\"\" % (table_str, attributes_string, values)\n return sql",
"def dynamodb_config(dynamodb: boto3.resource, player: dict) -> boto3.resource:\n # create a table\n table = dynamodb.create_table(\n TableName=\"Table\",\n AttributeDefinitions=[{\"AttributeName\": \"playerId\", \"AttributeType\": \"S\"}],\n KeySchema=[{\"AttributeName\": \"playerId\", \"KeyType\": \"HASH\"}],\n ProvisionedThroughput={\"ReadCapacityUnits\": 1, \"WriteCapacityUnits\": 1},\n )\n\n # Put player into DB\n table.put_item(Item={\"playerId\": \"player_hash\", \"player_data\": player})\n\n # Put target into DB\n table.put_item(Item={\"playerId\": \"target_hash\", \"player_data\": player})\n\n return table",
"def insert_row(table_str, attribute_value_dict): #works\n sql = make_insert_row(table_str, attribute_value_dict)\n #print sql\n execute_edit_queries(sql)",
"def htable_put(table, key, value):",
"def make_update_row(table_str, attribute_value_dict, keys_dict): #works\n\n # initialize query statement and input\n sql = \"\"\"UPDATE `%s` SET %s = '%s'\"\"\"\n # get attributes from the dictionaries and initialize list for values\n attributes_list = attribute_value_dict.keys()\n value_list = []\n list_of_key_attributes = keys_dict.keys()\n list_of_key_values = []\n # create list of values from the dictionary\n for key in attributes_list:\n value_list += [attribute_value_dict[key]]\n for key in list_of_key_attributes:\n list_of_key_values += [keys_dict[key]]\n input_format = (table_str, attributes_list[0], value_list[0])\n # add set's to the query statemtens and input\n for i in range(1, len(attributes_list)):\n sql += \", %s = '%s'\"\n input_format += (attributes_list[i], value_list[i])\n # add the Where-clause\n sql += \" WHERE %s = '%s'\"\n input_format += (list_of_key_attributes[0], list_of_key_values[0])\n # add an AND to the Where-clause for multi-keys\n for i in range(1, len(list_of_key_attributes)):\n sql += \" AND %s = '%s'\"\n input_format += (list_of_key_attributes[i], list_of_key_values[i])\n # combine query & input, run\n sql = sql % input_format\n return sql",
"def make_s3_keys(task, fmt):\n table_key = fmt.format(task.db_name, task.orm.__tablename__)\n version_key = fmt.format(task.db_name, \"__version__\")\n return table_key, version_key",
"def test_build_base_attribute_key_function_with_table_name_succeeds_with_valid_input(self):\n instrument_id = 4\n attribute_name = \"attribute_string\"\n table_name = \"table_string\"\n expected_key = \"instruments:4:table_string:attribute_string\"\n result = redis_interface.RedisInterface._build_base_attribute_key(instrument_id, attribute_name,\n table_name=table_name)\n\n self.assertEqual(result, expected_key, \"Built key '%s' does not match expected string '%s'.\"\n % (result, expected_key))",
"def make_delete_row(table_str, key_value_dict): # works\n \n # initialize query statement and input\n sql = \"\"\"DELETE FROM `%s` WHERE %s = '%s'\"\"\"\n # create list of attributes\n attributes_list = key_value_dict.keys()\n #create list for the values\n value_list = []\n for key in attributes_list:\n value_list += [key_value_dict[key]]\n # initialize the first input (to match with the top sql-statement)\n input_format = (table_str, attributes_list[0], value_list[0])\n # add an AND to the Where-clause for multi-keys\n for i in range(1, len(attributes_list)):\n sql += \" AND %s = '%s'\"\n input_format += (attributes_list[i], value_list[i])\n # combine query & input, run\n sql = sql % input_format\n return sql",
"def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName 
VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query",
"def put_item(key, value):\n try:\n response = table.put_item( Item={ 'my-key': key, 'some-other-key': value )\n print(f\"Successfully added new item\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to creat new item - key : {key}, value : {value}\")\n print(ce)\n\ndef update_nested_item(key, value):\n \"\"\"\n Update a nested item. create \n \"\"\"\n try:\n response = table.update_item( Key={ 'my-key': key },\n UpdateExpression='SET #other-key = :new_value',\n ExpressionAttributeNames={\n '#other-key': 'New-Key'\n },\n ExpressionAttributeValues={ ':new_value': True },\n ReturnValues='ALL_NEW'\n )\n print(\"Successfully created/updated item.\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to update item : {ce}\")",
"def create_table(db, table_name, param_name, param_type):\r\n command = \"CREATE TABLE IF NOT EXISTS \" + table_name + \" (\"\r\n for i in range(len(param_name)):\r\n command = command + param_name[i] + \" \" + param_type[i]\r\n if i == (len(param_name) - 1):\r\n command = command + \");\"\r\n else:\r\n command = command + \", \"\r\n try:\r\n c = db.cursor()\r\n c.execute(command)\r\n db.commit()\r\n print \"SQLite command complete.\"\r\n except Error as e:\r\n print(e)",
"def delete_item(self, table_name: str, primary_key: dict):\n table = self.resource.Table(table_name)\n table.delete_item(Key=primary_key)",
"def GenMetaTableParam(a_name):\n table_param = Table_Param()\n if a_name == \"meta\":\n table_param.NAME = meta_table_name\n table_param.COLS = \\\n (Column(\"version\", String(50), primary_key=True, index=False),\n Column('site', String),\n Column('pairDbFile', String),\n Column('dumpType', String),\n Column('lock', Integer),\n Column('prNumber', String),\n Column('kbUrl', String),\n Column('desc', String))\n if a_name == \"fixby\":\n table_param.NAME = fixby_table_name\n table_param.COLS = \\\n (Column(\"id\", Integer, primary_key=True), # will auto increase\n Column('module', String),\n Column('desc', String))\n\n return table_param",
"def test_dynamodb(dynamodb: boto3.resource):\n # create a table\n table = dynamodb.create_table(\n TableName=\"Test\",\n KeySchema=[{\"AttributeName\": \"playerId\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[{\"AttributeName\": \"playerId\", \"AttributeType\": \"S\"}],\n ProvisionedThroughput={\"ReadCapacityUnits\": 1, \"WriteCapacityUnits\": 1},\n )\n\n _id = str(uuid.uuid4())\n\n # put an item into db\n table.put_item(Item={\"playerId\": _id, \"test_key\": \"test_value\"})\n\n # get the item\n item = table.get_item(Key={\"playerId\": _id})\n\n # check the content of the item\n assert item[\"Item\"][\"test_key\"] == \"test_value\"",
"def update_item(self, table_name: str, primary_key: dict, update: dict):\n table = self.resource.Table(table_name)\n\n update_expression = 'SET '\n updates = []\n for key, value in update.items():\n # Add a suffix the key to create a substitute name for it to\n # prevent conflicts with a reserved DynamoDB word.\n # Refer the following for more details:\n # - https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html\n # - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html\n substitute_key = f'#{key}_key'\n substitute_value = f':{key}_value'\n updates.append({\n 'key': key,\n 'value': value,\n 'substitute_key': substitute_key,\n 'substitute_value': substitute_value,\n })\n update_expression += f'{substitute_key} = {substitute_value}, '\n update_expression = update_expression[:-2] # remove the last ', '\n\n table.update_item(\n Key=primary_key,\n UpdateExpression=update_expression,\n ExpressionAttributeNames={\n u['substitute_key']: u['key'] for u in updates\n },\n ExpressionAttributeValues={\n u['substitute_value']: u['value'] for u in updates\n },\n )",
"def redis_key(self):\n return 'tesseract:table:%s' % self.table_name",
"def get_table_attributes(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n table_attributes = dict(primary_attributes=[], secondary_attributes=[])\n for attribute_name, attribute_info in getattr(schema_virtual_module,\n table_name).heading.attributes.items():\n if attribute_info.in_key:\n table_attributes['primary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n else:\n table_attributes['secondary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n\n return table_attributes",
"def get_item(self, table_name: str, primary_key: dict) -> dict:\n table = self.resource.Table(table_name)\n response = table.get_item(Key=primary_key)\n try:\n return json.loads(json.dumps(response['Item'], cls=DynamoDBEncoder))\n except KeyError:\n raise DynamoDbWrapper.DoesNotExist(\n 'No item matching the primary key was found.'\n )"
]
| [
"0.62873125",
"0.62679005",
"0.62276125",
"0.6227167",
"0.62052846",
"0.60579866",
"0.602017",
"0.58784723",
"0.58703464",
"0.5857174",
"0.5849382",
"0.5844431",
"0.5825517",
"0.5795559",
"0.56661654",
"0.56511134",
"0.56288356",
"0.5625783",
"0.56059664",
"0.55820763",
"0.55804956",
"0.5557955",
"0.551105",
"0.55087423",
"0.5489408",
"0.5469011",
"0.54642826",
"0.54407567",
"0.54118156",
"0.5380923"
]
| 0.6615286 | 0 |
Registers a combination of one or more models, one or more keys and one or more signals. Whenever one of the signals is sent by one of the models, the associated cache keys will be deleted. By default, if you omit the `signals` parameter it will use the Django `post_save` signal. | def register(self, models, keys, signals=post_save, **kwargs):
if not isinstance(signals, (list, tuple)):
signals = [signals]
for signal in signals:
if settings.DEBUG:
err = "{} is not a valid Signal subclass.".format(signal)
assert isinstance(signal, Signal), err
self._registry.setdefault(signal, {})
if not isinstance(models, (list, tuple)):
models = [models]
for model in models:
if settings.DEBUG:
err = "{} is not a valid ModelBase subclass.".format(model)
assert isinstance(model, ModelBase), err
self._registry.get(signal).setdefault(model, set())
if not isinstance(keys, (list, tuple)):
keys = [keys]
for key in keys:
self._registry.get(signal).get(model).add(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect(self):\n for signal, models in six.iteritems(self._registry):\n for model, keys in six.iteritems(models):\n # Local function the current signal is going to be\n # connected to.\n # Defining it dynamically allows us to pass in the current\n # set of keys for the given model, but we have to store\n # a strong reference to it to avoid garbage collection.\n def delete_cache(signal, sender=model, keys=keys):\n cache.delete_many(list(keys))\n signal.connect(delete_cache, sender=model, weak=False, dispatch_uid=signal)",
"def add_signals(self, signals):\n\n self.signals = {**self.signals, **signals} # merge the two",
"def load_cache_signals(version=None, **kwargs):\r\n if not cache_signals.ready:\r\n results = CacheBotSignals.objects.all()\r\n tables = [r.table_name for r in results]\r\n mapping = cache.get_many(tables)\r\n for result in results:\r\n key = cache.make_key(u'.'.join(('cachesignals', result.table_name)), version=version)\r\n accessor_set = mapping.get(key) or set()\r\n accessor_set.add((result.accessor_path, result.lookup_type, result.exclude))\r\n mapping[key] = accessor_set\r\n cache.set_many(mapping, 0)\r\n cache_signals.ready = True",
"def register_models(self, *models, **kwargs):\n\n for model in models:\n self.register(model, **kwargs)",
"def register_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.subscribe(self, callback)",
"def post_model_save(sender, instance, **kwargs):\n cache.clear()",
"def register(self, *model):\n for m in model:\n m.Register()\n self.models.append(m)",
"def set_signal_handlers(cls, signals):\n for sig in signals:\n try:\n original_handler = signal.getsignal(sig)\n if original_handler == cls.signal_handler:\n continue\n signal.signal(sig, cls.signal_handler)\n cls.__signal_handlers[sig] = original_handler\n except Exception as e:\n pass",
"def register(self, model, values=None, instance_values=None):\n\n if model in self._models:\n raise Exception(\"%s is already registered\" % model)\n\n self._models[model] = CacheConfig(values, instance_values)",
"def register(self):\n REGISTERED_SIGNALS.setdefault(self.path, []).append(self)",
"def signals_import():\n \n models.signals.post_save.connect(create_player_profile, sender=User)\n #this is breaking things right now models.signals.post_save.connect(create_goldorauth, sender=User)",
"def register_admin_models(*args, **kwargs):\n for model in args:\n admin.register(model, session=kwargs['session'])\n return None",
"def _class_prepared(self, sender, **kwargs):\n if not self.cache_fields:\n return\n post_init.connect(self._post_init, sender=sender, weak=False)\n post_save.connect(self._post_save, sender=sender, weak=False)\n post_delete.connect(self._post_delete, sender=sender, weak=False)",
"def register( key, obj ):\n global callbacks\n callbacks[ key ] = obj",
"def register(cls, model):\n cls.models[model] = True",
"def signals_import():\n from accounts.utils import create_client\n\n models.signals.post_save.connect(create_client, sender=User)",
"def register_models(self, app_label, *models):\n for model in models:\n # Store as 'name: model' pair in a dictionary\n # in the app_models dictionary\n model_name = model._meta.model_name\n model_dict = self.app_models.setdefault(app_label, SortedDict())\n if model_name in model_dict:\n # The same model may be imported via different paths (e.g.\n # appname.models and project.appname.models). We use the source\n # filename as a means to detect identity.\n fname1 = os.path.abspath(upath(\n sys.modules[model.__module__].__file__))\n fname2 = os.path.abspath(upath(\n sys.modules[model_dict[model_name].__module__].__file__))\n # Since the filename extension could be .py the first time and\n # .pyc or .pyo the second time, ignore the extension when\n # comparing.\n if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:\n continue\n model_dict[model_name] = model\n self._get_models_cache.clear()",
"def ready(self):\n for model in self.get_models():\n # register model-level signals\n pre_save.connect(receivers.presave, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_presave\")\n pre_delete.connect(receivers.predelete, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_predel\")\n post_save.connect(receivers.postsave, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_postsave\")\n post_delete.connect(receivers.postdelete, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_postdel\")\n\n # register many to many fields of model - EXPERIMENTAL\n m2m_field_names = []\n for m2m in model._meta.many_to_many:\n m2m_field = getattr(model, m2m.name)\n m2m_changed.connect(receivers.m2mchanged, sender=m2m_field.through, weak=False,\n dispatch_uid=f\"{model.__name__}_{m2m.name}\")",
"def _save(self, **kwargs): #signal, sender, instance):\r\n tags = self._get_instance_tag_cache(kwargs['instance'])\r\n if tags is not None:\r\n Tag.objects.update_tags(kwargs['instance'], tags)",
"def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)",
"def register_all(models, admin_class=admin.ModelAdmin):\n for attr in dir(models):\n attr = getattr(models, attr, None)\n if isinstance(attr, type):\n if issubclass(attr, Model) and not attr._meta.abstract:\n try:\n admin.site.register(attr, admin_class)\n except admin.sites.AlreadyRegistered:\n pass",
"def invalidate_cache(model_class, objects, **extra_keys):\r\n invalidation_dict = {}\r\n accessor_set = cache_signals.get_global_signals(model_class)\r\n for obj in objects:\r\n for (accessor_path, lookup_type, negate) in accessor_set:\r\n if lookup_type != 'exact' or negate:\r\n invalidation_key = get_invalidation_key(\r\n model_class._meta.db_table, \r\n accessor_path = accessor_path, \r\n negate = negate,\r\n value = '')\r\n invalidation_dict[invalidation_key] = None\r\n else:\r\n for value in get_values(obj, accessor_path):\r\n invalidation_key = get_invalidation_key(\r\n model_class._meta.db_table, \r\n accessor_path = accessor_path, \r\n negate = negate,\r\n value = value)\r\n invalidation_dict[invalidation_key] = None\r\n \r\n if invalidation_dict:\r\n invalidation_dict.update(cache.get_many(invalidation_dict.keys()))\r\n\r\n cache_keys = set()\r\n for obj_key, cache_key_list in invalidation_dict.iteritems():\r\n if cache_key_list:\r\n cache_keys.update(cache_key_list.split(','))\r\n \r\n if cache_keys:\r\n cache.set_many(dict([(key, None) for key in cache_keys]), conf.CACHE_INVALIDATION_TIMEOUT)\r\n invalidation_dict.update(extra_keys)\r\n cache.delete_many(invalidation_dict.keys())",
"def register_signals(self):\n logging.debug(\"BigQuery class created and registering signals\")",
"def save(self, *args, **kwargs):\n self.key = str(self.key).upper()\n\n do_cache = kwargs.pop('cache', True)\n\n self.clean(**kwargs)\n self.validate_unique(**kwargs)\n\n # Update this setting in the cache\n if do_cache:\n self.save_to_cache()\n\n super().save()\n\n # Get after_save action\n setting = self.get_setting_definition(self.key, *args, **kwargs)\n after_save = setting.get('after_save', None)\n\n # Execute if callable\n if callable(after_save):\n after_save(self)",
"def register(self, model_or_iterable, handler_class, **kwargs):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model in self._registry:\n try:\n model_name = model._meta.model_name\n except AttributeError:\n # Django < 1.6\n model_name = model._meta.module_name\n raise ModelAlreadyRegistered(\n \"The model {} is already registered.\".format(model_name))\n handler = get_handler_instance(model, handler_class, kwargs)\n self._registry[model] = handler\n contribute_to_class(model)",
"def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func",
"def setup_signals(self):\n\n # Hook the play/pause buttons\n self.ui.actionContinue_Live_Updates.triggered.connect(self.on_live)\n self.ui.actionPause_Live_Updates.triggered.connect(self.on_pause)\n self.ui.actionSave.triggered.connect(self.on_save)\n\n # Custom graph buttons\n self.action_graph_reset.triggered.connect(self.reset_graph)\n self.action_graph_full_extent.triggered.connect(self.full_extent)\n\n # Custom tools generated in visualize that are not actions\n self.zoom_tool.wrap_sig.clicked.connect(self.process_zoom)\n self.select_tool.wrap_sig.clicked.connect(self.process_select)",
"def restore_signal_handlers(cls):\n signals = cls.__signal_handlers.keys()\n for sig in signals:\n try:\n signal.signal(sig, cls.__signal_handlers[sig])\n except Exception as e:\n pass\n cls.__signal_handlers = {}",
"def register_func_list(self, func_and_handler):\n for func, handler in func_and_handler:\n self._function_dispatch.register(func, handler)\n self.dispatch.cache_clear()",
"def hook_signals(self):\n signal.signal(signal.SIGTERM, self.quit)\n signal.signal(signal.SIGQUIT, self.quit)\n signal.signal(signal.SIGHUP, self.reload)"
]
| [
"0.66491324",
"0.6442545",
"0.58168805",
"0.57932657",
"0.552136",
"0.548373",
"0.5481442",
"0.52676225",
"0.5223032",
"0.51982576",
"0.5167057",
"0.5161264",
"0.51268774",
"0.5116198",
"0.50248075",
"0.50216794",
"0.49629274",
"0.4926565",
"0.49122542",
"0.4890563",
"0.48737139",
"0.47956407",
"0.47759175",
"0.4759938",
"0.4743465",
"0.4736937",
"0.47290042",
"0.4711022",
"0.46680441",
"0.46468666"
]
| 0.78166646 | 0 |
Connects all current registered signals to the cache delete function. | def connect(self):
for signal, models in six.iteritems(self._registry):
for model, keys in six.iteritems(models):
# Local function the current signal is going to be
# connected to.
# Defining it dynamically allows us to pass in the current
# set of keys for the given model, but we have to store
# a strong reference to it to avoid garbage collection.
def delete_cache(signal, sender=model, keys=keys):
cache.delete_many(list(keys))
signal.connect(delete_cache, sender=model, weak=False, dispatch_uid=signal) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, cache_key):\r\n pass",
"def on_delete(key):\n pass",
"def disconnect(self):\n for signal, models in six.iteritems(self._registry):\n for model, keys in six.iteritems(models):\n signal.disconnect(sender=model, weak=False, dispatch_uid=signal)\n self._registry = {}",
"def delete(self, *args, **kwargs):\n self.flush_from_cache()\n self._is_deleted = True\n super().delete(*args, **kwargs)",
"def __del__(self):\n self.disconnect()",
"def pre_delete_function_set(sender, instance, **kwargs):\n # Delete all functions (this also deletes the links).\n instance.function_set.all().delete()",
"def _notify_delete(self, cuds_object):",
"def __delete__(self):\n pass",
"def decache(self):",
"def delete(self):\n ...",
"def _clean(self):\n\t\tfor hid in self.handlers_id:\n\t\t\tself.obj.handler_disconnect(hid)",
"def delete_plugin_data(self):",
"def hook_signals(self):\n signal.signal(signal.SIGTERM, self.quit)\n signal.signal(signal.SIGQUIT, self.quit)\n signal.signal(signal.SIGHUP, self.reload)",
"def delete_callback(self):\n pass",
"def cleanup(self):\r\n #self.removeObservers()\r\n pass",
"def unregister_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.unsubscribe(self, callback)",
"def __del__(self):\n self.unsubscribe()",
"def delete(self):\n redis_store.delete(String.generate_key(self.key))",
"def delete_cache(self):\n if hasattr(self, '_f_12_interp'):\n del self._f_12_interp\n if hasattr(self, '_f_32_interp'):\n del self._f_32_interp",
"def delete():",
"def cleanup(self):\n self.removeObservers()",
"def process_IN_DELETE(self, event):",
"def _post_delete(self, instance, **kwargs):\n pk_name = instance._meta.pk.name\n for key in self.cache_fields:\n if key in ('pk', pk_name):\n continue\n # remove pointers\n cache.delete(self._get_from_cache_key(**{key: getattr(instance, key)}))\n # remove actual object\n cache.delete(self._get_from_cache_key(**{pk_name: instance.pk}))",
"def clear_cache(sender, **kwargs):\n# print \"Post save() -> clear cache\"\n cache.clear() # FIXME: This cleaned the complete cache for every site!",
"def cleanup():\n redis_client.flushall()",
"def pre_delete_network(sender, instance, **kwargs):\n # Disable the pre_delete signal for centroids (the signal is useless\n # because the links are already deleted but it slows down the deleting\n # process).\n pre_delete.disconnect(sender=Centroid, dispatch_uid=\"centroid\")\n instance.centroid_set.all().delete()\n # Enable the pre_delete signal again.\n pre_delete.connect(pre_delete_centroid, sender=Centroid)\n pre_delete.disconnect(sender=Crossing, dispatch_uid=\"crossing\")\n instance.crossing_set.all().delete()\n pre_delete.connect(pre_delete_crossing, sender=Crossing)",
"def process_IN_DELETE_SELF(self, event):",
"def post_delete_metrics(sender, **kwargs):\r\n tags = _database_tags('deleted', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def delete_cache(self, *args, **kwargs):\n if self._cache:\n key = self.get_cache_key(*args, **kwargs)\n cache_backend.delete(key)\n\n if self._memoize:\n memoization_key = self._get_memoization_key(*args, **kwargs)\n if memoization_key in self._cached_results:\n del self._cached_results[memoization_key]",
"def cleanup_on_disconnect(self, datapath):\n self.delete_all_flows(datapath)"
]
| [
"0.63536656",
"0.6163808",
"0.59302086",
"0.59195465",
"0.58715653",
"0.5742273",
"0.5662632",
"0.5651707",
"0.56499434",
"0.55677015",
"0.5546776",
"0.55031997",
"0.54978436",
"0.54933244",
"0.5492756",
"0.54902864",
"0.5485602",
"0.5471204",
"0.5457612",
"0.5454246",
"0.54528725",
"0.54502916",
"0.5414679",
"0.5392689",
"0.53738683",
"0.5355034",
"0.5344982",
"0.53407353",
"0.53224975",
"0.53175"
]
| 0.7473073 | 0 |
Disconnects all current registered signals. To reconnect, signals must be registered again. | def disconnect(self):
for signal, models in six.iteritems(self._registry):
for model, keys in six.iteritems(models):
signal.disconnect(sender=model, weak=False, dispatch_uid=signal)
self._registry = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disconnect_signals(self, *args):\n children = self.children()\n Pair.disconnect_signals(self, *args)\n for child in children:\n child.disconnect_signals(*args)",
"def unregister_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.unsubscribe(self, callback)",
"def disconnectAll(self, parentObject):\n\t\tfor widget in parentObject.findChildren(QtWidgets.QWidget):\n\t\t\tif widget.property('xmlTag'):\n\t\t\t\ttry:\n\t\t\t\t\twidget.disconnect()\n\t\t\t\t\t# verbose.debug(\"Disconnect signals from %s\" % widget.objectName())\n\t\t\t\texcept TypeError:\n\t\t\t\t\tpass\n\t\t\t\t\t# verbose.debug(\"No signals to disconnect from %s\" % widget.objectName())",
"def disconnect(self):\n for connection in six.itervalues(self):\n connection.disconnect()",
"def disconnect(self):\n for conn in self.all_connections():\n conn.disconnect()",
"def disconnected_from_listeners(signal):\n listeners = list(signal.receivers_for(ANY))\n for listener in listeners:\n signal.disconnect(listener)\n yield\n for listener in listeners:\n signal.connect(listener)",
"def disconnect_all(self):\n all_conns = chain([_x[0] for _x in self._available_connections], self._in_use_connections)\n for connection in all_conns:\n try:\n connection.disconnect()\n except Exception as err:\n self.class_logger.warning(\"Error occurred while disconnecting connection: %s\" % (err, ))\n self._available_connections = []\n self._in_use_connections = set()",
"def disconnect(signal, handler=None):\n if signal not in REGISTRY:\n return\n if handler:\n REGISTRY[signal].disconnect(handler)\n return\n REGISTRY[signal].registry.clear()\n del REGISTRY[signal]",
"def DisconnectWired(self):\n self.SetForcedDisconnect(True)\n self.wired.Disconnect()",
"def _unsubscribe(self, signal):\n while signal in self._downstream:\n self._downstream.remove(signal)\n while signal in self._downstream_reconnect:\n self._downstream_reconnect.remove(signal)",
"def disconnect(self):\n\n if self.connect:\n Disconnect(self.connect)",
"def unsubscribe_from_all(module):\n for name, signal in _signals.items():\n for receiver in list(signal.receivers.values()):\n obj = receiver()\n if obj is not None:\n if inspect.ismethod(obj):\n # find the object to which a method is bound\n self = None\n for name, value in inspect.getmembers(obj):\n if name == '__self__':\n self = value\n # if this is a method which belongs to the module\n if self is not None and self is module:\n signal.disconnect(obj)",
"def deactivate(self):\n SignalPlug.deactivate(self)\n self.killconnections()\n self.killtimers()",
"def _unregisterConnect(self, function):\n self._sig_connect.unsubscribe(function)",
"def disconnect(handler, signal, sender=dispatcher.Any):\n logger.debug('Disconnecting {} from {}({})'.format(handler, signal, sender))\n dispatcher.disconnect(handler, signal, sender)",
"def disconnect(self) -> None:\n ...",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def connect(self):\n for signal, models in six.iteritems(self._registry):\n for model, keys in six.iteritems(models):\n # Local function the current signal is going to be\n # connected to.\n # Defining it dynamically allows us to pass in the current\n # set of keys for the given model, but we have to store\n # a strong reference to it to avoid garbage collection.\n def delete_cache(signal, sender=model, keys=keys):\n cache.delete_many(list(keys))\n signal.connect(delete_cache, sender=model, weak=False, dispatch_uid=signal)",
"def shutdown(self) -> None:\n logger.info(\"Disconnecting instruments...\")\n for instrument in self._config[\"instruments\"]:\n instrument.disconnect()\n logger.info(f\"Shutting down {self}...\")\n self._daemon.shutdown()",
"def disconnect(self) -> None:\n for pin in self.pins:\n pin.disconnect()",
"def disconnect(self):\n _abstract()",
"def disconnect(self):\n _abstract()",
"def _disconnect_buttons(cls):\n try:\n cls.btn_startpause.clicked.disconnect()\n except RuntimeError:\n pass\n\n try:\n cls.btn_stopsave.clicked.disconnect()\n except RuntimeError:\n pass",
"def test_disconnect_multiple(self):\n self.inverter.disconnect()\n self.inverter.disconnect() # Should not raise exception",
"def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()",
"def disconnect(self):\n # Nothing to do\n pass"
]
| [
"0.7635583",
"0.7409263",
"0.71576667",
"0.70118153",
"0.6968156",
"0.6782369",
"0.6688207",
"0.6678641",
"0.6602246",
"0.6402267",
"0.6377517",
"0.63748914",
"0.63205457",
"0.6311291",
"0.63096774",
"0.63037467",
"0.627339",
"0.627339",
"0.627339",
"0.627339",
"0.627339",
"0.6255583",
"0.62334424",
"0.62217957",
"0.62075603",
"0.62075603",
"0.61910343",
"0.6159909",
"0.61535573",
"0.6128535"
]
| 0.8220706 | 0 |
Yield raw feature strings from the inputed file paths | def raw_feature_iterator(file_paths):
for path in file_paths:
with open(path, "r") as fin:
for line in fin:
yield line | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_feature_files(paths: list[str], **kwargs: Any) -> tuple[list[Feature], list[ScenarioTemplate], list[Step]]:\n features = get_features(paths, **kwargs)\n scenarios = sorted(\n itertools.chain.from_iterable(feature.scenarios.values() for feature in features),\n key=lambda scenario: (scenario.feature.name or scenario.feature.filename, scenario.name),\n )\n steps = sorted((step for scenario in scenarios for step in scenario.steps), key=lambda step: step.name)\n return features, scenarios, steps",
"def _generate_features(reader, paths, same_size=False,\n allow_missing_files=False):\n\n shape_determined = False\n for i, path in enumerate(paths):\n if allow_missing_files and not os.path.isfile(path):\n logger.debug(\"... File %s, that does not exist, has been ignored.\", path)\n continue\n\n feature = numpy.atleast_2d(reader(path))\n feature = numpy.ascontiguousarray(feature)\n if not shape_determined:\n shape_determined = True\n dtype = feature.dtype\n shape = list(feature.shape)\n yield (dtype, shape)\n else:\n # make sure all features have the same shape and dtype\n if same_size:\n assert shape == list(feature.shape)\n else:\n assert shape[1:] == list(feature.shape[1:])\n assert dtype == feature.dtype\n\n for value in feature.flat:\n yield value",
"def get_features(files):\n files = files.tolist()\n return np.array([pipeline(file) for file in files])",
"def get_all_features_from_path(path):\n\n data, sr, file_name = get_data(path)\n\n features = get_all_features_from_data(data, sr)\n\n return [file_name] + features",
"def select_files(input_line):\n uri = str(input_line.split(',')[0])\n label = str(input_line.split(',')[1])\n\n yield uri, label",
"def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)",
"def get_image_features(paths: pd.Series) -> np.array:\r\n # Pretrained image classification model to convert images into embeddings\r\n image_model = tf.keras.applications.EfficientNetB7(weights='imagenet',\r\n include_top=False,\r\n input_shape=(IMG_SIZE, IMG_SIZE, 3),\r\n pooling='avg')\r\n image_model = tf.keras.Sequential(\r\n [tf.keras.models.load_model(image_model),\r\n tf.keras.layers.Layer(2560, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Transform paths to files into tf.data.Dataset\r\n input_data = tf.data.Dataset.from_tensor_slices(paths)\r\n # Preprocess images\r\n input_data = input_data.map(process_path, num_parallel_calls=AUTOTUNE)\r\n input_data = configure_for_performance(input_data)\r\n\r\n # Convert all images into embeddings and average colors\r\n features = image_model.predict(input_data,\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Image features extracted. Shape:', features.shape)\r\n\r\n return features",
"def load_features(paths: List[str],\n expected_shape: Optional[tuple] = None) -> List[np.ndarray]:\n data = [] # type: List[np.ndarray]\n for path in paths:\n data.append(load_feature(path, expected_shape))\n return data",
"def get_features(self, feature_type=\"all\"):\n # if exists(path=\"data.csv\"):\n # return pd.read_csv(\"data.csv\")\n # else:\n # reading through directory\n for file_path in self.list_news_path:\n with open(file_path, 'r') as f:\n\n # open document to read and assign to doc\n doc = json.load(f)\n # skip the empty title or body\n if doc['title'] == \"\" or doc['text'] == \"\":\n pass\n else:\n # to extract all data from news content\n if feature_type == \"all\":\n news = doc['title'] + doc['text']\n\n # preprocesses news content\n words = preprocess(news)\n yield words\n\n # to extract title and text as a pair\n elif feature_type == \"pair\":\n title = preprocess(doc[\"title\"])\n body = preprocess(doc['text'])\n yield title, body\n # if not title or not body:\n # pass\n # else:\n # yield title, body\n\n # else you only need either title or body\n else:\n assert feature_type in doc.keys(), \"feature not in the document: \" + file_path\n # without stemming\n # CUSTOM_FILTERS = [lambda x: x.lower(), strip_tags, strip_punctuation, strip_multiple_whitespaces,\n # strip_numeric, remove_stopwords]\n\n feature = doc[feature_type]\n words = preprocess(feature)\n # using alternative preprocessing function\n # words = preprocess_string(words, filters=CUSTOM_FILTERS)\n yield words",
"def _generate_examples(self, files):\n idx = 0\n for filename in files:\n with open(filename) as file:\n for line in file:\n yield idx, {\"text\": line}\n idx += 1",
"def read_ptbtagged(ptbtagged_path: str) -> Iterator[Tuple[TokenSeq, PosSeq]]:\n #do this immediately (first)\n #start generating feature matrices\n \n #read file into an array \n with open(ptbtagged_path) as f:\n file_array = f.readlines()\n file_array.append(\"\\n\")\n array_of_tuples = create_tuples(file_array)\n\n return generator(array_of_tuples)",
"def gen_features(log_file_path: str, out_path: str):\n raise RuntimeError(\"Feature extraction is not supported yet in AutoScheduler dialect\")",
"def examplereader(path, lower=False):\n for line in filereader(path):\n line = line.lower() if lower else line\n tokens = tokens_from_treestring(line)\n tree = Tree.fromstring(line) # use NLTK's Tree\n label = int(line[1])\n trans = transitions_from_treestring(line)\n yield Example(tokens=tokens, tree=tree, label=label, transitions=trans)",
"def readFeatures(self):\n\t\treturn self._fileSystem.readFeatures()",
"def read_data(feature_file, label_file):",
"def listFeatures() :\n global features\n features = [feature.split(\".\")[0] for feature in os.listdir(os.path.abspath(__file__)[:-11])\n if feature.endswith(\".py\") and feature != \"__init__.py\"]",
"def lexf(path) -> \"(lib.Token,)\":\n with open(path, 'r') as source:\n for line in source:\n yield from lexs(line)",
"def test__read_scenario_files(self):\n test_str = '<sequence_demo><adaptivenumericinput />'\n test_result = _read_scenario_files()\n self.assertEqual(test_str, test_result[0:len(test_str)])",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]",
"def getFeatures(filedir):\r\n lbs = getLabels(filedir)\r\n width, height = getSize(filedir)\r\n features = [os.listdir(filedir + 's' + str(lbs[i])) for i in range(len(lbs))]\r\n for i in range(len(lbs)):\r\n for j in range(len(features[i])):\r\n im = Image.open(filedir + 's' + str(lbs[i]) + '/' + features[i][j]) # type(im): <class 'PIL.PpmImagePlugin.PpmImageFIle'>\r\n im = im.convert('L') # type(im): <class 'PIL.Image.Image'>\r\n data = im.getdata() # type(data): <class 'ImagingCore'>\r\n img = np.reshape(list(data), (height, width))\r\n features[i][j] = img\r\n return features",
"def _read(self, path: str):\n num_samples, length = [int(x) for x in path.split(\":\")]\n random.seed(self.seed)\n for _ in range(num_samples):\n tokens, tags = self._sample(length)\n yield self.text_to_instance(tokens, tags)",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def input_fn(self,\n file_pattern: List[Text]):\n root_paths = [x.replace(\"*\", \"\") for x in file_pattern]\n\n file_paths = []\n for root in root_paths:\n file_paths.extend(path_utils.list_dir(root))\n\n dataset = tf.data.TFRecordDataset(file_paths,\n compression_type='GZIP')\n df = convert_raw_dataset_to_pandas(dataset,\n self.schema,\n 100000)\n\n # Separate labels\n X = df[[x for x in df.columns if\n naming_utils.check_if_transformed_feature(x)]]\n y = df[[x for x in df.columns if\n naming_utils.check_if_transformed_label(x)]]\n return X, y",
"def _generate_examples(self, filepath, split):\r\n if self.config.name == \"trex\":\r\n paths = filepath\r\n relations_path = paths[0]\r\n paths = paths[1:]\r\n all_rels = {}\r\n with open(relations_path, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n all_rels[data[\"relation\"]] = data\r\n id_ = -1\r\n for filepath in paths:\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n pred = all_rels.get(data[\"predicate_id\"], {})\r\n for evidences in data[\"evidences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"obj_uri\": str(data[\"obj_uri\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"sub_uri\": str(data[\"sub_uri\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"predicate_id\": str(data[\"predicate_id\"]),\r\n \"sub_surface\": str(evidences[\"sub_surface\"]),\r\n \"obj_surface\": str(evidences[\"obj_surface\"]),\r\n \"masked_sentence\": str(evidences[\"masked_sentence\"]),\r\n \"template\": str(pred.get(\"template\", \"\")),\r\n \"template_negated\": str(pred.get(\"template_negated\", \"\")),\r\n \"label\": str(pred.get(\"label\", \"\")),\r\n \"description\": str(pred.get(\"description\", \"\")),\r\n \"type\": str(pred.get(\"type\", \"\")),\r\n }\r\n elif self.config.name == \"conceptnet\":\r\n id_ = -1\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n if data.get(\"negated\") is not None:\r\n for masked_sentence, negated in zip(data[\"masked_sentences\"], data[\"negated\"]):\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"sub\": str(data.get(\"sub\", \"\")),\r\n \"obj\": str(data.get(\"obj\", \"\")),\r\n \"pred\": str(data[\"pred\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"negated\": str(negated),\r\n }\r\n else:\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"sub\": str(data.get(\"sub\", \"\")),\r\n \"obj\": str(data.get(\"obj\", \"\")),\r\n \"pred\": str(data[\"pred\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"negated\": str(\"\"),\r\n }\r\n elif self.config.name == \"squad\":\r\n id_ = -1\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = json.loads(row)\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"id\": str(data[\"id\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"negated\": str(data.get(\"negated\", \"\")),\r\n \"masked_sentence\": str(masked_sentence),\r\n }\r\n elif self.config.name == \"google_re\":\r\n id_ = -1\r\n paths = filepath\r\n for filepath in paths:\r\n # from https://github.com/facebookresearch/LAMA/blob/master/scripts/run_experiments.py\r\n if \"place_of_birth\" in filepath:\r\n pred = {\r\n \"relation\": \"place_of_birth\",\r\n \"template\": \"[X] was born in [Y] .\",\r\n \"template_negated\": \"[X] was not born in [Y] .\",\r\n }\r\n elif \"date_of_birth\" in filepath:\r\n pred = {\r\n \"relation\": \"date_of_birth\",\r\n \"template\": \"[X] (born [Y]).\",\r\n \"template_negated\": \"[X] (not born [Y]).\",\r\n }\r\n else:\r\n pred = {\r\n \"relation\": \"place_of_death\",\r\n \"template\": \"[X] died in [Y] .\",\r\n \"template_negated\": \"[X] did not die in [Y] .\",\r\n }\r\n with open(filepath, encoding=\"utf-8\") as f:\r\n for row in f:\r\n data = 
json.loads(row)\r\n for masked_sentence in data[\"masked_sentences\"]:\r\n id_ += 1\r\n yield id_, {\r\n \"pred\": str(data[\"pred\"]),\r\n \"sub\": str(data[\"sub\"]),\r\n \"obj\": str(data[\"obj\"]),\r\n \"evidences\": str(data[\"evidences\"]),\r\n \"judgments\": str(data[\"judgments\"]),\r\n \"sub_w\": str(data[\"sub_w\"]),\r\n \"sub_label\": str(data[\"sub_label\"]),\r\n \"sub_aliases\": str(data[\"sub_aliases\"]),\r\n \"obj_w\": str(data[\"obj_w\"]),\r\n \"obj_label\": str(data[\"obj_label\"]),\r\n \"obj_aliases\": str(data[\"obj_aliases\"]),\r\n \"uuid\": str(data[\"uuid\"]),\r\n \"masked_sentence\": str(masked_sentence),\r\n \"template\": str(pred[\"template\"]),\r\n \"template_negated\": str(pred[\"template_negated\"]),\r\n }",
"def _generate_examples(self, **kwargs):\n file_paths = kwargs.get(\"file_paths\")\n if not file_paths:\n raise ValueError(\"Must pass file_paths.\")\n\n for file_path in file_paths:\n for record in SeqIO.parse(file_path, \"fasta\"):\n yield record.id, {\n \"sequence\": str(record.seq),\n \"description\": str(record.description),\n \"id\": str(record.id),\n }",
"def source_test_file_content():\n return 'feature content'",
"def _generate_examples(self, filepath):\n\n key = 1\n with tf.io.gfile.GFile(filepath, \"r\") as f:\n tokens = []\n tags = []\n langs = []\n for line in f:\n line = line.rstrip()\n # pylint: disable=g-explicit-bool-comparison\n if line.startswith(\"-DOCSTART-\") or line == \"\":\n if tokens:\n spans = get_spans(tokens, tags)\n yield key, {\n \"tokens\": tokens,\n \"tags\": tags,\n \"langs\": langs,\n \"spans\": spans\n }\n key += 1\n tokens = []\n tags = []\n langs = []\n else:\n # wikiann data is tab separated\n fields = line.split(\"\\t\")\n # strip out language prefix\n langs.append(fields[0].split(\":\")[0])\n tokens.append(\":\".join(fields[0].split(\":\")[1:]))\n if len(fields) > 1:\n tags.append(fields[-1])\n else:\n # examples have no label in test set\n tags.append(\"O\")\n if tokens:\n spans = get_spans(tokens, tags)\n yield key, {\n \"tokens\": tokens,\n \"tags\": tags,\n \"langs\": langs,\n \"spans\": spans\n }"
]
| [
"0.6346424",
"0.6260554",
"0.6087644",
"0.60024726",
"0.5857876",
"0.5802286",
"0.579959",
"0.57831514",
"0.5770136",
"0.57632935",
"0.57309633",
"0.5721869",
"0.5705221",
"0.56793064",
"0.5656468",
"0.5652713",
"0.56373656",
"0.56315863",
"0.5627689",
"0.5627689",
"0.559542",
"0.5585908",
"0.5580611",
"0.5572649",
"0.5572649",
"0.55630326",
"0.55596906",
"0.5552804",
"0.55417645",
"0.5535192"
]
| 0.7716821 | 0 |
Vectorize a single sample of raw features and write to a large numpy file | def vectorize(irow, raw_features_string, X_path, y_path, extractor, nrows):
raw_features = json.loads(raw_features_string)
feature_vector = extractor.process_raw_features(raw_features)
y = np.memmap(y_path, dtype=np.float32, mode="r+", shape=nrows)
y[irow] = raw_features["label"]
X = np.memmap(X_path, dtype=np.float32, mode="r+", shape=(nrows, extractor.dim))
X[irow] = feature_vector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vectorize(irow, raw_features_string, X_path, y_path, nrows):\n extractor = PEFeatureExtractor()\n raw_features = json.loads(raw_features_string)\n feature_vector = extractor.process_raw_features(raw_features)\n\n y = np.memmap(y_path, dtype=np.float32, mode=\"r+\", shape=nrows)\n y[irow] = raw_features[\"label\"]\n\n X = np.memmap(X_path, dtype=np.float32, mode=\"r+\", shape=(nrows, extractor.dim))\n X[irow] = feature_vector",
"def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)",
"def take_unique_features_large(filename_data,filename_features,filename_data_save,filename_features_save,rm_features=None,block_size=1000):\n # read the features from file\n features_org=np.loadtxt(filename_features,delimiter='\\t',dtype=object)\n \n # create a new file to save processed data\n filename_data_save_handle=open(filename_data_save,'w')\n filename_data_save_handle.close()\n # open the new file to save data sequentially\n filename_data_save_handle=open(filename_data_save,'a')\n \n filename_data_handle=open(filename_data,'r')\n\n count=0\n start=0\n data_block=[]\n end_of_file=False\n print(\"Start processing ...\")\n while not end_of_file:\n line=filename_data_handle.readline()\n if line=='':\n end_of_file=True\n else:\n if start==0:\n data_block=[]\n # remove \"\\n\" at the end\n data_line=line[0:-1]\n # split the string to substrings\n data_line=data_line.split('\\t')\n # append the current line to the block \n data_block.append(data_line)\n # increase total count\n count=count+1\n # get a full block or partial block at the end\n if start==block_size-1 or (end_of_file and start!=0):\n print(\"processing the %d-th line ...\" %count)\n \n ### process the block ###\n data_block=np.array(data_block,dtype=str)\n data_block,features=take_unique_features(data_block,features_org,rm_features)\n # append to file\n np.savetxt(filename_data_save_handle,data_block,fmt='%s',delimiter='\\t')\n ### finished processing the block ###\n \n # reset the counts of lines in the block (0-based)\n start=0\n else:\n start=start+1\n filename_data_handle.close() \n filename_data_save_handle.close()\n print(\"Done! %d lines are processed.\" %count)\n print(\"The features are:\")\n print(features)\n\n # save feature list\n np.savetxt(filename_features_save,features,fmt='%s',delimiter='\\t')",
"def create_sample_vectors(cleaned_data_directory, out_vectors_path):\n vectors = []\n\n for filename in os.listdir(cleaned_data_directory):\n if not filename.endswith(\".txt\"):\n continue\n\n path = os.path.join(cleaned_data_directory, filename)\n f = open(path, mode='r', encoding='utf8')\n\n print(\"Processing\", path)\n\n lang = filename[:2]\n lang_number = language_codes.index(lang)\n\n print(f\"\\tLanguage: {lang} ({lang_number})\")\n print(\"\\tReading...\", end=' ')\n\n file_content = f.read()\n content_length = len(file_content)\n\n print(\"done.\")\n print(\"\\tExtracting vectors...\", end=' ')\n\n sample_start_index = 0\n count = 0\n\n while sample_start_index + text_sample_size < content_length:\n sample = get_sample(file_content, sample_start_index, text_sample_size)\n input_vector = build_input_vector(sample)\n vector = input_vector + [lang_number]\n vectors.append(vector)\n sample_start_index += text_sample_size\n count += 1\n\n print(\"done.\")\n print(f\"\\tExtracted {count} vectors.\")\n\n del file_content\n\n print(f\"Total {len(vectors)} vectors.\")\n\n np_vectors = np.array(vectors, dtype=np.uint16)\n np.random.shuffle(np_vectors)\n\n print(f\"Converted to NumPy array, shape: {np_vectors.shape}.\")\n\n np.savez_compressed(out_vectors_path, data=np_vectors)\n\n print(f\"Saved to {out_vectors_path}.\")",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def create_vectorized_features(data_dir, feature_version=2):\n extractor = PEFeatureExtractor(feature_version)\n\n print(\"Vectorizing training set\")\n X_path = os.path.join(data_dir, \"X_train.dat\")\n y_path = os.path.join(data_dir, \"y_train.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)\n\n print(\"Vectorizing test set\")\n X_path = os.path.join(data_dir, \"X_test.dat\")\n y_path = os.path.join(data_dir, \"y_test.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)",
"def memmap_feats(features):\n features = np.array(features)\n dtype = features.dtype\n feats_shape = features.shape\n\n outfile = TemporaryFile()\n fp = np.memmap(outfile, dtype=dtype, mode='w+', shape=feats_shape)\n fp[:] = features[:]\n fp.flush()\n del features\n del fp\n logging.info('Features memory mapped features to temporary file: %s' % outfile)\n\n #read back in again without using any memory\n features = np.memmap(outfile, dtype=dtype, mode='r', shape=feats_shape)\n return features",
"def save_feature(self):\n import scipy.io as sio\n testdp = self.test_data_provider\n num_batches = len(testdp.batch_range)\n print 'There are ' + str(testdp.get_num_batches(self.data_path)) + ' in directory'\n if self.test_data_provider.batch_size > 0:\n num_batches = (num_batches - 1)/ self.test_data_provider.batch_size + 1\n if self.test_one:\n num_batches = min(num_batches, 1)\n print 'There are ' + str( num_batches ) + ' in range'\n iu.ensure_dir(self.save_feature_path)\n feature_name = self.op.get_value('save_feature_name')\n feature_dim = self.model_state['layers'][self.feature_idx]['outputs']\n print 'Feature dim is %d' % feature_dim\n for b in range(num_batches):\n epoch, b_num, data = self.get_next_batch(train=False)\n print ' Start writing batch......\\t' + str(b_num)\n num_data = data[0].shape[-1]\n data += [n.zeros((num_data, feature_dim), dtype=n.single)]\n save_name = 'batch_feature_' + str(b_num) + '_' + feature_name \n save_path = iu.fullfile(self.save_feature_path, save_name)\n self.libmodel.startFeatureWriter(data, self.feature_idx)\n self.finish_batch()\n d = dict()\n d['X'] = data[-1].transpose()\n d['batch_num'] = b_num\n d['Y'] = data[1]\n cur_batch_indexes = self.test_data_provider.data_dic['cur_batch_indexes']\n # d['Y_other'] = data[2:-1] if len(data) > 3 else []\n ####### WARN BEGIN ################\n # for human eva fake experiments\n # d['images_path'] = [self.test_data_provider.images_path[x] for x in cur_batch_indexes]\n # d['Y'] = np.concatenate(map(lambda x:self.test_data_provider.batch_meta['RelativeSkel_Y3d_mono_body_backup'][...,x].reshape((-1,1),order='F'), cur_batch_indexes),axis=1)\n print d['Y'].shape\n d['cur_batch_indexes'] = cur_batch_indexes\n ####### WARN END ################\n print 'The len of data is ' + str(len(data))\n print 'The shape of X is' + str(d['X'].shape)\n print 'The shape of Y is' + str(d['Y'].shape)\n ##sio.savemat(save_path, d)\n pickle(save_path, d)",
"def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n words_vec, features = countize(row[3], row[2], words_vec, features)\n try:\n words_vec, features = countize(row[6], row[2], words_vec, features)\n except:\n pass\n pickle.dump(words_vec, open(\"ind_vectors.data\", \"wb\"))\n pickle.dump(features, open(\"i_features.data\", \"wb\"))\n return words_vec, features",
"def _synth_output(self, path, files):\n features = np.empty((0, 6))\n for i in range(len(files)):\n train_set = np.load(f'{path}angles/{files[i]}.npy')\n features = np.concatenate((features, train_set), axis=0)\n self.output = F.normalize(torch.tensor(np.array(features[:, :5]), dtype=torch.float32))",
"def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))",
"def generate_data(filename_in, filename_out):\n file_in = open(filename_in, 'r')\n file_out = open(filename_out, 'w+')\n\n df = pd.read_csv(file_in, header=None, sep=' ', quoting=csv.QUOTE_NONE)\n x = df.iloc[:, 0].values\n y_class = df.iloc[:, -1].values\n file_in.close()\n\n y_class = np.where(y_class == 'O', 0, 1)\n\n x_features = []\n size_x = len(x)\n for i in range(3, size_x):\n if i % 5000 == 0:\n print(i, \"/\", size_x)\n x_features.append(features(x[i-2], x[i-1], x[i], y_class[i]))\n\n df_write = pd.DataFrame(x_features)\n\n tab = [x for x in range(1, NUMBER_OF_FEATURE + 2)]\n df_write.columns = tab\n write_csv(df_write, file_out)\n file_out.close()",
"def convert_dataset_to_libsvm(samples, path):\n with open(path, 'wb') as f:\n for sample in samples:\n # Write sample's label.\n f.write('%d' % sample[-1])\n\n # Write sample's features.\n for i, feature in enumerate(sample[:-1], 1): # Write features.\n # Convert to int if no data will be lost.\n if feature == int(feature):\n f.write(' %d:%d' % (i, feature))\n # Else stick with float.\n else:\n f.write(' %d:%f' % (i, sample))\n\n f.write('\\n')",
"def gen_feats_file(data_feats,ids,feat_filename):\n if not os.path.isfile(feat_filename) :\n new_feats=np.empty((0,2))\n for iid in ids:\n print(iid)\n indices = [i for i, v in enumerate(data_feats[:,0]) if iid in v]\n new_feats=np.vstack((new_feats,data_feats[indices,:]))\n np.savetxt(feat_filename,new_feats,fmt=\"%s\")",
"def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')",
"def file_based_convert_examples_to_features(examples,label_list,output_file):\n writer = tf.python_io.TFRecordWriter(output_file)\n for(ex_index, example) in enumerate(examples):\n if ex_index%10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list)\n \n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n features = {\n \"\":tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))),\n }\n\n tf_example = tf.train.Example(features=tf.train.Features(features=features))\n writer.write(tf_example.SerializeToString())",
"def extract_feats_to_file(npy_path, audio_path, featurizer):\n # Returns a (time, feature) NumPy array\n data = featurizer.file_to_feats(audio_path)\n np.save(npy_path, data)",
"def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)",
"def write_feature_weight(weights,features,lambda1s,filename):\n # example:\n #weights=np.asarray([[1.1,2.2,3.4],[5.5,6.6,7.7]])\n #features=np.asarray(['f1','f2','f3'],dtype=object)\n #lambda1s=np.asarray([1.0,2.0])\n #write_feature_weight(weights,features,lambda1s,filename='test.txt')\n \n features=np.insert(features,0,'lambda')\n weights=np.asarray(weights,dtype=object)\n lambda1s=np.asanyarray(lambda1s,dtype=object)\n lambda1s.resize((lambda1s.shape[0],1))\n lambda1s_weights=np.hstack((lambda1s,weights))\n features.resize((1,features.shape[0]))\n features_lambda1s_weights=np.vstack((features,lambda1s_weights))\n np.savetxt(filename,features_lambda1s_weights,fmt='%s',delimiter='\\t')",
"def write_vectors(self, filename):\n svu.write_realvectors(self,filename)",
"def vectorize_subset(X_path, y_path, raw_feature_paths, nrows):\n # Create space on disk to write features to\n extractor = PEFeatureExtractor()\n X = np.memmap(X_path, dtype=np.float32, mode=\"w+\", shape=(nrows, extractor.dim))\n y = np.memmap(y_path, dtype=np.float32, mode=\"w+\", shape=nrows)\n del X, y\n\n # Distribute the vectorization work\n pool = multiprocessing.Pool()\n argument_iterator = ((irow, raw_features_string, X_path, y_path, nrows)\n for irow, raw_features_string in enumerate(raw_feature_iterator(raw_feature_paths)))\n for _ in tqdm.tqdm(pool.imap_unordered(vectorize_unpack, argument_iterator), total=nrows):\n pass",
"def test_load_and_featurize_save_csv():\n f = ImageFeaturizer()\n name, ext = os.path.splitext(CSV_NAME_MULT)\n check_array_path = \"{}_{}\".format(name, 'squeezenet_depth-1_output-512')\n f.featurize(save_csv=True, save_features=True, omit_time=True,\n **LOAD_DATA_ARGS_MULT)\n full_check = \"{}{}{}\".format(check_array_path, '_full', ext)\n feature_check = \"{}{}{}\".format(check_array_path, '_features_only', ext)\n f.save_csv(save_features=True, omit_time=True)\n try:\n assert os.path.isfile(full_check)\n assert os.path.isfile(feature_check)\n finally:\n remove_generated_paths(assert_not=False)\n if os.path.isfile(\"{}{}{}\".format(check_array_path, '_features_only', ext)):\n os.remove(\"{}{}{}\".format(check_array_path, '_features_only', ext))\n if os.path.isfile(\"{}{}{}\".format(check_array_path, '_full', ext)):\n os.remove(\"{}{}{}\".format(check_array_path, '_full', ext))",
"def save_data(features, labels, mask, file_name):\n label = labels[mask]\n label = label.reshape((len(label), 1))\n data = np.concatenate((features[mask, :], label), axis = 1)\n np.save(file_name, data)",
"def read_vectorized_features(data_dir, subset=None, feature_version=2):\n if subset is not None and subset not in [\"train\", \"test\"]:\n return None\n\n extractor = PEFeatureExtractor(feature_version)\n ndim = extractor.dim\n X_train = None\n y_train = None\n X_test = None\n y_test = None\n\n if subset is None or subset == \"train\":\n X_train_path = os.path.join(data_dir, \"X_train.dat\")\n y_train_path = os.path.join(data_dir, \"y_train.dat\")\n y_train = np.memmap(y_train_path, dtype=np.float32, mode=\"r\")\n N = y_train.shape[0]\n X_train = np.memmap(X_train_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"train\":\n return X_train, y_train\n\n if subset is None or subset == \"test\":\n X_test_path = os.path.join(data_dir, \"X_test.dat\")\n y_test_path = os.path.join(data_dir, \"y_test.dat\")\n y_test = np.memmap(y_test_path, dtype=np.float32, mode=\"r\")\n N = y_test.shape[0]\n X_test = np.memmap(X_test_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"test\":\n return X_test, y_test\n\n return X_train, y_train, X_test, y_test",
"def createFeatureMap(features, filename=\"xgb.fmap\"):\n with open(filename, 'w') as outfile:\n i = 0\n for feat in features:\n outfile.write('{0}\\t{1}\\tq\\n'.format(i, feat))\n i = i + 1",
"def get_feature_0(data_points:np.array, save_name = ''):\n def pair_d(M):\n # extract the upper triangle of the pairwise distance matrix\n # upper_tri() in functions.py\n d = [upper_tri(pairwise_distances(M[:,i].reshape(-1,1))) for i in range(M.shape[1])]\n # Unlist the list and convert it to an array \n vec = np.array(list(chain.from_iterable(d))).reshape(-1,1)\n return vec\n \n start = time.time()\n # apply pairwise function to all samples \n d = [pair_d(data_points[i]) for i in range(data_points.shape[0])]\n feature0 = np.array(d).reshape(data_points.shape[0],-1)\n tm = round(time.time()-start,4)\n \n if save_name != '':\n np.savetxt(fname = output_dir + save_name + '.csv', X=feature0, delimiter=',')\n #print(\"-----Feature set 0 shape:\", feature0.shape)\n return(feature0, tm)",
"def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)",
"def output(self, doc):\n structfeaures = self.build_feature(doc)\n for f in structfeaures:\n yield np.array(f).astype(np.float32)",
"def create_test_file(filename, array):\n array = np.ma.asanyarray(array)\n crs = rasterio.crs.CRS(init='epsg:4326')\n transform = from_origin(52, 4, 10, 10)\n with rasterio.open(\n filename,\n mode='w',\n driver='GTiff',\n width=array.shape[1],\n height=array.shape[2],\n count=array.shape[0],\n dtype=array.dtype,\n crs=crs,\n transform=transform) as dataset:\n for band, data in enumerate(array, start=1):\n dataset.write(data, band)",
"def output(self, doc):\n contextfeaures = self.build_feature(doc)\n for f in contextfeaures:\n yield np.array(f).astype(np.float32)"
]
| [
"0.62940425",
"0.6106697",
"0.60896504",
"0.5908478",
"0.586378",
"0.5802398",
"0.57704306",
"0.5692055",
"0.5680772",
"0.56781024",
"0.5660835",
"0.5647756",
"0.563185",
"0.56275016",
"0.5625246",
"0.56193984",
"0.5572783",
"0.5572056",
"0.55287766",
"0.55282754",
"0.5521531",
"0.5521473",
"0.54971755",
"0.54791164",
"0.54621094",
"0.5428442",
"0.54188704",
"0.5413468",
"0.5389675",
"0.5380783"
]
| 0.6187531 | 1 |
Pass through function for unpacking vectorize arguments | def vectorize_unpack(args):
return vectorize(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vector_b(args):\n\treturn [f(arg) for arg in args]",
"def vectorize_inputs(f):\n return lambda args: f(*np.hsplit(args, args.shape[1]))",
"def vectorize_method(func):\n @wraps(func)\n def inner(obj, *iargs, **ikwargs):\n\n def functionalized_method(*args, **kwargs):\n return func(obj, *args, **kwargs)\n\n return np.vectorize(functionalized_method, otypes=[np.dtype('c16')])(*iargs, **ikwargs)\n #return np.vectorize(functionalized_method)(*iargs, **ikwargs)\n return inner",
"def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper",
"def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper",
"def _lock_in_non_vectorized_args(fn, arg_structure, flat_core_ndims, flat_args):\n\n # Extract the indices and values of args where core_ndims is not `None`.\n (vectorized_arg_indices,\n vectorized_arg_core_ndims,\n vectorized_args) = [], [], []\n if any(nd is not None for nd in flat_core_ndims):\n vectorized_arg_indices, vectorized_arg_core_ndims, vectorized_args = zip(*[\n (i, nd, tf.convert_to_tensor(t))\n for i, (nd, t) in enumerate(zip(flat_core_ndims, flat_args))\n if nd is not None])\n\n vectorized_arg_index_set = set(vectorized_arg_indices)\n\n def fn_of_vectorized_args(vectorized_args):\n with tf.name_scope('fn_of_vectorized_args'):\n vectorized_args_by_index = dict(\n zip(vectorized_arg_indices, vectorized_args))\n # Substitute the vectorized args into the original argument\n # structure.\n new_args_with_original_structure = tf.nest.pack_sequence_as(\n arg_structure, [vectorized_args_by_index[i]\n if i in vectorized_arg_index_set\n else v for (i, v) in enumerate(flat_args)])\n return tf.nest.map_structure(\n # If `fn` returns any Distribution instances, ensure that their\n # parameter batch shapes are padded to align after vectorization.\n _maybe_rectify_parameter_shapes,\n fn(*new_args_with_original_structure))\n\n return (vectorized_arg_core_ndims,\n vectorized_args,\n fn_of_vectorized_args)",
"def func(self, name, vecs):\n raise NotImplementedError",
"def create_args(func):\n # Get a dictionary of the params of the function\n params = dict(inspect.signature(func).parameters)\n # We will always use z for the vector input so delete that from the dict\n del params['z']\n return {k: peturb(v) for k, v in params.items()}",
"def apply(self, vec):\n raise NotImplementedError",
"def sp_mulvec_wrap(fn):\n\n @functools.wraps(fn)\n def qarrayed_fn(self, other):\n out = fn(self, other)\n if isinstance(other, qarray):\n out = qarray(out)\n return out\n\n return qarrayed_fn",
"def vectorize(self):\n return vectorize(self)",
"def __call__(self, *args):\n result = self\n if len(args) == 1:\n if np.isscalar(args[0]) or args[0] is None:\n result -= args[0]\n else:\n for i in args[0]:\n result -= i\n return result\n if np.isscalar(args[0]) or args[0] is None:\n result += args[0]\n else:\n for i in args[0]:\n result += i\n for i in args[1:]:\n if np.isscalar(i) or i is None:\n result -= i\n else:\n for j in i:\n result -= j\n return result",
"def Generate_Custom(f, n, m):\n return np.fromfunction(np.vectorize(f, otypes=[float]), (n,m))",
"def vectorize(point_a:tuple, point_b:tuple)->tuple:\n return (point_b[0] - point_a[0], point_b[1] - point_a[1])",
"def func(self, name, vecs):\n arity = len(vecs)\n funcobj = self.get_ground_vector('!Func:{}:{}'.format(arity, name))\n\n argobjs = [\n associate_comp(\n self.get_ground_vector('!Arg:{}:{}:{}'.format(arity, name, i)),\n vec)\n for i, vec in enumerate(vecs) ]\n\n result = normalize_comp(\n self.func_weights @\n torch.cat([\n funcobj,\n merge(argobjs),\n merge(vecs),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result",
"def func(self, name, vecs):\n arity = len(vecs)\n funcobj = self.get_ground_vector('!Func:{}:{}'.format(arity, name))\n\n argobjs = [\n associate_comp(\n self.get_ground_vector('!Arg:{}:{}:{}'.format(arity, name, i)),\n vec)\n for i, vec in enumerate(vecs) ]\n\n result = normalize_comp(\n self.func_weights @\n torch.cat([\n funcobj,\n merge(argobjs),\n merge(vecs),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result",
"def vecToFunc(vector):\n def f(x):\n f = 0\n for i in range(len(vector)):\n f += vector[i]*x**i\n return f\n return f",
"def proVec(*args):\r\n resultado = []\r\n i,j,k = (args[0][1] * args[1][2]) - (args[0][2] * args[1][1]) , ((args[0][0] * args[1][2]) - (args[0][2] * args[1][0])) * (-1) , (args[0][0] * args[1][1]) - (args[0][1] * args[1][0])\r\n resultado.append(i)\r\n resultado.append(j)\r\n resultado.append(k)\r\n return resultado",
"def vectorize(self, *args, **kwargs):\n kwargs['add_start'] = False\n kwargs['add_end'] = False\n return TorchRankerAgent.vectorize(self, *args, **kwargs)",
"def check_uniform_augment_cpp(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [transforms, num_ops], _ = parse_user_args(method, *args, **kwargs)\n type_check(num_ops, (int,), \"num_ops\")\n check_positive(num_ops, \"num_ops\")\n\n if num_ops > len(transforms):\n raise ValueError(\"num_ops is greater than transforms list size\")\n type_check_list(transforms, (TensorOp,), \"tensor_ops\")\n\n return method(self, *args, **kwargs)\n\n return new_method",
"def formParameters(unaryFunctions, binaryFunctions):",
"def func(self, name, vecs):\n arity = len(vecs)\n funcobj = self.get_ground_vector('!Func:{}:{}'.format(arity, name))\n\n argobjs = [\n associate(\n self.get_ground_vector('!Arg:{}:{}:{}'.format(arity, name, i)),\n vec)\n for i, vec in enumerate(vecs) ]\n\n result = normalize(\n self.func_weights @\n torch.cat([\n funcobj,\n merge(argobjs),\n merge(vecs),\n ]).reshape(-1, self.hrr_size))\n\n return result",
"def vectorMultiply(v, f):\n return [x * f for x in v]",
"def scalar_vector_ext(alpha, v, a, b):\n return [alpha * v[0],\n alpha * v[0] * a + b]",
"def __getitem__(self, *args):\n return _ida_hexrays.qvector_carg_t___getitem__(self, *args)",
"def apply(self: T, target: types.Vector) -> types.Vector:",
"def apply(self, *args: _Data) -> _Data:",
"def __getitem__(self, *args):\n return _ida_hexrays.qvector_lvar_t___getitem__(self, *args)",
"def numeric(*args):",
"def _scalar_vectorized(scalar, M):\n return scalar[:, np.newaxis, np.newaxis]*M"
]
| [
"0.6970474",
"0.6857698",
"0.64146477",
"0.62835205",
"0.62835205",
"0.6027144",
"0.5954756",
"0.59166986",
"0.5904902",
"0.5775839",
"0.5764978",
"0.5747971",
"0.5725748",
"0.5675826",
"0.5659955",
"0.5659955",
"0.5634167",
"0.5625573",
"0.5567981",
"0.551946",
"0.5516323",
"0.5510443",
"0.55039036",
"0.5497885",
"0.54974097",
"0.5473559",
"0.5470727",
"0.54494554",
"0.54490495",
"0.5436229"
]
| 0.8652428 | 0 |
Create feature vectors from raw features and write them to disk | def create_vectorized_features(data_dir, feature_version=2):
extractor = PEFeatureExtractor(feature_version)
print("Vectorizing training set")
X_path = os.path.join(data_dir, "X_train.dat")
y_path = os.path.join(data_dir, "y_train.dat")
raw_feature_paths = [os.path.join(data_dir, "train_features_{}.jsonl".format(i)) for i in range(6)]
nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])
vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)
print("Vectorizing test set")
X_path = os.path.join(data_dir, "X_test.dat")
y_path = os.path.join(data_dir, "y_test.dat")
raw_feature_paths = [os.path.join(data_dir, "test_features.jsonl")]
nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])
vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()",
"def _create_feature_vec():\n\tnum_tags = NGRAM_TUPLE[0]\n\tfvec = []\n\tfor _, size in FEATURE_TUPLE:\n\t\tfvec.append(np.zeros((num_tags, size)))\n\n\t# Append tag ngram weights to end\n\tfvec.append(np.zeros((num_tags, num_tags)))\n\treturn fvec",
"def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n words_vec, features = countize(row[3], row[2], words_vec, features)\n try:\n words_vec, features = countize(row[6], row[2], words_vec, features)\n except:\n pass\n pickle.dump(words_vec, open(\"ind_vectors.data\", \"wb\"))\n pickle.dump(features, open(\"i_features.data\", \"wb\"))\n return words_vec, features",
"def create_sample_vectors(cleaned_data_directory, out_vectors_path):\n vectors = []\n\n for filename in os.listdir(cleaned_data_directory):\n if not filename.endswith(\".txt\"):\n continue\n\n path = os.path.join(cleaned_data_directory, filename)\n f = open(path, mode='r', encoding='utf8')\n\n print(\"Processing\", path)\n\n lang = filename[:2]\n lang_number = language_codes.index(lang)\n\n print(f\"\\tLanguage: {lang} ({lang_number})\")\n print(\"\\tReading...\", end=' ')\n\n file_content = f.read()\n content_length = len(file_content)\n\n print(\"done.\")\n print(\"\\tExtracting vectors...\", end=' ')\n\n sample_start_index = 0\n count = 0\n\n while sample_start_index + text_sample_size < content_length:\n sample = get_sample(file_content, sample_start_index, text_sample_size)\n input_vector = build_input_vector(sample)\n vector = input_vector + [lang_number]\n vectors.append(vector)\n sample_start_index += text_sample_size\n count += 1\n\n print(\"done.\")\n print(f\"\\tExtracted {count} vectors.\")\n\n del file_content\n\n print(f\"Total {len(vectors)} vectors.\")\n\n np_vectors = np.array(vectors, dtype=np.uint16)\n np.random.shuffle(np_vectors)\n\n print(f\"Converted to NumPy array, shape: {np_vectors.shape}.\")\n\n np.savez_compressed(out_vectors_path, data=np_vectors)\n\n print(f\"Saved to {out_vectors_path}.\")",
"def create_feature_vector(self, files=[], name=\"\"):\n\n if( len(files)==0 ):\n return\n\n epsilon = 1e-8\n set = []\n\n #iterating all files obtaining the significant data to compute the feature vectors\n for file in files:\n\n #reading the csv files and keeping the first 3 columns (x,y,time)\n file_data = pd.read_csv(file)\n file_data = file_data.to_numpy()\n data = np.zeros((file_data.shape[0],7))\n data[:,0:3] = file_data[:,0:3]\n\n #computing the other interesting features\n angle = np.arctan(data[:,1]/(data[:,0]+epsilon))\n velocity = np.sqrt( np.square(data[:,1]) + np.square(data[:,0]) )\n log_curvature = np.log10( velocity/(angle+epsilon) )\n acceleration = np.sqrt( np.square(velocity) + np.square(velocity*angle) )\n\n #assigning the new computed features\n data[:,3] = angle\n data[:,4] = velocity\n data[:,5] = log_curvature\n data[:,6] = acceleration\n\n #normalizing the data\n data = self.normalization(data)\n set.append(data)\n\n return set",
"def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)",
"def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')",
"def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)",
"def createFeatureMap(features, filename=\"xgb.fmap\"):\n with open(filename, 'w') as outfile:\n i = 0\n for feat in features:\n outfile.write('{0}\\t{1}\\tq\\n'.format(i, feat))\n i = i + 1",
"def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)",
"def extract_feature_vectors(model, data_loader, parameters, features_file_path):\n feature_vectors, label_vectors = [], []\n\n # Set model to evaluation mode\n model.eval()\n\n # Show progress bar while iterating over mini-batches\n with tqdm(total=len(data_loader)) as progress_bar:\n for i, (X_batch, Y_batch) in enumerate(data_loader):\n\n # Dimensions of the input Tensor\n batch_size, channels, height, width = X_batch.size()\n\n # If GPU available, enable CUDA on data\n if parameters.cuda:\n X_batch = X_batch.cuda()\n Y_batch = Y_batch.cuda()\n\n # Wrap the input tensor in a Torch Variable\n X_batch_variable = Variable(X_batch, volatile=True)\n\n # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features\n Y_predicted, features = model(X_batch_variable)\n\n # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to\n # CPU, and convert it to a NumPy array\n features_numpy = features.data.cpu().numpy()\n\n # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array\n Y_numpy = Y_batch.cpu().numpy()\n\n # For each example in the batch, record its features and labels\n for j in range(batch_size):\n feature_vectors.append(features_numpy[j,:])\n label_vectors.append(Y_numpy[j,:])\n\n progress_bar.update()\n\n utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)",
"def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os.path.isfile(label_file_name):\n prettyPrint(\"Feature vector files {0} could not be found. Generating from scratch instead ...\".format(feat_file_name), color.CYAN)\n return None, None\n with open(feat_file_name, 'r') as f:\n feat_vec = pickle.load(f)\n with open(label_file_name, 'r') as f:\n labels = pickle.load(f)\n\n prettyPrint (\"Done loading feature vectors.\", color.CYAN)\n return feat_vec, labels",
"def write_svm_features(clf, vectorizer, round=1, filename=\"features\"):\n\n f = open(\"%s-round%d.txt\" % (filename, round), \"w\")\n weight_feature_pairs = zip(clf.coef_.tolist()[0], vectorizer.feature_names_)\n weight_feature_pairs.sort(key=lambda x:abs(x[0]), reverse=True)\n for weight, word in weight_feature_pairs:\n f.write(\"%s\\t%g\\n\" % (word, weight))\n f.close()",
"def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(feat_file_name, label_file_name), color.CYAN)\n\n #Save feature vector to disk\n with open(feat_file_name, 'w') as f:\n pickle.dump(feat_vec, f)\n #Save label file\n with open(label_file_name, 'w') as f:\n pickle.dump(labels, f)",
"def generate_features(headerfile_path, zipfile_path, features_to_use,\n custom_script_path, is_test, already_featurized,\n in_docker_container):\n if already_featurized:\n # Read in features from CSV file\n objects = parse_prefeaturized_csv_data(headerfile_path)\n else:\n # Parse header file\n (features_to_use, fname_class_dict, fname_class_science_features_dict,\n fname_metadata_dict) = parse_headerfile(headerfile_path,\n features_to_use)\n input_params_list = generate_featurize_input_params_list(\n features_to_use, fname_class_dict,\n fname_class_science_features_dict, fname_metadata_dict,\n zipfile_path, custom_script_path, is_test)\n # TO-DO: Determine number of cores in cluster:\n res = featurize_celery_task.chunks(input_params_list,\n cfg.N_CORES).delay()\n res_list = res.get(timeout=100)\n objects = []\n for line in res_list:\n for el in line:\n short_fname, new_feats = el\n if short_fname in fname_metadata_dict:\n all_features = dict(\n list(new_feats.items()) +\n list(fname_metadata_dict[short_fname].items()))\n else:\n all_features = new_feats\n objects.append(all_features)\n return objects",
"def features_to_db(db, training_dir, test_dir, label_file):\n logger.debug(\"Getting Labels\")\n labels = get_labels(label_file)\n logger.debug(\"Extracting training features\")\n train_features = extract_from_dir(training_dir)\n logger.debug(\"Saving training features in db\")\n data_source = []\n with db.atomic():\n for name, features in train_features:\n features[\"name\"] = name + \"_train\"\n features[\"label\"] = labels[name]\n data_source.append(features)\n if len(data_source) == 1000:\n Feature.insert_many(data_source).execute()\n data_source = []\n if len(data_source):\n Feature.insert_many(data_source).execute()\n\n logger.debug(\"Extracting test features\")\n test_features = extract_from_dir(test_dir)\n logger.debug(\"Saving test features in db\")\n data_source = []\n with db.atomic():\n for name, features in test_features:\n features[\"name\"] = name\n data_source.append(features)\n if len(data_source) == 1000:\n Feature.insert_many(data_source).execute()\n data_source = []\n if len(data_source):\n Feature.insert_many(data_source).execute()",
"def vectorize(irow, raw_features_string, X_path, y_path, nrows):\n extractor = PEFeatureExtractor()\n raw_features = json.loads(raw_features_string)\n feature_vector = extractor.process_raw_features(raw_features)\n\n y = np.memmap(y_path, dtype=np.float32, mode=\"r+\", shape=nrows)\n y[irow] = raw_features[\"label\"]\n\n X = np.memmap(X_path, dtype=np.float32, mode=\"r+\", shape=(nrows, extractor.dim))\n X[irow] = feature_vector",
"def pack_features_vector(features, labels):\n features = tf.stack(list(features), axis=1)\n return features, labels",
"def memmap_feats(features):\n features = np.array(features)\n dtype = features.dtype\n feats_shape = features.shape\n\n outfile = TemporaryFile()\n fp = np.memmap(outfile, dtype=dtype, mode='w+', shape=feats_shape)\n fp[:] = features[:]\n fp.flush()\n del features\n del fp\n logging.info('Features memory mapped features to temporary file: %s' % outfile)\n\n #read back in again without using any memory\n features = np.memmap(outfile, dtype=dtype, mode='r', shape=feats_shape)\n return features",
"def generateFeatures(self, data):\n pass",
"def create_Tf_matrix(\n corpus,\n filename_npz=\"../data/tfidf/data_tf.npz\",\n filename_features=\"../data/tfidf/data_feature_names.pkl\",\n):\n\n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print(\"-Vectorized matrix, \", X.toarray().shape)\n print(\" first line:\")\n print(X.toarray()[0])\n print(\"- Nombre de features :\" + str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], \" ...\")\n\n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features)\n print(\"tf feature names - saved\")\n sparse.save_npz(filename_npz, X)\n print(\"tf matrix:\", filename_npz, \" - saved\")",
"def write_features(config_file):\n features = get_all_features(config_file)\n with FEATURES_PATH.open(\"w\") as writer:\n for feature in features:\n writer.write(f\"{feature}\\n\")",
"def pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis=1)\n return features, labels",
"def pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis=1)\n return features, labels",
"def read_vectorized_features(data_dir, subset=None, feature_version=2):\n if subset is not None and subset not in [\"train\", \"test\"]:\n return None\n\n extractor = PEFeatureExtractor(feature_version)\n ndim = extractor.dim\n X_train = None\n y_train = None\n X_test = None\n y_test = None\n\n if subset is None or subset == \"train\":\n X_train_path = os.path.join(data_dir, \"X_train.dat\")\n y_train_path = os.path.join(data_dir, \"y_train.dat\")\n y_train = np.memmap(y_train_path, dtype=np.float32, mode=\"r\")\n N = y_train.shape[0]\n X_train = np.memmap(X_train_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"train\":\n return X_train, y_train\n\n if subset is None or subset == \"test\":\n X_test_path = os.path.join(data_dir, \"X_test.dat\")\n y_test_path = os.path.join(data_dir, \"y_test.dat\")\n y_test = np.memmap(y_test_path, dtype=np.float32, mode=\"r\")\n N = y_test.shape[0]\n X_test = np.memmap(X_test_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"test\":\n return X_test, y_test\n\n return X_train, y_train, X_test, y_test",
"def create_feature_csv(faces, objects, rgb, hsv, gray):\n faces_columns = ['faces_zs']\n objects_columns = ['comp_1', 'comp_2', 'comp_3', 'comp_4', 'comp_5', 'comp_6', 'comp_7', 'comp_8', 'comp_9',\n 'comp_10', 'comp_11', 'comp_12', 'comp_13', 'comp_14', 'comp_15', 'comp_16', 'comp_17', 'comp_18',\n 'comp_19', 'comp_20', 'comp_21', 'comp_22', 'comp_23', 'comp_24', 'comp_25']\n rgb_columns = ['max_b_zs', 'max_g_zs', 'max_r_zs', 'med_b_zs', 'med_g_zs', 'med_r_zs', 'std_b', 'std_g', 'std_r']\n hsv_columns = ['hmax1_zs', 'hmax2_zs', 'hmax3_zs', 'smax1_zs', 'smax2_zs', 'smax3_zs', 'smed_zs', 'sstd',\n 'vmax1_zs', 'vmax2_zs', 'vmax3_zs', 'vmed_zs', 'vstd']\n gray_columns = ['gray_max_zs', 'gray_med_zs', 'gray_std_zs']\n\n with open(feature_file, 'r', encoding='utf-8') as infile:\n new_csv = pd.read_csv(infile, sep=',')\n\n if not faces:\n for col in faces_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not objects:\n for col in objects_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not rgb:\n for col in rgb_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not hsv:\n for col in hsv_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not gray:\n for col in gray_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not os.path.exists(os.path.join(workdir, \"output\")):\n os.makedirs(os.path.join(workdir, \"output\"))\n with open(output_file, 'w', encoding='utf-8') as outfile:\n new_csv = new_csv.drop(['Unnamed: 0'], 1)\n new_csv.to_csv(outfile, sep=',')",
"def save_features(args: Namespace):\n # Get data and features function\n data = get_data(path=args.data_path, max_data_size=None)\n features_func = get_features_func(args.features_generator)\n temp_save_dir = args.save_path + '_temp'\n\n # Load partially complete data\n if args.restart:\n if os.path.exists(args.save_path):\n os.remove(args.save_path)\n if os.path.exists(temp_save_dir):\n shutil.rmtree(temp_save_dir)\n else:\n if os.path.exists(args.save_path):\n raise ValueError(f'\"{args.save_path}\" already exists and args.restart is False.')\n\n if os.path.exists(temp_save_dir):\n features, temp_num = load_temp(temp_save_dir)\n\n if not os.path.exists(temp_save_dir):\n os.makedirs(temp_save_dir)\n features, temp_num = [], 0\n\n # Build features map function\n data = data[len(features):] # restrict to data for which features have not been computed yet\n mols = (d.mol for d in data)\n if args.parallel:\n with Pool() as pool:\n features_map = tqdm(pool.imap(features_func, mols), total=len(data))\n else:\n features_map = tqdm(map(features_func, mols), total=len(data))\n\n # Get features\n temp_features = []\n for i, feats in enumerate(features_map):\n temp_features.append(feats)\n\n # Save temporary features every save_frequency\n if (i > 0 and (i + 1) % args.save_frequency == 0) or i == len(data) - 1:\n save(os.path.join(temp_save_dir, f'{temp_num}.pckl'), temp_features)\n features.extend(temp_features)\n temp_features = []\n temp_num += 1\n\n try:\n # Save all features\n save(args.save_path, features)\n\n # Remove temporary features\n shutil.rmtree(temp_save_dir)\n except OverflowError:\n print('Features array is too large to save as a single file. Instead keeping features as a directory of files.')",
"def get_feature_vectors(self):\n\t\tresult = self.session.query(Image.id, Image.feature_vector).all()\n\n\t\ttransformed_result = list()\n\t\t\n\t\tfor (id, serialized_feature_vector) in result:\n\t\t\tdeserialized_tensor = tf.deserialize_feature_vector(serialized_feature_vector)\n\t\t\ttransformed_result.append((id, deserialized_tensor))\n\n\t\treturn transformed_result",
"def persistent_image_features(images, toStoreFile):\n image_features = extract_features(images)\n\n np.save(toStoreFile, image_features)",
"def write_vectors(self, filename):\n svu.write_realvectors(self,filename)"
]
| [
"0.68451023",
"0.6687783",
"0.66236377",
"0.66236335",
"0.6608172",
"0.651898",
"0.65123",
"0.63167775",
"0.6315384",
"0.63149315",
"0.625614",
"0.6229033",
"0.6199346",
"0.6194556",
"0.61851686",
"0.6167491",
"0.6155025",
"0.6137881",
"0.6135727",
"0.61232656",
"0.6087816",
"0.60787994",
"0.6074922",
"0.6074922",
"0.6065061",
"0.6063997",
"0.60477835",
"0.6045137",
"0.6018942",
"0.6014435"
]
| 0.69787437 | 0 |
Decode a raw features string and return the metadata fields | def read_metadata_record(raw_features_string):
all_data = json.loads(raw_features_string)
metadata_keys = {"sha256", "appeared", "label", "avclass"}
return {k: all_data[k] for k in all_data.keys() & metadata_keys} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_metadata_record(raw_features_string):\n full_metadata = json.loads(raw_features_string)\n return {\"sha256\": full_metadata[\"sha256\"], \"appeared\": full_metadata[\"appeared\"], \"label\": full_metadata[\"label\"]}",
"def decode_data(features, reader_settings):\n if features.dtype == tf.string:\n return tf.decode_raw(\n features,\n reader_settings)\n else:\n return tf.cast(\n features,\n reader_settings)",
"def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example",
"def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. / 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit",
"def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res",
"def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label",
"def _extract_feature(element):\n features = tf.parse_single_example(\n element,\n # Defaults are not specified since both keys are required.\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'label/x': tf.FixedLenFeature([], tf.int64),\n 'label/y': tf.FixedLenFeature([], tf.int64)\n })\n return features",
"def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label",
"def _decode_record(record):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels",
"def decode(value):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value='')\n }\n data = tf.parse_single_example(value, keys_to_features)\n return data",
"def _decode_csv(line):\n column_header = column_names if with_target else column_names[:4]\n record_defaults = [[0.] for _ in xrange(len(column_names) - 1)]\n # Pass label as integer.\n if with_target:\n record_defaults.append([0])\n columns = tf.decode_csv(line, record_defaults=record_defaults)\n features = dict(zip(column_header, columns))\n target = features.pop(column_names[4]) if with_target else None\n return features, target",
"def _decode_record(self, record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # tf.logging.info(t.get_shape().as_list())\n # assert t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example",
"def read_temp(temp):\n type_dict = {\"string\": str, \"unknown\": str, \"numeric\": float}\n with open(temp, 'r') as topen:\n feature_lines = topen.readlines()\n feature_labels = []\n feature_types = []\n for i, row in enumerate(feature_lines):\n if row.startswith(\"@attribute\"):\n flabel, ftype = row[11:-1].split(' ')\n feature_labels.append(flabel)\n feature_types.append(type_dict[ftype])\n elif row.startswith(\"@data\"):\n feature_values = feature_lines[i+1].split(\",\")\n if len(feature_values) < len(feature_labels):\n feature_values = feature_lines[i+2].split(\",\")\n for i, item in enumerate(feature_values):\n try:\n feature_values[i] = (feature_types[i](item))\n except:\n feature_values[i] = item\n return(dict(zip(feature_labels, feature_values)))",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def _parse_csv(rows_string_tensor):\n # Last dim is the label.\n num_features = len(FEATURE_KEYS)\n num_columns = num_features + 1\n columns = tf.decode_csv(rows_string_tensor,\n record_defaults=[[]] * num_columns)\n features = dict(zip(FEATURE_KEYS, columns[:num_features]))\n labels = tf.cast(columns[num_features], tf.int32)\n return features, labels",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]",
"def decodemeta(data):\n d = {}\n for l in data.split('\\0'):\n if l:\n key, value = l.split(':')\n d[key] = value\n return d",
"def parse_feature(self, feature_key, lines):\n ...",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example",
"def extract_features(raw_data):\n width = len(raw_data[0])\n num_features = len(raw_data) * width\n features = np.zeros((num_features, 3), dtype=bool)\n for row, line in enumerate(raw_data):\n for col, char in enumerate(line):\n if char == ' ':\n features[col + row * width][0] = True\n elif char == '+':\n features[col + row * width][1] = True\n elif char == '#':\n features[col + row * width][2] = True\n return features",
"def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'env': tf.FixedLenFeature([1, 4], tf.int64),\n # 'env_segment_number': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),\n # 'query_plan_ops': tf.VarLenFeature(tf.string),\n # 'query_table_size': tf.VarLenFeature(tf.float32),\n # 'segment_cpu_usage': tf.VarLenFeature(tf.float32),\n 'label': tf.FixedLenFeature([], tf.float32)\n })\n env = tf.cast(features['env'], tf.float32)\n # image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # # Reshape from [depth * height * width] to [depth, height, width].\n # image = tf.cast(\n # tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n # tf.float32)\n label = tf.cast(features['label'], tf.float32)\n reshape_label = tf.reshape(features['label'], (1,1))\n return env, reshape_label",
"def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example",
"def parser(record):\n # keys_to_features = {\n # \"image_data\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n # \"date_time\": tf.FixedLenFeature((), tf.int64, default_value=\"\"),\n # \"label\": tf.FixedLenFeature((), tf.int64,\n # default_value=tf.zeros([], dtype=tf.int64)),\n # }\n\n keys_to_features = {\n \"image_data\": tf.FixedLenFeature((), tf.float, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int32,\n default_value=tf.zeros([], dtype=tf.int64)),\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n\n # Perform additional preprocessing on the parsed data.\n image = tf.image.decode_jpeg(parsed[\"image_data\"])\n image = tf.reshape(image, [299, 299, 1])\n label = tf.cast(parsed[\"label\"], tf.int32)\n\n return {\"image_data\": image, \"date_time\": parsed[\"date_time\"]}, label"
]
| [
"0.7640253",
"0.6727874",
"0.6480235",
"0.6434632",
"0.6149312",
"0.61184937",
"0.6096524",
"0.6088268",
"0.60809267",
"0.603969",
"0.60149556",
"0.5988124",
"0.5919679",
"0.59035814",
"0.5876347",
"0.5871728",
"0.58618104",
"0.58504206",
"0.58429444",
"0.580738",
"0.5801658",
"0.5786202",
"0.578382",
"0.578382",
"0.578382",
"0.5773392",
"0.57669044",
"0.5760366",
"0.575379",
"0.57512254"
]
| 0.73563117 | 1 |
Read an already created metadata file and return its dataframe | def read_metadata(data_dir):
return pd.read_csv(os.path.join(data_dir, "metadata.csv"), index_col=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"def read_metadata(self, file=None):\n if file is None:\n file = self.meta_data_file\n\n try:\n self.meta_data = self.input_dataframe(file, index_col=None)\n except IOError:\n self.meta_data = self.create_default_meta_data(self.expression_matrix)",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def read_meta_data(data_dir):\n meta_data = pd.read_csv(join(data_dir, 'index.csv'))\n\n return meta_data",
"def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:\n extract_dict: typing.Dict[str, str]\n ctffind_meta_data: pd.DataFrame\n lines: typing.List[str]\n match: typing.Optional[typing.Match[str]]\n non_string_values: typing.Set[str]\n\n extract_dict = get_ctffind_4_1_0_extract_dict()\n ctffind_meta_data = pd.DataFrame(index=[0], columns=extract_dict.keys())\n with open(file_name, 'r') as read:\n lines = read.readlines()\n\n non_string_values = set([\n 'MicrographNameNoDW',\n 'version'\n ])\n for line in lines:\n for key, value in extract_dict.items():\n match = re.match(value, line)\n if match is not None:\n try:\n ctffind_meta_data[key] = float(match.group(1))\n except ValueError:\n assert key in non_string_values, f'{key}: {match.group(1)}'\n ctffind_meta_data[key] = match.group(1)\n else:\n pass\n return ctffind_meta_data",
"def GetTrackMetaData():\n\t\n\t## Get the raw file. The low_memory option\n\t## suppresses a warning regarding mismatched datatypes in\n\t## the track_id column. That's due to spacing in the original file.\n\tdf = pd.read_csv(\"_data\\\\fma_metadata\\\\tracks.csv\",header=1,index_col=0,low_memory=False)\n\t\n\t## Fix the track_id column by dropping\n\t## the problematic row and renaming.\n\tdf = df.drop(\"track_id\",axis=0)\n\tdf.index.rename(\"track_id\",inplace=True)\n\n\treturn df",
"def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df",
"def read_data_with_metadata(trace_path, metadata):\n # find the encoding of the csv\n try:\n enc = metadata[\"encoding\"]\n except KeyError:\n enc = None\n\n return pd.read_csv(trace_path, encoding=enc)",
"def read_metafile(path: PathType) -> dd.DataFrame:\n with bgen_metafile(path) as mf:\n divisions = [mf.partition_size * i for i in range(mf.npartitions)] + [\n mf.nvariants - 1\n ]\n dfs = [\n dask.delayed(_read_metafile_partition)(path, i)\n for i in range(mf.npartitions)\n ]\n meta = dd.utils.make_meta(METAFILE_DTYPE)\n return dd.from_delayed(dfs, meta=meta, divisions=divisions)",
"def read(self):\n self._load_metadata()\n return self._df.compute()",
"def get_session_metadata(self):\n assert os.path.exists(self._metadata_file), f\"No database exists at {self._metadata_file}\"\n with open(self._metadata_file, 'r') as f:\n df = pd.read_csv(f)\n return df",
"def load_image_metadata(path_to_data_csv: str) -> pd.DataFrame:\n return pd.read_csv(path_to_data_csv)",
"def load_metadata_from_database(filename,key):\n\t# Opening file\n\tstore = pd.HDFStore(filename)\n\tmetadata = store.get_storer(key).attrs.metadata\n\tstore.close()\n\t# Ok returning the data now\n\treturn metadata",
"def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data",
"def get_dataset(filepath):\n return pandas.read_csv(filepath, header='infer')",
"def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def read_data(filepath):\n df = pd.read_csv(filepath)\n return df",
"def parse_metadata_file(self, file):\n\n file_keys = list(file.keys())\n\n if 'labelAnnotations' in file_keys:\n file_annots = file['labelAnnotations']\n file_top_score = np.asarray(\n [x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n\n file_colors = file['imagePropertiesAnnotation']['dominantColors'][\n 'colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray(\n [x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray(\n [x['confidence'] for x in file_crops]).mean()\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray(\n [x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n\n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n\n return df_metadata",
"def _load_sql_client_data_metadata(database_filepath: str) -> pd.DataFrame:\n\n if not tf.io.gfile.exists(database_filepath):\n raise FileNotFoundError(f'No such file or directory: {database_filepath}')\n elif not os.path.exists(database_filepath):\n logging.info('Starting fetching SQL database to local.')\n tmp_dir = tempfile.mkdtemp()\n tmp_database_filepath = tf.io.gfile.join(\n tmp_dir, os.path.basename(database_filepath))\n tf.io.gfile.copy(database_filepath, tmp_database_filepath, overwrite=True)\n database_filepath = tmp_database_filepath\n logging.info('Finished fetching SQL database to local.')\n\n con = sqlite3.connect(database_filepath)\n return pd.read_sql_query('SELECT * from client_metadata', con)",
"def parse_metadata_file(self, file):\n \n file_keys = list(file.keys())\n \n if 'labelAnnotations' in file_keys:\n #file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.5)]\n file_annots = file['labelAnnotations'][:]\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n \n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\n \n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n \n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n \n return df_metadata",
"def create_metadata(data_dir):\n pool = multiprocessing.Pool()\n raw_feature_paths = [os.path.join(data_dir, \"features.jsonl\")]\n records = list(pool.imap(read_metadata_record, raw_feature_iterator(raw_feature_paths)))\n records = [dict(record, **{\"subset\": \"train\"}) for record in records]\n\n metadf = pd.DataFrame(records)[[\"sha256\", \"appeared\", \"subset\", \"label\"]]\n metadf.to_csv(os.path.join(data_dir, \"metadata.csv\"))\n print(\"\\n[Done] create_metadata\\n\")\n \n return metadf",
"def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()",
"def get_data(fnames: List[str]) -> pd.DataFrame:\n\n def read_file(fname: str) -> pd.DataFrame:\n \"\"\"Read in a single file\n \"\"\"\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data\n\n raw_dfs = [read_file(fname) for fname in fnames]\n clean_data = reduce(\n lambda left, right: pd.merge(left, right, how=\"inner\", on=[\"lat\", \"lon\"]),\n raw_dfs,\n )\n try:\n clean_data.drop(\"date_x\", axis=1, inplace=True)\n clean_data.drop(\"date_y\", axis=1, inplace=True)\n except KeyError:\n print(\"Columns not found.\")\n return clean_data",
"def get_file_df(filepath):\n dd = [json.loads(f) for f in open(filepath).readlines()]\n return pd.DataFrame(dd)",
"def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df",
"def loadData(directoryName, fileName):\r\n\r\n dataset = pd.read_csv(directoryName + \"/\" + fileName, header = None)\r\n with open(directoryName + \"/\" + \"metadata.json\") as json_file:\r\n metadata = json.load(json_file)\r\n for uuid in metadata:\r\n for column in metadata[uuid]:\r\n index = int(metadata[uuid][column])\r\n dataset.iloc[0, index] = uuid\r\n \r\n return DataManipulationService.createDictionary(dataset)",
"def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata",
"def get_metadata(filepath):\n\tsong_id = os.path.basename(filepath).split('.')[0]\n\t\n\ttry:\n\t\tid3_ = EasyID3(filepath)\n\texcept Exception as e:\n\t\tprint(\"Error encountered with file {}: {}\".format(filepath, e))\n\t\treturn None\n\ttry:\n\t\tmp3_ = MP3(filepath) \n\texcept Exception as e:\n\t\tprint(\"Error encountered with file {}: {}\".format(filepath, e))\n\t\treturn None\n\tdf = pd.DataFrame([song_id], columns=['id'])\n\tdf['album'] = id3_.get('album', ['unknown'])[0]\n\tdf['genre'] = id3_.get('genre', ['unknown'])[0]\n\tdf['duration'] = mp3_.info.length\n\treturn df.set_index('id').reset_index()"
]
| [
"0.75550187",
"0.73237497",
"0.7193487",
"0.69972616",
"0.69912016",
"0.6918955",
"0.67753977",
"0.6759768",
"0.6757957",
"0.6754",
"0.6751405",
"0.6681768",
"0.66246736",
"0.65975994",
"0.6565998",
"0.65403295",
"0.65184593",
"0.65184593",
"0.65184593",
"0.65133196",
"0.64948726",
"0.64623636",
"0.64567226",
"0.6445596",
"0.64451814",
"0.6441313",
"0.6426626",
"0.63944",
"0.6392189",
"0.6391916"
]
| 0.7495927 | 1 |
Train the LightGBM model from the EMBER dataset from the vectorized features | def train_model(data_dir, params={}, feature_version=2):
# update params
params.update({"application": "binary"})
# Read data
X_train, y_train = read_vectorized_features(data_dir, "train", feature_version)
# Filter unlabeled data
train_rows = (y_train != -1)
# Train
lgbm_dataset = lgb.Dataset(X_train[train_rows], y_train[train_rows])
lgbm_model = lgb.train(params, lgbm_dataset)
return lgbm_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_lightgbm(x_train, train_labels, x_test, orig_test):\n train_labels = train_labels['Col2']\n num_lgbm_ensemble = 17\n lgb_forests = []\n for i in range(num_lgbm_ensemble):\n print(\"training LGBC model {}\".format(i))\n params = {\n 'n_estimators': 17,\n 'max_depth': 7,\n 'learning_rate': 0.01,\n 'random_state': i,\n 'colsample_bytree': 0.1,\n 'reg_lambda': 15,\n 'reg_alpha': 10\n }\n\n lgbc = models.make_model(params=params, model_name='light_gbm')\n lgbc.fit(x_train, train_labels)\n lgb_forests.append(lgbc)\n\n model_file_path = os.path.join(MODEL_PATH, \"lgb\", \"lgb_forest.pkl\")\n pickle.dump(lgb_forests, open(model_file_path, 'wb'))\n\n evaluation.submission_lgbm(model_file_path,\n x_test,\n orig_test,\n submission_name='submission_lgb.csv')",
"def train_model(data_dir, rows):\n X, y = read_vectorized_features(data_dir, rows)\n\n # Set params\n # Scores ~0.784 (without tuning and early stopping)\n params = {'boosting_type': 'gbdt',\n 'max_depth' : -1,\n 'objective': 'binary',\n 'nthread': 3, # Updated from nthread\n 'num_leaves': 64,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n 'subsample_for_bin': 200,\n 'subsample': 1,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5,\n 'reg_lambda': 10,\n 'min_split_gain': 0.5,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'num_class' : 1,\n 'metric' : 'binary_error'}\n\n # Create parameters to search\n gridParams = {\n 'learning_rate': [0.15, 0.2, 0.25, 0.3], #default = 0.1\n 'n_estimators': [40],\n 'num_leaves': [6,8,12,16],\n 'boosting_type' : ['gbdt'],\n 'objective' : ['binary'],\n 'random_state' : [501], # Updated from 'seed'\n 'colsample_bytree' : [0.65, 0.66],\n 'subsample' : [0.7,0.75],\n 'reg_alpha' : [1,1.2],\n 'reg_lambda' : [1,1.2,1.4],\n }\n\n # Create classifier to use. Note that parameters have to be input manually\n # not as a dict!\n mdl = lgb.LGBMClassifier(boosting_type= 'gbdt',\n objective = 'binary',\n n_jobs = 3, # Updated from 'nthread'\n silent = True,\n max_depth = params['max_depth'],\n max_bin = params['max_bin'],\n subsample_for_bin = params['subsample_for_bin'],\n subsample = params['subsample'],\n subsample_freq = params['subsample_freq'],\n min_split_gain = params['min_split_gain'],\n min_child_weight = params['min_child_weight'],\n min_child_samples = params['min_child_samples'],\n scale_pos_weight = params['scale_pos_weight'])\n\n # Create the grid\n grid = GridSearchCV(mdl, gridParams,\n verbose=0,\n cv=4,\n n_jobs=2)\n # train\n grid.fit(X, y)\n print(grid.best_params_)\n print(grid.best_score_)\n\n\n # train\n lgbm_dataset = lgb.Dataset(X, y)\n lgbm_model = lgb.train({\"application\": \"binary\"}, lgbm_dataset)\n\n return lgbm_model",
"def trainModel( self, featureTrain, classTrain):",
"def kfold_lightgbm(df, num_rows, num_folds, stratified=False, debug=False):\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n text = \"Starting LightGBM. Train shape: {}, test shape: {}\"\n print(text.format(train_df.shape, test_df.shape))\n del df\n gc.collect()\n # Cross validation model\n if stratified:\n folds = StratifiedKFold(n_splits=num_folds, shuffle=True,\n random_state=1001)\n else:\n folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)\n # Create arrays and dataframes to store results\n # oof = 'out of fold'\n oof_preds = np.zeros(train_df.shape[0])\n # sub_preds = submission predictions (i.e., will be submitted to kaggle)\n sub_preds = np.zeros(test_df.shape[0])\n feature_importance_df = pd.DataFrame()\n feats = [f for f in train_df.columns\n if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU',\n 'SK_ID_PREV', 'index']]\n\n xx = folds.split(train_df[feats], train_df['TARGET'])\n for n_fold, (train_idx, valid_idx) in enumerate(xx):\n train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]\n\n valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]\n\n # LightGBM parameters found by Bayesian optimization\n clf = LGBMClassifier(\n nthread=4,\n n_estimators=10000, # quite a few trees!\n learning_rate=0.02,\n num_leaves=34,\n colsample_bytree=0.9497036,\n subsample=0.8715623,\n max_depth=8,\n reg_alpha=0.041545473,\n reg_lambda=0.0735294,\n min_split_gain=0.0222415,\n min_child_weight=39.3259775,\n silent=-1,\n verbose=-1, )\n\n clf.fit(train_x, train_y,\n eval_set=[(train_x, train_y), (valid_x, valid_y)],\n eval_metric='auc', verbose=200, early_stopping_rounds=200)\n\n oof_preds[valid_idx] = clf.predict_proba(valid_x,\n num_iteration=\\\n clf.best_iteration_)[:, 1]\n # our submission predicitons are a sum of probabilities for each fold\n # because each fold finds a different optimal fit (.best_iteration)\n # based on that fold's data; we do need to weight the probabilities\n # by 1/n_splits, obviously\n sub_preds += clf.predict_proba(test_df[feats],\n num_iteration=\n clf.best_iteration_)[:, 1]/folds.n_splits\n\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = feats\n fold_importance_df[\"importance\"] = clf.feature_importances_\n fold_importance_df[\"fold\"] = n_fold + 1\n feature_importance_df = pd.concat([feature_importance_df,\n fold_importance_df], axis=0)\n print('---------\\nFold %2d AUC : %.6f' %\n (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))\n print('Feature importance df shape:', feature_importance_df.shape)\n del clf, train_x, train_y, valid_x, valid_y\n gc.collect()\n print('Full AUC score %.6f\\n=======' % roc_auc_score(train_df['TARGET'],\n oof_preds))\n # plot ROC AUC and precision - sensitivity\n\n\n # Write submission file and plot feature importance\n if not debug:\n test_df['TARGET'] = sub_preds\n test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name,\n index=False)\n folds_idx = [(trn_idx, val_idx) for trn_idx, val_idx in xx]\n display_importances(feature_importance_df, num_rows)\n display_roc_curve(train_df['TARGET'], oof_preds, folds_idx)\n display_precision_recall(train_df['TARGET'], oof_preds, folds_idx)\n return feature_importance_df, oof_preds, train_df['TARGET']",
"def train_default(X_train, Y_train):\n model = lgb.LGBMClassifier(objective = 'multiclass', num_class = \"6\", random_state = 42)\n model.fit(X_train, Y_train)\n return model",
"def train_model(X_train, y_train, X_valid, y_valid, params=None, model_type='lgb', \r\n model_path_name='lgb', plot_feature_importance=False, model=None):\r\n def lgb_f1_score(y_true, y_pred):\r\n y_pred = np.round(y_pred)\r\n return 'f1', f1_score(y_true, y_pred), True\r\n\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n print('Started at', time.ctime())\r\n \r\n \r\n if model_type == 'lgb':\r\n \r\n model = lgb.LGBMClassifier(**params, n_estimators=50000, n_jobs=-1)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), \r\n eval_metric=lgb_f1_score, early_stopping_rounds=300)\r\n \r\n y_pred_valid = model.predict(X_valid)\r\n \r\n if model_type == 'cat':\r\n model = cb.CatBoost(iterations=20000, **params)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)\r\n y_pred_valid = model.predict(X_valid)\r\n\r\n #save the model\r\n joblib.dump(model, model_path_name)\r\n \r\n scores.append(f1_score(y_valid, y_pred_valid)) \r\n \r\n if model_type == 'lgb':\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = X_train.columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n \r\n print('score: {0:.4f}.'.format(np.mean(scores)))\r\n\r\n if model_type == 'lgb':\r\n feature_importance[\"importance\"]\r\n if plot_feature_importance:\r\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n by=\"importance\", ascending=False)[:50].index\r\n\r\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n\r\n #sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n \r\n return feature_importance, np.mean(scores)\r\n return np.mean(scores)\r\n \r\n else:\r\n return np.mean(scores)",
"def train(self, features, labels):\n pass",
"def train_gradient_boost(self, params, num_boost_round = 50):\n print \"training GB......\"\n dtrain = xgb.DMatrix(self.X, self.y)\n model = xgb.train(params, dtrain, num_boost_round = num_boost_round)\n self.models += [model]",
"def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')",
"def fit(self, df, features, target, params, numrounds):\n X_train = df[features]\n y_train = df[target]\n dtrain = xgb.DMatrix(X_train.values, label=y_train.values)\n self.model = xgb.train(params, dtrain, numrounds )",
"def train(original_sql,\n model_image,\n estimator_string,\n datasource,\n select,\n validation_select,\n model_params,\n train_params,\n feature_column_map,\n label_column,\n save,\n load=None):\n conn = db.connect_with_data_source(datasource)\n fc_map_ir, fc_label_ir = infer_feature_columns(conn,\n select,\n feature_column_map,\n label_column,\n n=1000)\n fc_map = compile_ir_feature_columns(fc_map_ir, EstimatorType.XGBOOST)\n\n feature_column_list = fc_map[\"feature_columns\"]\n field_descs = get_ordered_field_descs(fc_map_ir)\n feature_column_names = [fd.name for fd in field_descs]\n feature_metas = dict([(fd.name, fd.to_dict()) for fd in field_descs])\n label_meta = label_column.get_field_desc()[0].to_dict()\n\n # NOTE: in the current implementation, we are generating a transform_fn\n # from the COLUMN clause. The transform_fn is executed during the process\n # of dumping the original data into DMatrix SVM file.\n transform_fn = xgboost_extended.feature_column.ComposedColumnTransformer(\n feature_column_names, *feature_column_list)\n\n disk_cache = False\n batch_size = None\n epoch = 1\n if \"disk_cache\" in train_params:\n disk_cache = train_params.pop(\"disk_cache\")\n if \"batch_size\" in train_params:\n batch_size = train_params.pop(\"batch_size\")\n if \"epoch\" in train_params:\n epoch = train_params.pop(\"epoch\")\n\n def build_dataset(fn, slct):\n return xgb_dataset(datasource,\n fn,\n slct,\n feature_metas,\n feature_column_names,\n label_meta,\n cache=disk_cache,\n batch_size=batch_size,\n epoch=epoch,\n transform_fn=transform_fn)\n\n file_name = \"my_model\"\n if load:\n Model.load_from_db(datasource, load)\n bst = xgb.Booster()\n bst.load_model(file_name)\n else:\n bst = None\n\n with temp_file.TemporaryDirectory() as tmp_dir_name:\n train_fn = os.path.join(tmp_dir_name, 'train.txt')\n val_fn = os.path.join(tmp_dir_name, 'val.txt')\n train_dataset = build_dataset(train_fn, select)\n if validation_select:\n val_dataset = build_dataset(val_fn, validation_select)\n else:\n val_dataset = None\n\n eval_result = dict()\n watchlist = [None]\n if val_dataset:\n # The `xgboost.train` API only accepts the XGBoost DMatrix\n # object as the training or validation dataset, so we should\n # convert the generator to DMatrix.\n if isinstance(val_dataset, types.GeneratorType):\n val_dataset = list(val_dataset)[0]\n watchlist.append((val_dataset, \"validate\"))\n\n for per_batch_dmatrix in train_dataset:\n watchlist[0] = (per_batch_dmatrix, \"train\")\n bst = xgb.train(model_params,\n per_batch_dmatrix,\n evals=watchlist,\n evals_result=eval_result,\n xgb_model=bst,\n **train_params)\n print(\"Evaluation result: %s\" % eval_result)\n\n meta = collect_metadata(original_sql=original_sql,\n select=select,\n validation_select=validation_select,\n model_repo_image=model_image,\n class_name=estimator_string,\n attributes=model_params,\n features=fc_map_ir,\n label=fc_label_ir,\n evaluation=eval_result,\n num_workers=1)\n\n save_model_to_local_file(bst, model_params, file_name)\n model = Model(EstimatorType.XGBOOST, meta)\n model.save_to_db(datasource, save)\n return eval_result",
"def train_model(database):\n train_set = creating_set(database)\n return NaiveBayesClassifier.train(train_set)",
"def lgb_train_method(traindata, ytrain, valdata, yval, num_round):\n traindataset = lgb.Dataset(traindata, label=ytrain)\n valdataset = lgb.Dataset(valdata, label=yval)\n\n model_kwargs = params['training']['lgbt-model-kwargs']\n\n # train model\n evaldict = {}\n model = lgb.train(model_kwargs,\n traindataset,\n num_round,\n valid_sets=[valdataset, traindataset],\n valid_names=['validation', 'train'],\n early_stopping_rounds=10,\n evals_result=evaldict,\n verbose_eval=False\n )\n\n return model, evaldict",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def train_linear_ensemble(x, y, alpha, max_iter, n_ensembles):\n # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n x_train, y_train = x, y\n ensemble_models = []\n for i in range(n_ensembles):\n samples = sample_without_replacement(n_population=x_train.shape[0], n_samples=(x_train.shape[0]/5))\n x_seg_train = pd.DataFrame()\n y_seg_train = pd.Series()\n for sample in samples:\n x_seg_train = pd.concat([x_seg_train, x_train.iloc[[sample]]])\n y_seg_train = pd.concat([y_seg_train, y_train.iloc[[sample]]])\n\n model: Ridge = Ridge(alpha=alpha, normalize=True, max_iter=max_iter).fit(x_seg_train, y_seg_train)\n print(model.score(x_seg_train, y_seg_train))\n # print(model.score(x_test, y_test))\n ensemble_models.append(model)\n\n return ensemble_models",
"def train(records):\n m = len( records[0].features )\n sqm = int(math.sqrt(m))\n return train_r( records, range(m), sqm, 1e30 )",
"def train_fru(model, epochs=EPOCHS):\n train(model, epochs=epochs, dataset=FRUDataset)",
"def train_cv(X_train, Y_train, nfold = 5, early_stopping_rounds = 20):\n # model params\n params = { \"objective\" : \"multiclass\",\n \"num_class\" : 6,\n \"verbosity\" : -1 }\n\n # create dataset for lightgbm\n lgb_train = lgb.Dataset(X_train, Y_train)\n \n # cross validate to find optimal no of iterations\n r = lgb.cv(params, \n lgb_train, \n 10000,\n early_stopping_rounds = early_stopping_rounds,\n nfold = nfold,\n feval = accuracy_error,\n metrics = 'None',\n verbose_eval = True,\n seed = 42)\n\n # Highest score\n r_best = np.max(r['accuracy-mean'])\n\n # best number of estimators\n best_estimators = np.argmax(r['accuracy-mean']) + 1\n print(best_estimators)\n\n print(f'The maxium accuracy on the validation set was {r_best:.5f}')\n print(f'The ideal number of iterations was {best_estimators}.')\n\n # Fit on all of the training data using the ideal number of iterations\n model = lgb.LGBMClassifier(n_estimators=best_estimators, n_jobs = -1,\n **params, random_state = 42) \n model.fit(X_train, Y_train)\n\n return model",
"def train(self, training_data, model_name):\n dataset = []\n for example in training_data:\n entity_offsets = self._convert_example(example)\n dataset.append(self._from_json_to_crf(example, entity_offsets))\n\n features = [self._sentence_to_features(s) for s in dataset]\n labels = [self._sentence_to_labels(s) for s in dataset]\n trainer = sklearn_crfsuite.CRF(\n algorithm=\"lbfgs\",\n # coefficient for L1 penalty\n c1=0.1,\n # coefficient for L2 penalty\n c2=0.1,\n # stop earlier\n max_iterations=50,\n # include transitions that are possible, but not observed\n all_possible_transitions=True,\n )\n trainer.fit(features, labels)\n logger.info(\"Creating Model for Intent %s\",model_name)\n joblib.dump(trainer, 'core/agent/model_files/%s.model' % model_name)\n return True",
"def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None,\r\n plot_feature_importance=False, model=None,\r\n verbose=10000, early_stopping_rounds=200, n_estimators=50000):\r\n columns = X.columns if columns is None else columns\r\n X_test = X_test[columns]\r\n\r\n # to set up scoring parameters\r\n metrics_dict = {'mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'sklearn_scoring_function': metrics.mean_absolute_error},\r\n 'group_mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'scoring_function': group_mean_log_mae},\r\n 'mse': {'lgb_metric_name': 'mse',\r\n 'catboost_metric_name': 'MSE',\r\n 'sklearn_scoring_function': metrics.mean_squared_error}\r\n }\r\n\r\n result_dict = {}\r\n\r\n # out-of-fold predictions on train data\r\n oof = np.zeros(len(X))\r\n\r\n # averaged predictions on train data\r\n prediction = np.zeros(len(X_test))\r\n\r\n # list of scores on folds\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n\r\n # split and train on folds\r\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\r\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\r\n if type(X) == np.ndarray:\r\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\r\n y_train, y_valid = y[train_index], y[valid_index]\r\n else:\r\n X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]\r\n y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\r\n\r\n if model_type == 'lgb':\r\n model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)\r\n model.fit(X_train, y_train,\r\n eval_set=[(X_train, y_train), (X_valid, y_valid)],\r\n eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],\r\n verbose=verbose, early_stopping_rounds=early_stopping_rounds)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test, num_iteration=model.best_iteration_)\r\n\r\n if model_type == 'xgb':\r\n train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)\r\n valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)\r\n\r\n watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]\r\n model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,\r\n verbose_eval=verbose, params=params)\r\n y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns),\r\n ntree_limit=model.best_ntree_limit)\r\n y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\r\n\r\n if model_type == 'sklearn':\r\n model = model\r\n model.fit(X_train, y_train)\r\n\r\n y_pred_valid = model.predict(X_valid).reshape(-1, )\r\n score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)\r\n print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.')\r\n print('')\r\n\r\n y_pred = model.predict(X_test).reshape(-1, )\r\n\r\n if model_type == 'cat':\r\n model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],\r\n **params,\r\n loss_function=metrics_dict[eval_metric]['catboost_metric_name'])\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True,\r\n verbose=False)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test)\r\n\r\n oof[valid_index] = y_pred_valid.reshape(-1, )\r\n if eval_metric != 'group_mae':\r\n scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))\r\n else:\r\n scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))\r\n\r\n prediction += y_pred\r\n\r\n if model_type == 'lgb' and plot_feature_importance:\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n fold_importance[\"fold\"] = fold_n + 1\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n\r\n prediction /= folds.n_splits\r\n\r\n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))\r\n\r\n result_dict['oof'] = oof\r\n result_dict['prediction'] = prediction\r\n result_dict['scores'] = scores\r\n\r\n # if model_type == 'lgb':\r\n # if plot_feature_importance:\r\n # feature_importance[\"importance\"] /= folds.n_splits\r\n # cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n # by=\"importance\", ascending=False)[:50].index\r\n #\r\n # best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n #\r\n # plt.figure(figsize=(16, 12));\r\n # sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n # plt.title('LGB Features (avg over folds)');\r\n #\r\n # result_dict['feature_importance'] = feature_importance\r\n\r\n return result_dict",
"def svm_train(X, y, b, alpha, n_samples, n_features, learner, loop, eta,\n max_iter=100, step_probability=0.5):\n from pysofia import _sofia_ml\n if isinstance(X, six.string_types):\n if n_features is None:\n # the default in sofia-ml TODO: parse file to see\n n_features = 2**17\n w = _sofia_ml.train(X, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n elif isinstance(X, np.ndarray):\n if n_features is None:\n n_features = X.shape[1]\n\n if n_samples is None:\n n_samples = X.shape[0]\n\n w = _sofia_ml.train_fast(np.float64(X), np.float64(y), n_samples,\n n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n else:\n if n_features is None:\n n_features = X.shape[1]\n\n with tempfile.NamedTemporaryFile() as f:\n datasets.dump_svmlight_file(X, y, f.name, query_id=b)\n w = _sofia_ml.train(f.name, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n return w",
"def train(self, training_steps=10):",
"def train_LeNet_model(self, model, datagen, trainX_norm, trainY_bin, testX_norm, testY_bin, n_epochs, batch_size):\n # Train model\n model_history = model.fit(datagen.flow(trainX_norm, trainY_bin, batch_size=batch_size), # using a batch size defined by the user (or default batch size of 32)\n validation_data=(testX_norm, testY_bin),\n epochs=n_epochs, verbose=1) # using number of epochs specified by the user (or the default number which is 20)\n \n return model_history",
"def train(self, X, y):\n tf.logging.set_verbosity(\n tf.logging.INFO) # comment if you don't want to display the information during training/evaluation\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=self.params[\"TEST_SIZE\"], random_state=42, stratify=y)\n\n self.label_list = y.unique()\n\n train_features = self.sentences_to_features(X_train, y_train)\n test_features = self.sentences_to_features(X_test, y_test)\n if DEBUG:\n print(\"Transformation to features completed\")\n\n num_train_steps = int(\n len(train_features) / self.params[\"BATCH_SIZE\"] * self.params[\"NUM_TRAIN_EPOCHS\"])\n num_warmup_steps = int(\n num_train_steps * self.params[\"WARMUP_PROPORTION\"])\n\n run_config = self.run_config_builder()\n model_fn = self.model_fn_builder(len(self.label_list), self.params[\"LEARNING_RATE\"], num_train_steps,\n num_warmup_steps)\n self.estimator = self.estimator_builder(model_fn, run_config)\n\n train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=True, drop_remainder=False)\n if DEBUG:\n print(\"Beginning Training!\")\n current_time = time.time()\n self.estimator.train(input_fn=train_input_fn,\n max_steps=num_train_steps)\n if DEBUG:\n print(\"Training took time :\", time.time() - current_time,\n \"s, or \", (time.time() - current_time) / 60, \"min\")\n\n self.classifier_trained = True\n\n test_input_fn = run_classifier.input_fn_builder(features=test_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n\n # apply model on test set and print all metrics\n if DEBUG:\n print(\"Evaluating\")\n self.estimator.evaluate(input_fn=test_input_fn, steps=None)",
"def run_lgr():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X, axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define CVmodel to manage hyperparameter selection\n cvmodel = CVmodel(LogisticRegressor_skl,\n [1e-6, 1e-5, 1e-4, 1e-3, 1e-2,1e-1,1,10,100,1000], 'C^-1',\n solver = 'lbfgs', max_iter=5000, multi_class='auto')\n\n # define Predictor object to manage nested CV\n lg_predictor = Predictor(cvmodel,scorers.accuracy_scorer)\n\n # cross validate\n lg_cross_validation_scores = \\\n lg_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n logger.info('Logistic Regression cross-validation = {0:.3f}'.format(\n np.mean(lg_cross_validation_scores)))",
"def retrain(data):\n import xgboost as xgb\n import numpy as np\n from os.path import abspath, dirname, join\n import sys\n sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'modules'))\n import utils\n sys.path.pop(0)\n\n XGBOOST_DIR = '/var/opt/xgboost'\n latest_dir = utils.get_latest_dir(XGBOOST_DIR)\n\n if latest_dir is not None:\n base_model = xgb.Booster()\n base_model.load_model('{}/model'.format(latest_dir))\n else:\n base_model = None\n\n # retrain model\n X = np.array([d[1:] for d in data])\n y = np.array([d[0] for d in data])\n dataset = xgb.DMatrix(X, label=y)\n params = {\n 'max_depth': 5,\n 'eta': 0.1,\n 'objective': 'binary:logistic',\n 'silent': 0\n }\n model = xgb.train(params, dataset, 30, [], xgb_model=base_model)\n\n # save model\n new_dir = utils.create_child_dir(XGBOOST_DIR)\n model.save_model('{}/model'.format(new_dir))\n\n import time\n time.sleep(10)\n utils.commit_dir(new_dir)",
"def train_bb_reg(self, data):\n logger.info('Training Bounding Box Regression model')\n assert os.path.exists(data), \"File %s with data does not \"\\\n \"exist\" % (data)\n input_images, p_boxes, g_boxes = pickle.load(gzip.open(data))\n self.bb_reg = BoundingBoxRegressor(self.bb_reg_model_name)\n coefficients, scores = self.bb_reg.train_model(\n input_images, p_boxes, g_boxes)\n self.reg_coeffs = coefficients\n return scores",
"def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn",
"def train(self, dataset): \n self.random_forest.fit(dataset[:,:-1], dataset[:,-1])",
"def train(self, num_batches: int):"
]
| [
"0.72763",
"0.6727893",
"0.65406424",
"0.63562286",
"0.6320691",
"0.6302329",
"0.6298004",
"0.6286214",
"0.62784106",
"0.6252643",
"0.61828154",
"0.6118745",
"0.6105687",
"0.60919255",
"0.6041456",
"0.6040824",
"0.6040496",
"0.6013378",
"0.60047317",
"0.5980098",
"0.5949575",
"0.59472275",
"0.5947169",
"0.5944408",
"0.5900699",
"0.5889577",
"0.58749884",
"0.58639085",
"0.5856649",
"0.58524674"
]
| 0.73816603 | 0 |
Predict a PE file with a LightGBM model | def predict_sample(lgbm_model, file_data, feature_version=2):
extractor = PEFeatureExtractor(feature_version)
features = np.array(extractor.feature_vector(file_data), dtype=np.float32)
return lgbm_model.predict([features])[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predict_sample(lgbm_model, file_data):\n extractor = PEFeatureExtractor()\n features = np.array(extractor.feature_vector(file_data), dtype=np.float32)\n return lgbm_model.predict([features])[0]",
"def predict(self, datafile):",
"def predict_proba(self):\n ...",
"def predict():\n if (not request.json):\n abort(400)\n \n product = {\n 'brand': request.json['brand'],\n 'category-1': request.json['category-1'],\n 'category-2': request.json['category-2'],\n 'category-3': request.json['category-3'],\n 'colour': request.json['colour'],\n 'fabric_type': request.json['fabric_type'],\n 'ftp_acrylic': request.json['ftp_acrylic'],\n 'ftp_cotton': request.json['ftp_cotton'],\n 'ftp_elastane': request.json['ftp_elastane'],\n 'ftp_linen': request.json['ftp_linen'],\n 'ftp_other': request.json['ftp_other'],\n 'ftp_polyamide': request.json['ftp_polyamide'],\n 'ftp_polyester': request.json['ftp_polyester'],\n 'ftp_polypropylene': request.json['ftp_polypropylene'],\n 'ftp_silk': request.json['ftp_silk'],\n 'ftp_viscose': request.json['ftp_viscose'],\n 'ftp_wool': request.json['ftp_wool'],\n 'gender': request.json['gender'],\n 'label': request.json['label'],\n 'made_in': request.json['made_in'],\n 'season': request.json['season'],\n 'size': request.json['size'],\n 'unspsc_code': request.json['unspsc_code'],\n 'weight': request.json['weight'],\n 'ML-model': request.json['ML-model']\n }\n\n product['co2_total'] = None\n ml_model = product.pop('ML-model', None)\n if (ml_model == None or ml_model == ''):\n print('Loading default model: LGBM')\n ml_model = 'lgbm_default'\n else:\n print(f'Loading model: {ml_model}')\n model = load_model(ml_model)\n print('Model loaded')\n \n pred_with_intervals = do_prediction_with_params(model, product, intervals=True)\n \n prediction = pred_with_intervals[0][0]\n percentile_5 = pred_with_intervals[0][1] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][1] is not None else None\n percentile_95 = pred_with_intervals[0][2] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][2] is not None else None\n result = {\n \"prediction\": prediction,\n \"5-percentile\": percentile_5,\n \"95-percentile\": percentile_95\n }\n \n print('CO2e prediction complete, returning result')\n print(result)\n \n resp = jsonify(result)\n resp.status_code = 201\n return resp",
"def predict_file(img_path, model):\n return gennet.predict_file(img_path, 'Resnet50', model)",
"def predict_all(model_file, input_file):\n # Reading a model file\n w = {}\n for line in open(model_file):\n line = line.strip()\n (name, value) = line.split(\"\\t\")\n value = float(value)\n w[name] = value\n\n # Evaluation and print results\n for line in open(input_file):\n line = line.strip()\n phi = create_features(line)\n y_ = predict_one(w, phi)\n\n print y_",
"def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)",
"def predict(self, ngsFile, output, **kwargs):\n raise NotImplementedError",
"def test_fer_model(img_folder, model=\"/path/to/model\"):\n preds = None\n ### Start your code here\n\n if not os.path.exists(model):\n print (\"Model Loading Error: can't find the model.\\n\")\n return None\n\n if not os.path.exists(img_folder):\n print (\"Data Loading Error: can't find the data.\\n\")\n return None\n\n with open(model, 'rb') as model_file:\n model = load(model_file)\n data = load_FER2013_samples(img_folder)\n preds = model.predict(data)\n print (preds)\n ### End of code\n return preds",
"def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test",
"def predict(self, x_test):\n x_test = self.tabular_preprocessor.encode(x_test)\n if self.clf is not None:\n y = self.clf.predict(x_test)\n elif self.save_filename is not None:\n booster = lgb.Booster(model_file=self.save_filename)\n y = booster.predict(x_test)\n else:\n pre_model_name = []\n for file in os.listdir(self.path):\n if file.endswith(\"_lgb.txt\"):\n file_ind_pat = re.compile(\"(\\d+)\")\n tmp_filename = int(file_ind_pat.findall(file)[0])\n pre_model_name.append(tmp_filename)\n total_model = len(pre_model_name)\n if total_model == 0:\n raise ValueError(\"Tabular predictor does not exist\")\n else:\n # Use the latest predictor\n self.save_filename = self.path + '/' + str(max(pre_model_name)) + '_lgb.txt'\n booster = lgb.Booster(model_file=self.save_filename)\n y = booster.predict(x_test)\n\n if y is None:\n raise ValueError(\"Tabular predictor does not exist\")\n return y",
"def load_model():\n \n _files = training_file()\n \n predictor_path = _files.model_file(LANDMARKS_WEIGHTS)\n face_rec_model_path = _files.model_file(RESNET_WEIGHTS)\n \n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n facerec = dlib.face_recognition_model_v1(face_rec_model_path)\n \n return (detector, sp, facerec)",
"def prediction_on_a_image(self, input, output,model_saved_path):\n\n # load the saved model\n if os.path.isfile(model_saved_path) is False:\n raise IOError('trained model: %s not exist' % model_saved_path)\n\n clf = joblib.load(model_saved_path)\n\n # split a large image to many small ones\n patch_w = 500 # parameters.get_digit_parameters(\"\", \"train_patch_width\", None, 'int')\n patch_h = 500 # parameters.get_digit_parameters(\"\", \"train_patch_height\", None, 'int')\n overlay_x = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_x\", None, 'int')\n overlay_y = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_y\", None, 'int')\n\n img_folder = os.path.dirname(input)\n img_name = os.path.basename(input)\n inf_list_txt = 'inf_image_list.txt'\n with open(inf_list_txt, 'w') as txt_obj:\n txt_obj.writelines(img_name + '\\n')\n\n img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, patch_w, patch_h, overlay_x, overlay_y,\n train=False)\n\n for img_idx, aImg_patches in enumerate(img_patches):\n inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]\n os.system('mkdir -p '+inf_output_dir)\n os.system('rm '+inf_output_dir+'/*')\n\n ## parallel inference patches\n # but it turns out not work due to the Pickle.PicklingError\n # not working due to mulitple parameters. Jan 9, 2019, hlc\n # use multiple thread\n num_cores = multiprocessing.cpu_count()\n print('number of thread %d' % num_cores)\n # theadPool = mp.Pool(num_cores) # multi threads, can not utilize all the CPUs? not sure hlc 2018-4-19\n theadPool = Pool(num_cores) # multi processes\n\n # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)\n\n parameters_list = [\n (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)\n for (idx, img_patch) in enumerate(aImg_patches)]\n # results = theadPool.map(inference_one_patch_svm, parameters_list) # not working\n results = theadPool.starmap(inference_one_patch_svm, parameters_list) # need python3\n print('result_list', results)\n\n # for p_idx, img_patch in enumerate(aImg_patches):\n # # read images\n # patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)\n #\n # nbands, height, width = patch_data.shape\n #\n # X_predit = patch_data.reshape(nbands, -1)\n # X_predit = np.transpose(X_predit, (1, 0))\n #\n # if os.path.isfile(scaler_saved_path) and self._scaler is None:\n # self._scaler = joblib.load(scaler_saved_path)\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # elif self._scaler is not None:\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # else:\n # X = X_predit\n # basic.outputlogMessage('warning, no pre-processing of data before prediction')\n #\n # # more method on prediction can be foudn in :\n # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n # pre_result = clf.predict(X)\n # result_img = pre_result.reshape((height, width))\n #\n # # save results\n # print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %\n # (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))\n #\n # # short the file name to avoid error of \" Argument list too long\", hlc 2018-Oct-29\n # file_name = \"I%d_%d\" % (img_idx, p_idx)\n #\n # save_path = os.path.join(inf_output_dir, file_name + '.tif')\n # build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)\n #\n # with 
rasterio.open(input) as src_obj:\n # # Set spatial characteristics of the output object to mirror the input\n # kwargs = src_obj.meta\n # kwargs.update(\n # dtype=rasterio.uint8,\n # count=1)\n # # Create the file\n # with rasterio.open(output, 'w', **kwargs) as dst:\n # dst.write_band(1, result_img.astype(rasterio.uint8))\n # basic.outputlogMessage(\"save to %s\" % output)\n\n return True",
"def predict():\n import trace\n trace.predict()",
"def predict_with_lgbm(test_df, row_ids, model_filepath):\n if os.path.isdir(model_filepath):\n click.echo(\"Loading models in directory\" + model_filepath)\n models_in_dir = os.listdir(model_filepath)\n num_models = len(models_in_dir)\n predictions = np.zeros(len(row_ids))\n\n for i, model in enumerate(models_in_dir, start=1):\n with timer(\"Loading model [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n lgbm_model = lgb.Booster(model_file=model_filepath + \"/\" + model)\n\n with timer(\"Predicting values [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n predictions_current = lgbm_model.predict(test_df)\n predictions += np.expm1(predictions_current)\n\n predictions = predictions / num_models\n predictions[predictions < 0] = 0\n return predictions\n\n else:\n with timer(\"Loading model \" + model_filepath):\n lgbm_model = lgb.Booster(model_file=model_filepath)\n\n with timer(\"Predicting values\"):\n predictions = lgbm_model.predict(test_df)\n # Invert log and set possible neg. values to 0\n predictions = np.expm1(predictions)\n predictions[predictions < 0] = 0\n return predictions",
"def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')",
"def predict(self, infile, model_path=None, eval_gold=False, as_text=False):\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tclf, num_labels, cat_labels, multicol_dict, vocab, firsts, lasts = joblib.load(model_path)\n\n\t\tif as_text:\n\t\t\tconllu = infile\n\t\telse:\n\t\t\tconllu = io.open(infile,encoding=\"utf8\").read()\n\n\t\t#tagged = udpipe_tag(conllu,self.udpipe_model)\n\t\ttagged = tt_tag(conllu,self.lang)\n\n\t\ttrain_feats, _, toks, _, _ = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\theaders = sorted(list(train_feats[0].keys()))\n\n\t\tdata = []\n\n\t\tpreds = {}\n\t\tfor e in self.estimators:\n\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\theaders.append(e.name + \"_prob\")\n\n\t\tgenre_warning = False\n\t\tfor i, item in enumerate(train_feats):\n\t\t\titem[\"first\"] = item[\"word\"][0] if item[\"word\"][0] in firsts else \"_\"\n\t\t\titem[\"last\"] = item[\"word\"][-1] if item[\"word\"][-1] in lasts else \"_\"\n\t\t\tif \"genre\" in cat_labels:\n\t\t\t\tif item[\"genre\"] not in multicol_dict[\"encoder_dict\"][\"genre\"].classes_: # New genre not in training data\n\t\t\t\t\tif not genre_warning:\n\t\t\t\t\t\tsys.stderr.write(\"! WARN: Genre not in training data: \" + item[\"genre\"] + \"; suppressing further warnings\\n\")\n\t\t\t\t\t\tgenre_warning = True\n\t\t\t\t\titem[\"genre\"] = \"_\"\n\t\t\tif \"pos\" in cat_labels:\n\t\t\t\tif item[\"pos\"] not in multicol_dict[\"encoder_dict\"][\"pos\"].classes_:\n\t\t\t\t\titem[\"pos\"] = \"_\"\n\t\t\tif \"cpos\" in cat_labels:\n\t\t\t\tif item[\"cpos\"] not in multicol_dict[\"encoder_dict\"][\"cpos\"].classes_:\n\t\t\t\t\titem[\"cpos\"] = \"_\"\n\t\t\tif item[\"word\"] not in vocab and \"word\" in multicol_dict[\"encoder_dict\"]:\n\t\t\t\tif item[\"pos\"] in multicol_dict[\"encoder_dict\"][\"word\"].classes_:\n\t\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\t\telse:\n\t\t\t\t\titem[\"word\"] = \"_\"\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, _, _ = self.n_gram(data,headers,[],[])\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded = self.multicol_transform(data,columns=multicol_dict[\"columns\"],all_encoders_=multicol_dict[\"all_encoders_\"])\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tpred = clf.predict(data_x)\n\n\t\t# Ensure first token in document is always a sentence break\n\t\tfor i, x in enumerate(data_encoded[\"tok_id\"].values):\n\t\t\tif x == 1:\n\t\t\t\tpred[i] = 1\n\n\t\tif eval_gold:\n\t\t\tgold_feats, _,_,_,_ = read_conll(conllu,genre_pat=self.genre_pat,mode=\"sent\",as_text=True)\n\t\t\tgold = [int(t['wid'] == 1) for t in gold_feats]\n\t\t\tconf_mat = confusion_matrix(gold, pred)\n\t\t\tsys.stderr.write(str(conf_mat) + \"\\n\")\n\t\t\ttrue_positive = conf_mat[1][1]\n\t\t\tfalse_positive = conf_mat[0][1]\n\t\t\tfalse_negative = conf_mat[1][0]\n\t\t\tprec = true_positive / (true_positive + false_positive)\n\t\t\trec = true_positive / (true_positive + false_negative)\n\t\t\tf1 = 2*prec*rec/(prec+rec)\n\t\t\tsys.stderr.write(\"P: \" + str(prec) + \"\\n\")\n\t\t\tsys.stderr.write(\"R: \" + str(rec) + \"\\n\")\n\t\t\tsys.stderr.write(\"F1: \" + str(f1) + \"\\n\")\n\t\t\twith 
io.open(\"diff.tab\",'w',encoding=\"utf8\") as f:\n\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\tf.write(\"\\t\".join([toks[i],str(gold[i]),str(pred[i])])+\"\\n\")\n\t\t\treturn conf_mat, prec, rec, f1\n\t\telse:\n\t\t\treturn pred",
"def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)",
"def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr",
"def predict_property(model_file, fps):\n model = joblib.load(model_file)\n return model.predict(fps)",
"def test_rm500(self):\n\t\tmy_test_file = \"/\".join([os.path.dirname(sys.modules[\"cancerscope\"].__file__), \"../tests/data/test_tcga.txt\"])\n\t\tscope_ensemble_obj = cancerscope.scope()\n\t\ttest_X = scope_ensemble_obj.load_data(my_test_file) # X, samples, features_test, in_genecode\n\t\t## Get the model of interest\n\t\tmodel_name = \"v1_rm500\"\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), model_name)\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[model_name]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=model_name)\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\tself.assertEqual(len(lmodel.features), 17688)\n\t\tx_input = lmodel.prepare_input_featorders(X=test_X[0], x_features_genecode = test_X[3], x_features=test_X[2])\n\t\t\"\"\"Test if it predicts properly\"\"\"\n\t\tallpreds_names = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=False)\n\t\tallpreds_values = lmodel.predict(x_input, get_all_predictions=True,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_names = lmodel.predict(x_input, get_all_predictions=False,get_numeric=False, get_predictions_dict=False)\n\t\ttoppreds_values = lmodel.predict(x_input, get_all_predictions=False,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_df = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=True)\n\t\tself.assertEqual(len(allpreds_names[0]), 66); self.assertEqual(len(allpreds_names[1]), 66); \n\t\tself.assertEqual(allpreds_values.shape[1],66); \n\t\tself.assertAlmostEqual(allpreds_values[0][1], 0.003065253372039)\n\t\tself.assertEqual(toppreds_names[0], \"PAAD_TS\"); self.assertEqual(toppreds_names[1], \"HNSC_TS\")\n\t\tself.assertAlmostEqual(toppreds_values[0],0.20889836023919614, 6, 0.000001); self.assertAlmostEqual(toppreds_values[1], 0.44416348623870444, 6, 0.000001)\n\t\t#self.assertEqual(round(toppreds_values[0],12), round(0.208874390780809,12)); self.assertEqual(round(toppreds_values[1],12), round(0.444162763077693,12))\n\t\tself.assertEqual(toppreds_df[0][0][0], toppreds_names[0]); self.assertAlmostEqual(float(toppreds_df[0][0][1]), toppreds_values[0]); \n\t\tself.assertEqual(toppreds_df[1][0][0], toppreds_names[1]); self.assertAlmostEqual(float(toppreds_df[1][0][1]), toppreds_values[1])",
"def apply_model(infile_pred,\n\t\t\t\tinfile_model,\n\t\t\t\tinfile_features,\n\t\t\t\tthreshold_prob=0.5,\n\t\t\t\twindowed_peak_picking=False,\n\t\t\t\tout_dir=\"res/\",\n\t\t\t\ttop_peaks=50,\n\t\t\t\tmax_distance=275,\n\t\t\t\tdistance_bins=0.005):\n\t\n\t#Read the to be used m/z bins (features)\n\tfeatures = [f.strip() for f in open(infile_features).readlines()]\n\t\n\t#Check the file extension and parse to get features\n\tif infile_pred.endswith(\".mgf\"): \n\t\tfeats,feat_bins,instance_names,count_inst = read_mgf(infile_pred,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sum_feats=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t feat_bins=features,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t max_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t step_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t top_peaks=top_peaks)\n\telif infile_pred.endswith(\".msp\"): \n\t\tfeats,feat_bins,instance_names,count_inst = read_msp(infile_pred,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sum_feats=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t feat_bins=features,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t max_dist=max_distance,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t step_size=distance_bins,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t top_peaks=top_peaks)\n\telse: return(False)\n\t\n\t#print(feats.shape)\n\t#print(feats)\n\t#print(len(instance_names))\n\t#print(instance_names)\n\t\n\t#Load the XGBoost model\n\trandom_search_res_xgb = pickle.load(open(infile_model,\"rb\"))\n\t\n\t#Format the preds\n\tpreds = pd.DataFrame(random_search_res_xgb.predict_proba(feats),index=instance_names,columns=[\"Prob_class_0\",\"Prob_class_1\"])\n\t#print(preds)\n\t\n\t#Plot the probabilities\n\tpd.Series(preds[\"Prob_class_1\"]).plot(kind=\"density\")\n\taxes = plt.gca()\n\taxes.set_xlim([0.0,1.0])\n\taxes.set_ylim([0.0,1.0])\n\tplt.savefig(out_dir+\"density_groups_zoomed.png\", bbox_inches=\"tight\")\n\tplt.close()\n\t\n\t#Threshold probabilities\n\t#print(list(preds.index[preds[\"Prob_class_1\"] > threshold_prob]))\n\tpreds.to_csv(out_dir+\"predictions.csv\")",
"def predict(self, chips, windows, tmp_dir):\n pass",
"def predict(model, test_file, weights, params):\n # Extract the name and the folder from the given weights file\n weight_name = basename(weights)\n dir_out = dirname(weights)+'/'\n mean = cv2.imread(\"/home/juarez/ActionRecognitionSmallDatasets/codes/Data/dogcentric/dog_centric_mean.png\")\n # Create a progress bar in order to controle how long predict is going to take\n pf = pathfile.FileOfPaths(test_file)\n pb = progressbar.ProgressBar(pf.nb_lines)\n dim_ordering = K.image_data_format()\n # Create a txt to be written with the prediction\n fout = open(dir_out + 'prediction_' + weight_name + '.txt', 'w')\n\n #X_test, y_test = _lstm_preprocess(test_file, params)\n model.load_weights(weights)\n # Read each image from test_file and predict it with the model\n with open(test_file) as f:\n for line in f:\n pb.update()\n path = line.split(' ')[0]\n y = line.split(' ')[1][0]\n img = cv2.imread(path)\n img = np.reshape(img, [1, 240, 240, 3])\n #img = np.reshape(img, dim_ordering)\n #img = img_to_array(img)\n #img -= mean\n classes = model.predict(img)\n prediction = classes.argmax()\n #print str(prediction) + ' - ' + str(y)\n fout.write(path + ' ' + str(y) + ' ' + str(prediction) + '\\n')\n fout.close()",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class",
"def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified",
"def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')",
"def predict_only(self):",
"def lm_train(data_dir, language, fn_LM):\r\n\r\n # TODO: Implement Function\r\n\r\n language_model, unigram, bigram = {}, {}, {}\r\n CKP = \"WEAREDELETINGEND\"\r\n pre_w = CKP\r\n for root, dirs, files in os.walk(data_dir, topdown=False):\r\n for name in files:\r\n if name.endswith(language):\r\n #print(\"reading \", name)\r\n filepath = os.path.join(data_dir, name)\r\n readingfile = open(filepath, \"r\")\r\n for line in readingfile:\r\n processed = preprocess(line, language)\r\n if len(processed) != 0:\r\n tokenList = processed.split()\r\n for w in tokenList:\r\n # ======================\r\n # for unigram structure\r\n # ======================\r\n # not exist yet, initialize it at count 1\r\n if w not in unigram.keys():\r\n unigram[w] = 1\r\n else:\r\n unigram[w] += 1\r\n\r\n # ======================\r\n # for bigram structure\r\n # ======================\r\n if pre_w not in bigram.keys():\r\n bigram[pre_w] = {} # building the first words level\r\n bigram[pre_w][w] = 1\r\n else:\r\n if w not in bigram[pre_w].keys():\r\n bigram[pre_w][w] = 1\r\n else:\r\n bigram[pre_w][w] += 1\r\n pre_w = w\r\n pre_w = CKP\r\n\r\n\r\n language_model[\"uni\"] = unigram\r\n bigram.pop(CKP)\r\n bigram.pop(\"SENTEND\")\r\n language_model[\"bi\"] = bigram\r\n\r\n #Save Model\r\n with open(fn_LM+'.pickle', 'wb') as handle:\r\n pickle.dump(language_model, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n return language_model"
]
| [
"0.6942636",
"0.66490805",
"0.6243864",
"0.6220594",
"0.619182",
"0.6052634",
"0.6038517",
"0.60092866",
"0.5975578",
"0.592887",
"0.59086394",
"0.5866465",
"0.5832725",
"0.58074164",
"0.57809746",
"0.5778944",
"0.5771282",
"0.5757018",
"0.5744",
"0.5714554",
"0.56919616",
"0.5689461",
"0.56862515",
"0.5686197",
"0.56794983",
"0.5658926",
"0.56567174",
"0.5641976",
"0.5627279",
"0.56225026"
]
| 0.6835541 | 1 |
gaussian(centre, k, x, intensity) == (intensity * exp(-(k * (x - centre))^2)) | def gaussian(centre, k, intensity, xpos):
return intensity * np.exp(- np.power(k * (xpos - centre), 2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))",
"def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)",
"def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)",
"def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)",
"def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)",
"def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)",
"def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))",
"def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))",
"def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))",
"def make_gaussian(size, fwhm=3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n k = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)\n return k / np.sum(k)",
"def gaussian(size,sigma):\n a,b=np.ogrid[-size/2:size/2,-size/2:size/2]\n mask = a**2+b**2\n mask = np.exp(-mask.astype('float')/(2*float(sigma**2)))\n return mask",
"def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix",
"def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx",
"def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)",
"def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))",
"def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H",
"def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background",
"def fspecial_gaussian(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h",
"def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)",
"def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )",
"def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h",
"def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )",
"def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r",
"def kp2gaussian(kp, spatial_size, kp_variance):\n mean = kp['value']\n coordinate_grid = make_coordinate_grid(spatial_size, mean.type())\n number_of_leading_dimensions = len(mean.shape) - 1\n shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape\n coordinate_grid = coordinate_grid.view(*shape)\n repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)\n coordinate_grid = coordinate_grid.repeat(*repeats)\n shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)\n mean = mean.view(*shape)\n mean_sub = coordinate_grid - mean\n out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)\n return out",
"def gauss_kern(sigma,h):\n h1 = h\n h2 = h\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n # sigma = 10.0\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) )\n return g / g.sum()",
"def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val",
"def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()",
"def gauss_kern(size, sigma=1.0):\n h1 = size[0]\n h2 = size[1]\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );\n return g / g.sum()"
]
| [
"0.75760573",
"0.71707886",
"0.7004626",
"0.69898623",
"0.69898623",
"0.69898623",
"0.69565445",
"0.6826124",
"0.6816904",
"0.6770682",
"0.6768502",
"0.67603844",
"0.6753947",
"0.6737203",
"0.67277646",
"0.672466",
"0.66839015",
"0.66783154",
"0.66724944",
"0.6649369",
"0.65789616",
"0.6572583",
"0.6568363",
"0.6546761",
"0.65152085",
"0.6506453",
"0.6427564",
"0.6419911",
"0.63930964",
"0.63900995"
]
| 0.87825394 | 0 |
Checks if the goal state or start state is in the obstacle area | def obstacle_prone_area(self,image):
start_x=int(self.start[0])
start_y=int(self.start[1])
goal_x=int(self.goal[0])
goal_y=int(self.goal[1])
print(goal_x,goal_y)
if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):
#print(1)
return False
else:
#print(2)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def isGoalState(self, state):\n diff = state.get_pos() - self._player_loc\n return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \\\n abs(diff.x) + abs(diff.z) == 2 and \\\n state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \\\n (_AIR, _LAVA, _WATER)",
"def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False",
"def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True",
"def __check_obstacle_intersections(self, goal):\n # generate a proximity test geometry for the goal\n min_clearance = self.cfg[\"goal\"][\"min_clearance\"]\n n = 6 # goal is n sided polygon\n goal_test_geometry = []\n for i in range(n):\n goal_test_geometry.append(\n [goal[0] + min_clearance * cos(i * 2 * pi / n),\n goal[1] + min_clearance * sin(i * 2 * pi / n)])\n goal_test_geometry = Polygon(goal_test_geometry)\n intersects = False\n for obstacle in self.current_obstacles:\n intersects |= geometrics.convex_polygon_intersect_test(goal_test_geometry, obstacle.global_geometry)\n return intersects",
"def check_movement(self):\n is_clear = True # default return value if no obstacles\n # !!! IR_SENSORS DISABLED\n if self.move_state == MOV_FORWARD:\n if self.l.look_for_obstacle(OBST_FRONT) == True:\n is_clear = False\n return is_clear",
"def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError",
"def isGoalState(self, state):\n x,y = state\n if x == self.goal[0] and y == self.goal[1]:\n return True\n else:\n return False\n util.raiseNotDefined()",
"def __isTileGoalState(self, point):\n return point == self.goalPoint",
"def is_obstacle(self, pos: tuple):\n if self.within_map(pos):\n return self.map[round(pos[0]), round(pos[1])] == OBSTACLE\n else:\n return False",
"def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None",
"def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False",
"def check_map_obstacle_has_sight(self):\n return self.map_obstacle.sight_range > 0",
"def is_at_goal(self):\n return self._current_loc.get_row() == BoardPath._goal_loc.get_row() and \\\n self._current_loc.get_column() == BoardPath._goal_loc.get_column()",
"def isGoal(self, state):\n x, y = state\n return (x, y) in self.food.asList()",
"def is_goal(self, state: Grid2D.State) -> bool:\n return state.agent_position in self.goals",
"def isGoalState(self, state):\n \"*** YOUR CODE HERE ***\"\n # Utilizaré el método .count del grid, de manera que me contará los trues que haya.\n # Cuando no queden trues, ya hemos acabado.\n return state[1].count() == 0\n # util.raiseNotDefined()",
"def check_obstruction(self, start_x, start_y, end_x, end_y, piece):\n\n # Displacement for any single point in the area\n disp_x = end_x - start_x\n disp_y = end_y - start_y\n\n # Piece's area to shift for obstructions\n space = piece.get_area()\n\n # Game board area, initialize check spaces for while loop\n board_space = self._game_board.get_board_area()\n check_x = 0\n check_y = 0\n\n # Assign correct shift value for displacement\n if disp_x > 0:\n shift_x = 1\n elif disp_x == 0:\n shift_x = 0\n else:\n shift_x = -1\n\n if disp_y > 0:\n shift_y = 1\n elif disp_y == 0:\n shift_y = 0\n else:\n shift_y = -1\n\n # For each point in space\n for point in space:\n scale = 1\n # Gradually shift values in piece area up to displacement and check if the space is occupied\n while (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n check_x = point[0] + shift_x * scale\n check_y = point[1] + shift_y * scale\n\n # If an obstruction is found, and it is not a piece meant to be captured\n # ie, a piece in the end-position, return True\n if ((check_x, check_y) not in space) and board_space[check_x][check_y] != \" \":\n if (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n return True\n scale += 1\n # Return False if not obstructed\n return False",
"def goal_test(self, state):\n self.numbernodes += 1\n\n i = 0\n for box in state.boxes :\n for coord in self.board.positionGoal :\n if coord[0] == box.y and coord[1] == box.x : \n i+=1\n if i == 0 : return False\n i = 0\n return True",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def isGoalState(self, state):\n x,y = state\n\n \"*** YOUR CODE HERE ***\"\n return self.food[x][y]",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def isGoalState(self, state):\n x, y = state\n if state in self.food.asList():\n return True",
"def isGoalState(self, state):\n coordinates = state[0]\n edges = state[1]\n corners = self.corners\n TotalCorners = 4\n\n if(len(edges) == TotalCorners):\n return True\n else:\n if coordinates in corners:\n if not coordinates in edges:\n edges.append(coordinates)\n return False",
"def isGoalState(self, state):\n x, y = state[0]\n\n \"*** YOUR CODE HERE ***\"\n return self.food[x][y]\n # util.raiseNotDefined()",
"def check_boundaries(self):\n # Checks if the enemy bar has gone of the net\n if self.rect.left <= self.settings.WINDOW_WIDTH / 2:\n self.rect.left = self.settings.WINDOW_WIDTH / 2\n self.isMovingUp = False\n\n # Checks if the enemy bar has gone out of bound to the right\n if self.rect.right >= self.settings.WINDOW_WIDTH:\n self.rect.right = self.settings.WINDOW_WIDTH\n self.isMovingUp = True",
"def agents_at_goal(self):\r\n return self.searchenv.conv.state_to_tile(self.searchstate.positions) == self.searchenv.goal_tile",
"def in_goal(self, goal, proximity=0.0, min_distance=-1.0):\n if self.total_distance_covered < min_distance:\n return False\n\n goal_x, goal_y = goal\n\n x_in_goal = goal_x - proximity <= self.x <= goal_x + proximity\n y_in_goal = goal_y - proximity <= self.y <= goal_y + proximity\n\n return x_in_goal and y_in_goal"
]
| [
"0.73806673",
"0.7085184",
"0.705285",
"0.7039204",
"0.7030871",
"0.691709",
"0.6894923",
"0.67947966",
"0.67904234",
"0.6745311",
"0.6700286",
"0.6694245",
"0.6691153",
"0.6671266",
"0.6661765",
"0.66572803",
"0.6646301",
"0.6644943",
"0.66365445",
"0.662294",
"0.66006845",
"0.6559619",
"0.65154904",
"0.65154904",
"0.6510489",
"0.65035594",
"0.6483453",
"0.6446708",
"0.6435834",
"0.6402745"
]
| 0.7400996 | 0 |
Create all obstacles in the image by calling various obstacle functions | def obstacles_form(self,image):
major_axis=60
minor_axis=30
c_y=246
c_x=145
c_y1=90
c_x1=70
radius=35
for i in range(len(image)):
for j in range(len(image[0])):
#self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)
self.circle(image,100,i,j,200,200)
self.circle(image,100,i,j,800,200)
#self.slanted_rect(image,i,j)
self.boundary(image,i,j)
self.boundary1(image,i,j)
self.boundary2(image,i,j)
self.c_shape(image,i,j)
#exploration.c_shape(image,i,j)
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)",
"def get_obstacles(image):\n\n ih, iw = image.shape[:2]\n image_copy = image.copy()\n\n #resize the image to the size of arena\n image = cv2.resize(image, ARENA_SIZE, interpolation=cv2.INTER_CUBIC)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n #replace all black pixels to white pixels\n gray[np.where(gray == 0)]= 255\n\n #get the thresholded binary image\n ret,threshold = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)\n\n #find all the countours in the binary image\n _, contours, heiarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n cont = []\n\n #create a mask to draw contours on\n blocks = mask = np.zeros(threshold.shape[:2], np.uint8)\n\n #create a dictionary to hold image roi of all puzzle peices\n blocks_roi = {}\n\n #iterate through all contours\n for i, c in enumerate(contours[1:]):\n\n #find the minimum area fitting rectangle of the contour\n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n #create the copy of the mask\n mask_copy = mask.copy()\n\n #draw the rectangle on the mask\n cv2.drawContours(mask_copy, [box], -1, (255,255,255), 3)\n\n #floodfill the rectangle\n cv2.floodFill(mask_copy, None, (0,0), 255)\n mask_inv = cv2.bitwise_not(mask_copy)\n blocks = cv2.add(blocks, mask_inv)\n\n _, contours, heiarchy = cv2.findContours(blocks, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n obstacles = {}\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n obstacles.update({(int(x+w/2), int(y+h/2)): BLOCK_SIZE})\n #obstacles.update({(int(x+w/2), int(y+h/2)): (w, h)}) # for unknown block sizes\n bottom_r = remap((x+w, y+h), ARENA_SIZE, (iw,ih))\n top_l = remap((x, y), ARENA_SIZE, (iw,ih))\n blocks_roi.update({(int(x+w/2), int(y+h/2)): image_copy[top_l[1]:bottom_r[1], top_l[0]:bottom_r[0]]})\n\n return obstacles, blocks_roi",
"def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)",
"def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects",
"def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)",
"def create(self, pygame):\n\n white = (255,255,255)\n self.obstacle_img = pygame.image.load(\"./Images/Obstacle.png\").convert()\n self.obstacle_img.set_colorkey(white)\n\n for i in range(8):\n self.random_objects.append(pygame.image.load(\"./Images/Object{}.png\".format(i+1)).convert())\n # self.random_objects[i].set_colorkey(white)",
"def getInitialObstacles():\n # hardcode number of blocks\n # will account for movemnet\n from random import choice\n from globals import TILEWIDTH, TILEHEIGHT, WINHEIGHT, TILEFLOORHEIGHT, LEVEL, HALFWINWIDTH\n\n no_of_blocks = 50\n for b in range(no_of_blocks // 2):\n # get image\n # image = globals.IMAGESDICT['rock']\n for y in range(1,5):\n image = globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n # make rect\n spaceRect = pygame.Rect((b * TILEWIDTH, y * TILEFLOORHEIGHT, TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n negativeRect = pygame.Rect([-150, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEHEIGHT])\n landscape = Landscape(image, negativeRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n positiveRect = pygame.Rect([LEVEL[0] - TILEWIDTH, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, positiveRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n bottomRect = pygame.Rect([HALFWINWIDTH, LEVEL[1] - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, bottomRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n for x in range(0, LEVEL[0], 50):\n for y in range(10):\n image = globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n spaceRect = pygame.Rect((x, LEVEL[1] - (y * TILEHEIGHT), TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n if choice([0,1,0]):\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n\n return",
"def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)",
"def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays",
"def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst",
"def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()",
"def draw_obstacle(start, end, img):\n # start, end, top_right, top_left = generate_obstacle_point(start, (start[0] + _OBSTACLE_SIZE, start[1] ))\n cv2.fillPoly(img, np.array([[[start[0] - 25, start[1] - 25],\n [start[0] + 25, start[1] - 25],\n [start[0] + 25, start[1] + 25],\n [start[0] - 25, start[1] + 25]]]), _RED)\n # cv2.rectangle(img, (start[0] - 25, start[1] - 25), (start[0] + 25, start[1] + 25), (0, 255, 0), 3)\n return img",
"def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, 
snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight",
"def process_obstacle(color, cx, cy, box, x, y, obj_length, obj_height, obj_depth,\n\t\t\t\t\t equi_diameter, obstacle_list, obstacle_lifetime, obstacle_id, visualize, send_data):\n\tcoords = list(depth_to_point_cloud_pos(cx, cy, obj_depth)) # convert obstacle depth to XYZ coordinate\n\n\t#theta = CameraPosition['azimuth'] * math.pi / 180 # get robot pitch angle in radians\n\t#coords[0] = CameraPosition['x'] - coords[0] * math.cos(theta) # convert relative obstacle position to global\n\t#coords[2] = CameraPosition['y'] + coords[2] * math.sin(theta)\n\tmm_diameter = equi_diameter * (1.0 / CameraParams['fx']) * obj_depth # convert pixel diameter to mm\n\n\tif 100 < mm_diameter < 400:\n\t\tnew_obstacle = True\n\t\tcurrent_obstacle = None\n\t\tfor obstacle in obstacle_list:\n\t\t\tx_match = abs(obstacle.x - coords[0]) < 0.3\n\t\t\ty_match = abs(obstacle.y - coords[2]) < 0.3\n\t\t\tz_match = abs(obstacle.z - coords[1]) < 0.5\n\t\t\tdiameter_match = abs(obstacle.diameter - mm_diameter) / 1000. < 0.5\n\t\t\tif x_match and y_match:\n\t\t\t\tobstacle.x = coords[0]\n\t\t\t\tobstacle.y = coords[2]\n\t\t\t\tobstacle.z = coords[1]\n\t\t\t\tobstacle.diameter = mm_diameter / 1000.\n\t\t\t\tnew_obstacle = False\n\t\t\t\tobstacle.lifetime = obstacle_lifetime\n\t\t\t\tif send_data:\n\t\t\t\t\tsend_obstacle_data(obstacle)\n\t\t\t\tcurrent_obstacle = Obstacle(obstacle.id,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.x,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.y,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.z,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.diameter,\n\t\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\t\tif obstacle.lifetime == 0:\n\t\t\t\t\tobstacle_list.remove(obstacle)\n\t\t\t\tbreak\n\t\tif new_obstacle:\n\t\t\tcurrent_obstacle = Obstacle(obstacle_id,\n\t\t\t\t\t\t\t\t\t\tcoords[0],\n\t\t\t\t\t\t\t\t\t\tcoords[2],\n\t\t\t\t\t\t\t\t\t\tcoords[1],\n\t\t\t\t\t\t\t\t\t\tmm_diameter / 1000.,\n\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\tobstacle_id += 1\n\t\t\tif send_data:\n\t\t\t\tsend_obstacle_data(current_obstacle)\n\t\t\tobstacle_list.append(current_obstacle)\n\n\t\tif visualize:\n\t\t\t# begin visualization\n\t\t\tcv2.drawContours(color, [box], 0, (0, 0, 255), 1)\n\t\t\tcv2.rectangle(color, (x, y), (x + obj_length, y + obj_height), (0, 255, 0), 2)\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\tcv2.putText(color, 'id = %d' % current_obstacle.id, (cx, cy + 15), font, 0.4, (255, 0, 255),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"x = %.2f\" % coords[0], (cx, cy + 30), font, 0.4, (0, 0, 255), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"y = %.2f\" % coords[2], (cx, cy + 45), font, 0.4, (0, 255, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"z = %.2f\" % (obj_depth / 1000), (cx, cy + 60), font, 0.4, (255, 0, 127),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"diameter = %.2f\" % (mm_diameter / 1000), (cx, cy + 75), font, 0.4,\n\t\t\t\t\t\t(255, 127, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\treturn obstacle_id",
"def __generate_octagon_obstacles(self, world):\n obs_radius = self.cfg[\"obstacle\"][\"octagon\"][\"radius\"]\n obs_min_count = self.cfg[\"obstacle\"][\"octagon\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"octagon\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"octagon\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"octagon\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = OctagonObstacle(obs_radius, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles",
"def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles",
"async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in obstacle_manager:\n boxes.append(obstacle.get_bounding_box())\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame)\n\n await Sleep(1)\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame, negative=True)",
"def obstacles(p):\n c1 = np.array([-0.5,-1.])\n r1 = 1.\n c2 = np.array([0.75,0.5])\n r2 = 0.5\n return [\n (p[0] + 2, np.array([1.,0.])), # left\n (2 - p[0], np.array([-1.,0.])), # right\n (p[1] + 1, np.array([0.,1.])), # bottom\n (1 - p[1], np.array([0.,-1.])), # top\n (norm(p - c1) - r1, (p - c1)/norm(p - c1)), # circle 1\n (norm(p - c2) - r2, (p - c2)/norm(p - c2)) # circle 2\n ]",
"def draw_boxes(img, paths, exit_masks=[]):\r\n for path in paths:\r\n contour, centroid = path[-1][:2]\r\n # DONT DRAW IF VEHICLE EXITS\r\n if vehicle_exits(centroid, exit_masks): continue\r\n x, y, w, h = contour\r\n\r\n # DRAW RECTANGLE AND CIRCLE DENOTING THE BOUNDARY AND CENTROID OF VEHICLE\r\n cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1),BOUNDING_BOX_COLOUR, 1)\r\n cv2.circle(img, centroid, 2, CENTROID_COLOUR, -1)\r\n return img",
"async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in OBSTACLES:\n boxes.append(obstacle.dump_bounding_box())\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame)\n\n await asyncio.sleep(0)\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame, negative=True)",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def __init__(self, grid_height, grid_width, obstacle_list = None, \r\n zombie_list = None, human_list = None):\r\n poc_grid.Grid.__init__(self, grid_height, grid_width)\r\n if obstacle_list != None:\r\n for cell in obstacle_list:\r\n self.set_full(cell[0], cell[1])\r\n if zombie_list != None:\r\n self._zombie_list = list(zombie_list)\r\n else:\r\n self._zombie_list = []\r\n if human_list != None:\r\n self._human_list = list(human_list) \r\n else:\r\n self._human_list = []",
"def __init__(self, grid_height, grid_width, obstacle_list = None, \n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []",
"def __init__(self, grid_height, grid_width, obstacle_list = None,\n zombie_list = None, human_list = None):\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []",
"def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)",
"def reset_obstacles(self):\n self.obstacles = np.array([])",
"def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles",
"def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r"
]
| [
"0.7210673",
"0.71489096",
"0.71302867",
"0.7099033",
"0.7065752",
"0.6955419",
"0.68727446",
"0.6848929",
"0.6677928",
"0.6603184",
"0.6546827",
"0.6510921",
"0.64350826",
"0.6417596",
"0.63998944",
"0.63848907",
"0.62678915",
"0.62417763",
"0.61390275",
"0.6092278",
"0.6092071",
"0.60906297",
"0.6072062",
"0.6069229",
"0.60332084",
"0.6025984",
"0.60176545",
"0.60034525",
"0.59728444",
"0.5960463"
]
| 0.7757402 | 0 |
Checks if the goal is reached or not; if reached, returns True, and if not, continues exploring | def goal_reached(self):
pos_0=self.goal[0]
pos_1=self.goal[1]
#self.start_score=self.string(self.start[0],self.start[1])
#self.data_with_string[self.start_score]=self.start
#self.goal_score=self.string(pos_0,pos_1)
if self.h(self.current_score[0],self.current_score[1],self.current_score[2]) <=10 :
self.goal_score=self.string(self.current_score[0],self.current_score[1],self.current_score[2])
print("goal_reached")
#print(len(self.expanded))
#print("self.expanded",self.expanded)
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reached_goal(self):\n for i in range(self.simulator_.num_agents):\n if rvo_math.abs_sq(self.simulator_.agents_[i].position_ - self.goals_[i]) > self.simulator_.agents_[i].radius_ * self.simulator_.agents_[i].radius_:\n return False\n\n return True",
"def goal_reached(self, position):\n return position >= self.goal",
"def check_reached_waypoint_goal(self):\n return self.control_instance.check_reached_waypoint_goal()",
"def checkGoal(self):\n # -- It is not included for simplifity --#\n if self.reward_cumulative != None:\n x = round((abs(self.reward_cumulative) - abs(round(self.reward_cumulative))) * 100);\n rem_goal = x % 25\n rem_timeout = x % 20\n if rem_goal == 0 and x != 0:\n self.is_goal = True\n else:\n self.is_goal = False\n\n if rem_timeout == 0 and x != 0:\n self.is_timeout = True\n else:\n self.is_timeout = False",
"def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1",
"def done(self):\n return self.goal == (0, 0)",
"def is_at_goal(self):\n return self._current_loc.get_row() == BoardPath._goal_loc.get_row() and \\\n self._current_loc.get_column() == BoardPath._goal_loc.get_column()",
"def goal_test(self):\n if -1 in self.state:\n return False\n else:\n return True",
"def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False",
"def reach_goal(state: State, action: Action, next_state: State) -> bool:\n return overlap(state, action, next_state, object_type=Goal)",
"def checkGoalReached(self):\n if self._after_dead_line():\n if not self._crowdsale_closed.get():\n self._crowdsale_closed.set(True)\n self.CrowdsaleEnded()\n\n if self._amount_raised.get() >= self._funding_goal.get():\n self._funding_goal_reached.set(True)\n self.GoalReached(self._addr_beneficiary.get(), self._amount_raised.get())\n Logger.debug(f'Goal reached!', TAG)",
"def goal_reached(self, robot_pose):\n goal = self.global_plan.poses[-1].pose\n return self.calc_distance(robot_pose, goal) < self.goal_dist_threshold",
"def at_goal(self):\n return self.distance_from_goal < self.robot.wheels.base_length/2",
"def at_goal(self):\n return self.distance_from_goal < self.robot.wheels.base_length/2",
"def _is_goal_reached(self, state):\n return self._goal.holds(state.literals)",
"def goalReached(self, rewards):\n return len(rewards) >= 100 and np.mean(rewards[-100:]) >= 18",
"def agents_at_goal(self):\r\n return self.searchenv.conv.state_to_tile(self.searchstate.positions) == self.searchenv.goal_tile",
"def isGoal( self ):\n if self.numPlayer1 == 0:\n return 1\n if self.numPlayer2 == 0:\n return -1\n return 0\n # d_n1 = 0 # count number(-1)\n # d1 = 0 # count number(1))\n # for row in range( self.n ):\n # for col in range( self.n ):\n # if (self.board[row][col] == -1):\n # d_n1 += 1\n # if (self.board[row][col] == 1):\n # d1 += 1\n # if d_n1 > 0 and d1 > 0:\n # return 0 # //not goal state\n # if d_n1:\n # return -1\n # return 1",
"async def is_target_reached(self) -> bool: # type: ignore\n ...",
"def if_goal_reached(self, pose):\n dx = self.pos.x - pose.x\n dy = self.pos.y - pose.y\n dist = math.sqrt(dx ** 2 + dy ** 2)\n return dist < self.radiu",
"def is_goal(self, node):\n # print(\"is {} in {}\".format(node, self.goal_nodes))\n if node in self.goal_nodes:\n return True",
"def is_goal(state):\n pass",
"def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False",
"def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False",
"def _is_goal_achieved(self) -> bool:\n assert self._goal_info_cache\n return self._goal_info_cache[1]",
"def is_goal(self):\n if self.team1.get_cur_hp() == 0:\n return 1\n elif self.team2.get_cur_hp() == 0:\n return -1\n else:\n return 0",
"def is_goal_unreachable(self, x, y, theta):\n self.current_x = x\n self.current_y = y\n self.wp_goal_unreachable = Point(self.current_x,self.current_y)\n self.dist_btw_follow_goal_unreachable = abs(self.wp_goal_unreachable.distance_to(self.wp_follow))\n #print self.is_left_line\n #print self.dist_btw_follow_goal_unreachable\n if self.dist_btw_follow_goal_unreachable < self.TOLERANCE and self.is_left_line == 1:\n print \"goal unreachable\"\n return True\n else:\n return False",
"def goal_test(self, state):\n\n\t\treturn self.goal == state",
"def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False",
"def onGoal(self):\n return self.index == len(self.path)"
]
| [
"0.764153",
"0.75423175",
"0.74891615",
"0.73719585",
"0.7327295",
"0.7301565",
"0.7220743",
"0.7139437",
"0.70832837",
"0.7046251",
"0.7040974",
"0.70182705",
"0.6996969",
"0.6996969",
"0.6945148",
"0.69250876",
"0.69134796",
"0.6900344",
"0.6892872",
"0.6848054",
"0.6762561",
"0.67582536",
"0.67410696",
"0.67366356",
"0.6729884",
"0.67234457",
"0.67206824",
"0.671206",
"0.6707307",
"0.67033184"
]
| 0.7737054 | 0 |
This function gives the points that lie in the slanted rectangle. | def slanted_rect(self,image,i,j):
s1 = 0.7
s2 = -1.42814
x1 = np.arctan(s1)
x2 = np.arctan(s2)
d1 = np.cos(np.pi - x1)
d2 = np.cos(np.pi - x2)
a = -(self.padding/d1)
b = -(self.padding/d2)
if (-0.7*j+1*i)>=(73.4 - a) and (i+1.42814*j)>=(172.55 - b) and (-0.7*j+1*i)<=(99.81 + a) and (i+1.42814*j)<=(429.07 + b):
image[self.maximum_size-i,j,:]=0,0,0
self.image_p[i,j,:]=2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def triPoints(rect, orientation): \r\n p1 = (rect.center[0]+orientation[0]*rect.size[0]/3.,\r\n rect.center[1]+orientation[1]*rect.size[1]/3.)\r\n p2 = (rect.center[0]-orientation[0]*rect.size[0]/4.,\r\n rect.center[1]-orientation[1]*rect.size[1]/4.)\r\n orthdir = (orientation[1], -orientation[0])\r\n p2a = (p2[0]-orthdir[0]*rect.size[0]/6.,\r\n p2[1]-orthdir[1]*rect.size[1]/6.)\r\n p2b = (p2[0]+orthdir[0]*rect.size[0]/6.,\r\n p2[1]+orthdir[1]*rect.size[1]/6.) \r\n return [(p[0], p[1]) for p in [p1, p2a, p2b]]",
"def points_2_xywh(box):\n\n box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]\n box = [int(round(x)) for x in box]\n return box",
"def sample(self):\n ndim = len(self.lower_bounds)\n pts = numpy.zeros(ndim)\n for j in range(ndim):\n lb = self.lower_bounds[j]\n ub = self.upper_bounds[j]\n pts[j] = (ub - lb) / 2.0 + lb\n return pts",
"def g_corners(self):\n return (point for point in self.p)",
"def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point",
"def get_quad_points():\n points = np.array(\n [[0.333333333333333333333333333333, 0.333333333333333333333333333333],\n [0.950275662924105565450352089520, 0.024862168537947217274823955239],\n [0.024862168537947217274823955239, 0.950275662924105565450352089520],\n [0.024862168537947217274823955239, 0.024862168537947217274823955239],\n [0.171614914923835347556304795551, 0.414192542538082326221847602214],\n [0.414192542538082326221847602214, 0.171614914923835347556304795551],\n [0.414192542538082326221847602214, 0.414192542538082326221847602214],\n [0.539412243677190440263092985511, 0.230293878161404779868453507244],\n [0.230293878161404779868453507244, 0.539412243677190440263092985511],\n [0.230293878161404779868453507244, 0.230293878161404779868453507244],\n [0.772160036676532561750285570113, 0.113919981661733719124857214943],\n [0.113919981661733719124857214943, 0.772160036676532561750285570113],\n [0.113919981661733719124857214943, 0.113919981661733719124857214943],\n [0.009085399949835353883572964740, 0.495457300025082323058213517632],\n [0.495457300025082323058213517632, 0.009085399949835353883572964740],\n [0.495457300025082323058213517632, 0.495457300025082323058213517632],\n [0.062277290305886993497083640527, 0.468861354847056503251458179727],\n [0.468861354847056503251458179727, 0.062277290305886993497083640527],\n [0.468861354847056503251458179727, 0.468861354847056503251458179727],\n [0.022076289653624405142446876931, 0.851306504174348550389457672223],\n [0.022076289653624405142446876931, 0.126617206172027096933163647918],\n [0.851306504174348550389457672223, 0.022076289653624405142446876931],\n [0.851306504174348550389457672223, 0.126617206172027096933163647918],\n [0.126617206172027096933163647918, 0.022076289653624405142446876931],\n [0.126617206172027096933163647918, 0.851306504174348550389457672223],\n [0.018620522802520968955913511549, 0.689441970728591295496647976487],\n [0.018620522802520968955913511549, 0.291937506468887771754472382212],\n [0.689441970728591295496647976487, 0.018620522802520968955913511549],\n [0.689441970728591295496647976487, 0.291937506468887771754472382212],\n [0.291937506468887771754472382212, 0.018620522802520968955913511549],\n [0.291937506468887771754472382212, 0.689441970728591295496647976487],\n [0.096506481292159228736516560903, 0.635867859433872768286976979827],\n [0.096506481292159228736516560903, 0.267625659273967961282458816185],\n [0.635867859433872768286976979827, 0.096506481292159228736516560903],\n [0.635867859433872768286976979827, 0.267625659273967961282458816185],\n [0.267625659273967961282458816185, 0.096506481292159228736516560903],\n [0.267625659273967961282458816185, 0.635867859433872768286976979827]]);\n\n w = np.array(\n [0.051739766065744133555179145422,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 
0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190])*0.5;\n quad_x = np.copy(points[:,0])\n quad_y = np.copy(points[:,1])\n return (quad_x, quad_y, w)",
"def snap(point,shape,snapRange=20):\n snapped = list(point)\n if snapped[0] < snapRange: snapped[0] = 0\n if snapped[0] > shape[1]-snapRange: snapped[0] = shape[1]\n if snapped[1] < snapRange: snapped[1] = 0\n if snapped[1] > shape[0]-snapRange: snapped[1] = shape[0]\n return tuple(snapped)",
"def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point",
"def stretch(points, stretches=[1, 1]):\n x = stretches[0] * points[0]\n y = stretches[1] * points[1]\n return [x, y]",
"def _points_on_the_right(self, pt1, pt2, point_list):\n new_list = []\n for pt3 in point_list:\n if oriented_area(pt1, pt2, pt3) < 0:\n new_list.append(pt3)\n return new_list",
"def rpoints(self):\n return self.gmap.interp_gpos(self.points)",
"def get_boundary_corners_2D(points):\r\n\tpadding=0.05\r\n\tif points.shape[0] == 3:\r\n\t\tassert (len(points.shape)==2)\r\n\t\tminPt_3d_x = np.amin(points[0,:])\r\n\t\tmaxPt_3d_x = np.amax(points[0,:])\r\n\t\tminPt_3d_y = np.amin(points[1,:])\r\n\t\tmaxPt_3d_y = np.amax(points[1,:])\r\n\r\n\t\tboudary = [minPt_3d_x-padding, maxPt_3d_x+padding, minPt_3d_y-padding, maxPt_3d_y+padding]\r\n\r\n\telse:\r\n\t\traise Exception(\"wrong dimension of points!\")\r\n\r\n\treturn boudary",
"def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]",
"def X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen):\n p0 = (ix, iy)\n p1 = (ix - w_sen + n_w_sen / 2, iy + t_sen)\n p2 = (ix - w_sen + n_w_sen / 2, iy)\n p3 = (ix - n_w_sen / 2, iy)\n p4 = (ix - n_w_sen / 2, iy - t_sen)\n p5 = (ix + w_sen - n_w_sen / 2, iy - t_sen)\n p6 = (ix + w_sen - n_w_sen / 2, iy)\n p7 = (ix + n_w_sen / 2, iy)\n p8 = (ix + n_w_sen / 2, iy + t_sen)\n\n return p0, p1, p2, p3, p4, p5, p6, p7, p8",
"def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def drawLine2P(x,y,xlims):\n \n xrange = np.arange(xlims[0],xlims[1],1)\n A = np.vstack([x, np.ones(len(x))]).T\n k, b = np.linalg.lstsq(A, y, rcond=None)[0]\n return [xrange, k*xrange + b]",
"def non_rotated_vertices(self):\n v0 = [self.pos.x - self.width / 2, self.pos.y - self.height / 2]\n v1 = [self.pos.x + self.width / 2, self.pos.y - self.height / 2]\n v2 = [self.pos.x + self.width / 2, self.pos.y + self.height / 2]\n v3 = [self.pos.x - self.width / 2, self.pos.y + self.height / 2]\n return v0, v1, v2, v3",
"def _get_points(self, pos):\n\t\tpoints = [ (self.last_pos[0], self.last_pos[1]) ]\n\t\tlen_x = pos[0] - self.last_pos[0]\n\t\tlen_y = pos[1] - self.last_pos[1]\n\t\tlength = math.sqrt(len_x ** 2 + len_y ** 2)\n\t\tstep_x = len_x / length\n\t\tstep_y = len_y / length\n\t\tfor i in xrange(int(length)):\n\t\t\tpoints.append((points[-1][0] + step_x, points[-1][1] + step_y))\n\t\tpoints = map(lambda x:(int(0.5+x[0]), int(0.5+x[1])), points)\n\t\t# return light-weight, uniq integer point list\n\t\treturn list(set(points))",
"def surround(self, p):\n res = set([])\n if p.x + 1 < self.height:\n res.add((p.x + 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x + 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x + 1, p.y - 1))\n res.add((p.x, p.y - 1))\n if p.x - 1 >= 0:\n res.add((p.x - 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x - 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x - 1, p.y - 1))\n res.add((p.x, p.y - 1))\n return res",
"def region_points(x, y, width, xmin, xmax):\n right = (x, y + width / 2)\n top = (xmax, y)\n left = (x, y - width / 2)\n bottom = (xmin, y)\n return (right, top, left, bottom)",
"def LSSTPointing(xc, yc, angle_rot=0., area=None, maxbound=None):\n\n \"\"\"\n arr = [[3, 0], [12, 0], [12, 1], [13, 1], [13, 2], [14, 2], [14, 3], [15, 3],\n [15, 12], [14, 12], [14, 13], [13, 13], [\n 13, 14], [12, 14], [12, 15],\n [3, 15], [3, 14], [2, 14], [2, 13], [1, 13], [1, 12], [0, 12],\n [0, 3], [1, 3], [1, 2], [2, 2], [2, 1], [3, 1]]\n \"\"\"\n # this is a quarter of LSST FP (with corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 6.5], [5.5, 6.5], [\n 5.5, 5.5], [6.5, 5.5], [6.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n\n # this is a quarter of LSST FP (without corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n if maxbound is not None:\n arr = [[0.0, maxbound], [maxbound*4.5/7.5, maxbound], [maxbound*4.5 /\n 7.5, maxbound*4.5/7.5], [maxbound, maxbound*4.5/7.5], [maxbound, 0.0]]\n # symmetry I: y -> -y\n arrcp = list(arr)\n for val in arr[::-1]:\n if val[1] > 0.:\n arrcp.append([val[0], -val[1]])\n\n # symmetry II: x -> -x\n arr = list(arrcp)\n for val in arrcp[::-1]:\n if val[0] > 0.:\n arr.append([-val[0], val[1]])\n\n # build polygon\n poly_orig = geometry.Polygon(arr)\n\n # set area\n if area is not None:\n poly_orig = affinity.scale(poly_orig, xfact=np.sqrt(\n area/poly_orig.area), yfact=np.sqrt(area/poly_orig.area))\n\n # set rotation angle\n rotated_poly = affinity.rotate(poly_orig, angle_rot)\n\n return affinity.translate(rotated_poly,\n xoff=xc-rotated_poly.centroid.x,\n yoff=yc-rotated_poly.centroid.y)",
"def corners((u,v)):\r\n return ((u+1,v+1), (u+1,v), (u,v), (u,v+1))",
"def straight_trench(self, l, w, gap, x0, y0, orientation):\n if orientation == 'H':\n return [self.rect(l, gap, x0, y0), self.rect(l, gap, x0, y0 + gap + w)]\n if orientation == 'V':\n return [self.rect(gap, l, x0, y0), self.rect(gap, l, x0 + gap + w, y0)]",
"def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points",
"def Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen):\n p0 = (ix, iy)\n p1 = (ix + t_sen, iy - w_sen + n_w_sen / 2)\n p2 = (ix, iy - w_sen + n_w_sen / 2)\n p3 = (ix, iy - n_w_sen / 2)\n p4 = (ix - t_sen, iy - n_w_sen / 2)\n p5 = (ix - t_sen, iy + w_sen - n_w_sen / 2)\n p6 = (ix, iy + w_sen - n_w_sen / 2)\n p7 = (ix, iy + n_w_sen / 2)\n p8 = (ix + t_sen, iy + n_w_sen / 2)\n\n return p0, p1, p2, p3, p4, p5, p6, p7, p8",
"def drawClippedPointSet (self, points, colour):\r\n\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n \r\n for pt in points:\r\n if pt [0] >= 0 and pt [0] < w and pt [1] >= 0 and pt [1] < h:\r\n self.image [pt [1]][pt [0]] = colour",
"def point_form(boxes):\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax",
"def get_points(self, npoints: int):\n\n R = sorted(np.random.rand(npoints) * 2. * np.pi)\n\n xx = self.cx + self.a * np.cos(R) * np.cos(self.angle_rad) - self.b * np.sin(R) * np.sin(\n self.angle_rad)\n\n yy = self.cy + self.a * np.cos(R) * np.sin(self.angle_rad) + self.b * np.sin(R) * np.cos(\n self.angle_rad)\n\n return R, xx, yy",
"def get_points(self, positive_points):\n points = [-1 * positive_points[0], 0, 0]\n positive_points = positive_points[1 :]\n left_list = []\n right_list = []\n\n for i in range(len(positive_points) // 3):\n bottom_left, top_left, top_right = positive_points[3 * i : 3 * i + 3]\n right_list += [bottom_left, top_left, top_right]\n left_list += [top_right + top_left - bottom_left, top_right, top_left]\n\n left_list = [-1 * i for i in left_list]\n points = left_list + points + right_list\n\n return points",
"def teselado(self,points):\n #muestrea todo el espacio de la envolvente para conseguir las fronteras de decision a intervalos regulares \n #get_hull\n area = boundingbox(points)\n #(min_x,min_y),(max_x,min_y),(max_x,max_y),(min_x,max_y)\n #sample inside hull\n lat_sample = np.arange(min_y, max_y, 0.001).tolist()\n lon_sample = np.arange(min_x,max_x, 0.001)\n sampling_space = [[x,y] for x in lat_sample for y in lon_sample]\n sampling_space = np.asarray(sampling_space, dtype=np.float32)\n prediction = kmeans_instance.predict(sampling_space)\n pol = [[list(sampling_space[index]) for index in [i for i, j in enumerate(prediction) if j == k]] for k in range(centers)]\n hull = [shapely.geometry.MultiPoint(pol[i]).convex_hull.exterior._get_coords() for i in range(len(pol))]"
]
| [
"0.62138176",
"0.58339304",
"0.58308536",
"0.5803912",
"0.5787294",
"0.57711476",
"0.5743119",
"0.57425475",
"0.57074183",
"0.56752676",
"0.56694865",
"0.5656441",
"0.5636366",
"0.5605589",
"0.5577327",
"0.5554112",
"0.55472916",
"0.5546715",
"0.5545921",
"0.5541117",
"0.5533156",
"0.5527852",
"0.55145985",
"0.5489227",
"0.5486319",
"0.548119",
"0.545976",
"0.5435197",
"0.54319876",
"0.54226756"
]
| 0.62317324 | 0 |
Interrupt handler allows suspension via SIGHUP and resumption via SIGCONT; and termination via SIGABRT, SIGINT, and/or preferably SIGTERM. | def sighandler(signum, frame):
global _terminate
global _interruptcnt
print >> FileKeyUtils.WMSlog, 'sighandler> ', signum
++_interruptcnt
if signum in(signal.SIGABRT, signal.SIGINT, signal.SIGTERM):
print >> FileKeyUtils.WMSlog, 'sighandler> terminate pid: ', os.getpid(), signum
_terminate = True
elif signum in(signal.SIGHUP, signal.SIGTSTP):
print >> FileKeyUtils.WMSlog, 'sighandler> suspend/stop/pause pid: ', os.getpid(), signum
signal.pause()
else:
print >> FileKeyUtils.WMSlog, 'sighandler> resume/continue pid: ', os.getpid(), signum
_terminate = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_interrupt(self, *args) -> None: #pylint: disable=unused-argument\r\n if not self.stop_requested:\r\n self.stop_requested = True\r\n self.logger.critical('SIGINT detected - will stop at the end of the current evolution')\r\n else:\r\n stop_from = time.time() - 5000\r\n if self.last_stop_request > stop_from:\r\n raise KeyboardInterrupt\r\n else:\r\n self.last_stop_request = time.time()\r\n self.logger.critical('SIGINT suppressed - repeat within 5 seconds to sigterm')",
"def __sigint_handler(signal, frame):\n logging.debug(\"SIGINT or SIGTERM catched\")\n logging.debug(\"Raise t_stop_event\")\n t_stop_event.set() # Set stop flag to true for all launched threads\n logging.info(\"Stopping daemons...\")\n sleep(1)",
"def SIGINT_handler(signal, frame):\n exit(2)",
"def handle_sigint(signum, frame):\n print(\"\\nInterrupted by SIGINT\\n\")\n sys.exit()",
"def sigint_handler(*dummy):\n print \"Received SIGINT. Stopping everything.\"\n executor.Stop()\n server.Stop()",
"def signal_handler(signal, _): \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)",
"def interrupt_handler(signum, frame): #pylint: disable=W0613\n cleanup()\n sys.exit(-2) # Terminate process here as catching the signal\n # removes the close process behaviour of Ctrl-C",
"def siginterrupt(sig, flag): # real signature unknown; restored from __doc__\n pass",
"def interrupt_handler(self, signo, frame):\n log.debug(\"interrupting run\")\n self._keep_running = False",
"def sigint_handler(signal, frame):\n rclpy.shutdown()\n if prev_sigint_handler is not None:\n prev_sigint_handler(signal)",
"def finish(self):\n super(InterruptibleMixin, self).finish()\n signal(SIGINT, self.original_handler)",
"def signal_handler(signal_number, stack_frame):\n if signal_number in [signal.SIGTERM, signal.SIGINT]:\n terminate_surveillance()",
"def register_signal_handler(self):\n signal.signal(signal.SIGINT, self.quit_gracefully)\n signal.signal(signal.SIGTERM, self.quit_gracefully)\n return",
"def keyboard_interrupt_handler(sig: int, _: object) -> None:\n logger.warning(f'KeyboardInterrupt (id: {sig}) has been caught...')\n logger.info('Terminating the session gracefully...')\n ray.shutdown()\n minio_leftovers = glob('*.part.minio')\n for leftover in minio_leftovers:\n Path(leftover).unlink()\n sys.exit(1)",
"def SIGINT(self, signum, frame):\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n if hasattr(t, 'stop') and callable(t.stop):\n t.stop()\n\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n t.join()\n\n self.exit(1)",
"def signal_handler(sig, frame):\n global shutdown_flag, orig_sighandler\n signal.signal(signal.SIGTERM, signal.SIG_IGN) # Disable SIGTERM\n logger.warning(\"SIGTERM received! Normally this triggered by the walltime limit.\")\n shutdown_flag.set()\n return",
"def sig_handler(sig, frame):\n if sig == signal.SIGINT:\n stop_ros()\n print(\"Shutting down...\")\n sys.exit(0)",
"def interrupt_handler(signum, frame):\n if DOCKER_MONITOR:\n util.log.warning(\"Signal %d received - Tearing down monitoring\"\n % (signum))\n DOCKER_MONITOR.tear_down_all()\n sys.exit(0)",
"def setup_termination():\n setpgrp()\n\n def terminate(signum, frame):\n logger.debug(\"SIGKILL received!\")\n logger.debug(\"%d, %s\", signum, frame)\n killpg(0, SIGKILL)\n\n signal(SIGINT, terminate)",
"def sigint_handler(signal, frame):\n print(\"SIGINT received by run_end_to_end.py. Terminating process.\")\n exit_gracefully()\n sys.exit()",
"def sigtrace_handler(sig,ign):\n global SIGNALS\n print(\"received SIG%s: %s\"%(SIGNALS[sig],process_infos(\"???\")),file=sys.stderr)\n if sig == 2:\n # Python has a special handler for SIGINT that generates\n # a KeyboardInterrupt exception\n signal.signal(sig,signal.default_int_handler)\n elif sig == signal.SIGCONT:\n # When the process restarts after being stopped we re-install\n # tracing handler on Ctrl-Z and TTIN/TTOUT signals so it is\n # possible to play with job control\n signal.signal(signal.SIGTSTP,sigtrace_handler)\n signal.signal(signal.SIGTTOU,sigtrace_handler)\n signal.signal(signal.SIGTTIN,sigtrace_handler)\n else:\n # Once a signal has been received we reinstall the default\n # handler before self-resending the signal\n signal.signal(sig,signal.SIG_DFL)\n # All signal received but SIGCONT are self-resent after being received\n if sig != signal.SIGCONT:\n os.kill(os.getpid(),sig)",
"def delay_termination():\n signals_to_catch = [signal.SIGINT, signal.SIGTERM, signal.SIGABRT]\n old_handlers = {signum: signal.getsignal(signum) for signum in signals_to_catch}\n signals_received = {signum: None for signum in signals_to_catch}\n\n def delay_handler(signum, frame):\n signals_received[signum] = (signum, frame)\n\n # Set handlers fot delay\n for signum in signals_to_catch:\n signal.signal(signum, delay_handler)\n\n yield # Resume program\n\n # Restore old handlers\n for signum, handler in listitems(old_handlers):\n signal.signal(signum, handler)\n\n # Fire delayed signals\n for signum, s in listitems(signals_received):\n if s is not None:\n old_handlers[signum](*s)",
"def _set_signal_handlers():\n\n def _handler(_signal, _frame):\n raise KeyboardInterrupt\n\n signal.signal(signal.SIGINT, _handler)\n signal.signal(signal.SIGTERM, _handler)",
"def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)",
"def signal_handler(self, signum, frame):\n if signum == signal.SIGINT:\n self.terminate = True\n elif signum == signal.SIGALRM:\n self.button_handler(self.BUTTON_PIN)",
"def _set_signal_handler(self) -> None:\r\n loop = asyncio.get_running_loop()\r\n # get interupt signals supported by user's OS.\r\n signals = [getattr(signal, s) for s in (\r\n 'SIGBREAK', 'SIGINT', 'SIGTERM', 'SIGHUP') if hasattr(signal, s)]\r\n for s in signals:\r\n try:\r\n loop.add_signal_handler(\r\n s, lambda s=s: asyncio.create_task(self.shutdown(s)))\r\n except NotImplementedError:\r\n pass",
"def sigint_handler(sig, frame):\n print(\"[i] Caught SIGINT, cleaning up...\")\n server.close()\n exit(0)",
"def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()",
"def sigHandler(sig, frame):\n # pylint: disable=unused-argument\n shutdown()\n sys.exit(0)",
"def trap_sigint(handler, ignoreNum=0):\n handlers = _SIGINT_HANDLERS\n handlers.attach_override_unix(signal.SIGINT)\n handlers.register(handler, ignoreNum)"
]
| [
"0.6873077",
"0.6834924",
"0.6801791",
"0.6721506",
"0.6706197",
"0.6688984",
"0.66876125",
"0.6626961",
"0.6563405",
"0.6527134",
"0.6487581",
"0.648722",
"0.6463762",
"0.6410054",
"0.63769895",
"0.63597256",
"0.63591284",
"0.63388664",
"0.6319095",
"0.6309224",
"0.62986",
"0.6276668",
"0.6265651",
"0.62395144",
"0.6191063",
"0.6188072",
"0.6150698",
"0.61388177",
"0.6096591",
"0.6087479"
]
| 0.7475745 | 0 |
Uses os.system() to pkill and then invokes the Apache httpd with the specified | def restartHTTPd(htconf):
parentpid = pidHTTPd(htconf)
if parentpid <= 1:
return
# hopefulle killing the parent proc. will do the trick
print >> FileKeyUtils.WMSlog, 'restartHTTPd> kill parentpid:', parentpid
os.system('kill -TERM '+repr(parentpid))
apache = '/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf
print >> FileKeyUtils.WMSlog, 'restartHTTPd> via:', apache
time.sleep(0.5) # give it time to complete proc. termination
os.system('/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kill_process(self,PID):\n os.system(\"sudo kill {}\".format(PID))\n return True",
"def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)",
"def remote_kill():",
"def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()",
"def GET_kill(self):\n sys.exit(0)",
"def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass",
"def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed",
"def pidHTTPd(htconf):\n# pcmd = \"/bin/ps -ef|/bin/grep \" + htconf + \"|/bin/grep -v grep|/bin/awk '{print $3}'|sort -u\"\n# pcmd = '/bin/ps -ef'\n# pcmd = 'ps -eo pid,ppid,rss,vsize,pcpu,pmem,cmd -ww --sort=pid'\n pcmd = 'ps -eo pid,ppid,rss,cmd'\n# produces something like the following (when selecting htconf == httpd.conf99)\n#30645 1 4420 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30646 30645 1640 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30647 30645 2628 /devstore/apache2/bin/fcgi- -f /devstore/apache2/conf/httpd.conf99\n#30648 30645 37216 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30649 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30650 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30651 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30652 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30656 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n# the 3rd column is ram util. in units of kbytes, kill the entire set of httpd pids\n# (by killing parent proc) if any one thread exceeds 1,000,000 kbytes (> 1gb)\n pid = os.popen(pcmd, \"r\")\n ppids = [] ; pids = [] ; memuse = [] ; vals = [] \n parentpid = -1\n restart = False\n try:\n for p in pid.xreadlines():\n p.rstrip()\n vals = p.split()\n conf = vals[-1] # last val should be confile\n try:\n if conf.index(htconf) >= 0 :\n pids.append(int(vals[0]))\n ppids.append(int(vals[1]))\n memuse.append(int(vals[2])) \n# print >> FileKeyUtils.WMSlog, 'pidHTTPd> pids: ', pids, ', ppids: ', ppids, ' memuse: ', memuse\n if int(vals[1]) == 1 : parentpid = int(vals[0])\n if int(vals[2]) > 1000000 : restart = True\n except:\n# print >> FileKeyUtils.WMSlog, 'pidHTTPd> vals: ', vals\n pass\n except: pass\n\n print >> FileKeyUtils.WMSlog, 'pidHTTPd>', htconf, ' parentpid:', parentpid, ', memuse:', memuse\n if restart: # return parentpid\n return parentpid\n\n return -1",
"def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')",
"def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)",
"def processKill(uPid):\n return processTerminate(uPid);",
"def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')",
"def reload_process(self):\n try:\n output = subprocess.check_output([\"pidof\", \"haproxy\"])\n pids = output.strip().split(\" \")\n except Exception as exc:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid\"\n else:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid -sf xyz\"\n command = command.replace(\"xyz\", \" \".join(pids))\n\n command = command.replace(\"{{ dest }}\", self.dest)\n log.debug(\"Running reload_cmd: {}\".format(command))\n\n args = shlex.split(command)\n process = subprocess.Popen(args)",
"def kill(pid):\n # If the process doesn't exist, it raises an exception that we can ignore.\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass",
"def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()",
"def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass",
"def restart_apache():\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Restart Apache web server?\",\n default=True,\n )\n )\n\n if check_prompt:\n sudo(\"service apache2 restart\")",
"def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)",
"def handle_process(self, request):\n \"\"\"\n @api {kill} /processes/:id Kill a running process\n @apiName KillProcesses\n @apiGroup Processes\n @apiVersion 1.1.0\n\n @apiParam {String} :id Process ID.\n\n @apiSuccessExample {json} Example response:\n {\n \"killed\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/processes/([0-9a-z]+)', request.uri_path)\n processid = match.group(1)\n\n if request.method == \"KILL\":\n try:\n # If the process is running on the local node\n if (self.manager.is_process_running(processid)):\n self.manager.kill(processid)\n # If the process is running on another node\n else:\n try:\n # Find out on which node is the process\n cluster_running_processes = self.cluster.list_running_processes()\n process = cluster_running_processes[processid]\n node = process['node']\n # Kill the process on the node\n response = self.cluster.query_node(node, 'KILL', '/processes/%s' % (processid))\n if response['code'] == 404:\n raise ProcessNotFound()\n elif response['code'] != 200:\n raise Exception('Node %s returned code %d when asked to kill process %s.' % (node, response['code'], processid))\n except KeyError:\n raise ProcessNotFound()\n killed = True\n code = 200\n except ProcessNotFound:\n killed = False\n code = 404\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\n \"id\": processid,\n \"killed\": killed\n })\n return HTTPReply(code = code, body = body, headers = headers)",
"def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)",
"def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')",
"def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])",
"def kill_child(self, pid):\n # try communicate\n try:\n self._child_pids[pid].communicate()\n except Exception:\n print('Could not communicate to child')\n try:\n self.execute_command(\"kill -9 \"+str(pid))\n except Exception as e:\n print(e)",
"def killIfRequested(self):\n pidFilePath = self.options.get(RunnerOptions.pidFilePath)\n\n if self.options.get(RunnerOptions.kill, False):\n if pidFilePath is None:\n exit(ExitStatus.EX_USAGE, \"No PID file specified\")\n return # When testing, patched exit doesn't exit\n else:\n pid = \"\"\n try:\n for pid in pidFilePath.open():\n break\n except EnvironmentError:\n exit(ExitStatus.EX_IOERR, \"Unable to read PID file.\")\n return # When testing, patched exit doesn't exit\n try:\n pid = int(pid)\n except ValueError:\n exit(ExitStatus.EX_DATAERR, \"Invalid PID file.\")\n return # When testing, patched exit doesn't exit\n\n self.startLogging()\n self.log.info(\"Terminating process: {pid}\", pid=pid)\n\n kill(pid, SIGTERM)\n\n exit(ExitStatus.EX_OK)\n return # When testing, patched exit doesn't exit",
"def _on_parent_process_kill(self):",
"def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)",
"def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')",
"def main():\n\n httpd = BaseHTTPServer.HTTPServer(\n ('localhost', int(ADDON.getSetting(\"port\"))),\n StupidHTTPRequestHandler)\n httpd_thread = threading.Thread(target=httpd.serve_forever)\n httpd_thread.start()\n\n monitor = xbmc.Monitor()\n \n while not monitor.abortRequested():\n # Sleep/wait for abort for 10 seconds\n if monitor.waitForAbort(10):\n # Abort was requested while waiting. We should exit\n break\n\n httpd.shutdown()\n httpd.server_close()",
"def reboot():\n sudo('/mnt/apps/bin/restart-all-apache.sh')",
"def kill(self):\n kill_cmds = [\n \"sudo pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n self.log.info(\"Killing any server processes\")\n pcmd(self._hosts, \"; \".join(kill_cmds), False, None, None)"
]
| [
"0.6258043",
"0.6232436",
"0.62181044",
"0.60765",
"0.60613805",
"0.5952444",
"0.5894394",
"0.58898026",
"0.58821464",
"0.579318",
"0.5760136",
"0.5709812",
"0.5701586",
"0.5648284",
"0.5647017",
"0.56396043",
"0.5619126",
"0.5600666",
"0.55770516",
"0.5537543",
"0.552522",
"0.55214816",
"0.55205023",
"0.5490504",
"0.54857403",
"0.5482503",
"0.5469867",
"0.54669493",
"0.54644597",
"0.54613304"
]
| 0.7323256 | 0 |
Printout global _modinfo text, followed by module help(). | def printInfoDoc():
global _modinfo
print _modinfo
help("ProcUtils") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_help():\n print(sys.modules[__name__].__doc__)",
"def module_info():\n pass",
"def info():\n print(\"\"\"\n Module for use in WMDframe. Just an interaction with the git\n repo changeme by ztgrace.\n\n \"Getting default credentials added to commercial scanners is\n often difficult and slow. changeme is designed to be simple\n to add new credentials without having to write any code or modules.\"\n\n Checkout the git repo: https://github.com/ztgrace/changeme\n \"\"\")\n # Delete the parser info, if args.parse is not used.\n if parser.format_help():\n print('\\n\\t' + bc.OKBLUE + 'COMMANDLINE ARGUMENTS:' + bc.ENDC)\n for line in parser.format_help().strip().splitlines():\n print('\\t' + line)\n print('')",
"def module_help(self):\n print(\"Module \" + str(self))\n print(\"Available commands:\")\n for k in self.commands.values():\n n = k.get_name()\n if len(n) >= CONST_BINDCTL_HELP_INDENT_WIDTH:\n print(\" %s\" % n)\n print(textwrap.fill(k.get_desc(),\n initial_indent=\" \",\n subsequent_indent=\" \" +\n \" \" * CONST_BINDCTL_HELP_INDENT_WIDTH,\n width=70))\n else:\n print(textwrap.fill(\"%s%s%s\" %\n (k.get_name(),\n \" \"*(CONST_BINDCTL_HELP_INDENT_WIDTH - len(k.get_name())),\n k.get_desc()),\n initial_indent=\" \",\n subsequent_indent=\" \" +\n \" \" * CONST_BINDCTL_HELP_INDENT_WIDTH,\n width=70))",
"def help(self):\n self.logger.debug(\"module.Module.help()\")\n return os.linesep.join([\"{}:\".format(self.name),\n self.helptext])",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def get_module_help(self, module_name):\r\n module = self.env.load_module(module_name)\r\n arg_doc = module.__doc__\r\n return arg_doc.strip()",
"def modulesHelp(module):\n\tscripts = os.listdir('syncity/{}/'.format(module))\n\toutput = []\n\t\n\tfor s in scripts:\n\t\tif s[:2] != '__' and s != 'template.py' and s[-3:] != 'pyc':\n\t\t\toutput.append('\\t{}:'.format(s[:-3]))\n\t\t\t\n\t\t\ttry:\n\t\t\t\timport_script = __import__('syncity.{}.{}'.format(module,s[:-3]), fromlist=['syncity.{}'.format(module)])\n\t\t\t\thl = import_script.help()\n\t\t\t\thl = re.sub(r'^', '\\t\\t', hl).replace('\\n', '\\n\\t')\n\t\t\t\toutput.append(hl)\n\t\t\texcept:\n\t\t\t\toutput.append('\\tNo description')\n\t\n\treturn '\\n'.join(output)",
"def print_help(self):\n print self.get_help()",
"def present_module_info():\n writer()\n print_heading(\"Module Info\")\n writer(f\"GWT Version: {GWT_VERSION}\")\n writer(f\"Content-Type: {CONTENT_TYPE}\")\n writer(f\"X-GWT-Module-Base: {BASE_URL}\")\n writer(f\"X-GWT-Permutation: {GWT_PERMUTATION}\")\n if RPC_MODE:\n writer(f\"RPC Version: {RPC_VERSION}\")\n writer(f\"RPC Flags: {RPC_FLAGS}\")\n writer()",
"def printhelp():",
"def help_util():\r\n for cmd, f in COMMANDS.items():\r\n print(\"POM \" + cmd + \":\")\r\n print(f.__doc__.lstrip(\"\\n\"))",
"def printHelp(self,):\n print man\n return 0",
"def info(self, zolo, module, args):\n print(f\"[Other] Version {module.version}\")",
"def get_main_help(self):\r\n return __doc__.strip()",
"def help():",
"def help(self):\n res = \"\"",
"def print_generic_help():\r\n print ART_NAME\r\n print 'Version {1}\\nby {2}'.format(NAME, VERSION, AUTHOR)\r\n print DESCRIPTION\r\n tools = sorted(AvailableCommands.commands.keys(), key=lambda v: v.upper())\r\n # Do not show CUSTOM command in the help\r\n tools.remove('CUSTOM')\r\n tools.remove('CUSTOM_NO_OUTPUT')\r\n print '\\n\\nSupported tools are:\\n{0}'.format('\\n'.join(tools))\r\n print '\\nHint: Check tool specific help with --help <tool_name>\\n'",
"def print_help():\n\tprint(\"Help text\")",
"def get_help(self) -> None: \n print(messages.get_help())",
"def help():\n print(UI.HELP)",
"def help_description():\n pass",
"def print_modules(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tmodule_string = ''\n\t\tmodule_string += 'Modules: \\n'\n\t\tmodule_string += ' Run order Build Remove Module ID\\n'\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule_string += ' ' + str(self.shutit_map[module_id].run_order) + ' ' + str(\n\t\t\t\tcfg[module_id]['shutit.core.module.build']) + ' ' + str(\n\t\t\t\tcfg[module_id]['shutit.core.module.remove']) + ' ' + module_id + '\\n'\n\t\treturn module_string",
"def info():\n print __doc__\n sys.exit(1)",
"def print_help():\n parser = parsersetup()\n parser.print_help()",
"def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 'help dump'\r\n\r\n \"\"\")",
"def help(self):\n\t\treturn",
"def info():\n\n print('Maptool\\n--------\\n')\n print('Version: ' + __version__)\n print('Path: ' + __path__[0])\n print('Date: ' + __date__)\n print()\n\n import sys\n print('Python version=' + sys.version + '\\n')\n\n try:\n mm = __import__('pymongo')\n print('%10s %10s %s' % ('pymongo', mm.version, mm.__path__[0]))\n except ImportError:\n print('pymongo Not Found')\n\n for modui in ['numpy', 'scipy', 'mayavi', 'matplotlib', 'tqdm',\n 'future', 'nose', 'coverage', 'spglib', 'pyhull', 'pymatgen', 'qmpy', ]:\n try:\n mm = __import__(modui)\n print('%10s %10s %s' % (modui, mm.__version__, mm.__path__[0]))\n except ImportError:\n print('%10s %10s Not Found' % (modui, ''))\n\n if ASE:\n import ase\n #from ase import version as ase_version\n print('%10s %10s %s' % ('ase', ase.__version__, ase.__path__[0]))\n else:\n print('%10s %10s Not Found' % ('ase', ''))",
"def print_help(self):\n\n print((\"Help is not defined for command \" + self.command))",
"def display_help(self):\n pass"
]
| [
"0.73787487",
"0.7280849",
"0.72768086",
"0.7214288",
"0.70121974",
"0.6946926",
"0.6916308",
"0.68961084",
"0.6881714",
"0.6879395",
"0.67717725",
"0.67190427",
"0.6707886",
"0.6603345",
"0.6558752",
"0.6526451",
"0.652423",
"0.6444417",
"0.64231575",
"0.6407718",
"0.6405413",
"0.6398205",
"0.638697",
"0.6371327",
"0.63583654",
"0.6346018",
"0.63451314",
"0.63335675",
"0.6321484",
"0.63148016"
]
| 0.82748777 | 0 |
Symbolically calculates the generating polynomial for a lagrange polynomial of a given order. Our HyperCube elements use N + 1 Lagrange polynomials of order N for the spatial discretisation. This function returns N + 1 generating polynomials, which can be used to calculate the value of a Lagrange polynomial at an arbitrary location. This is particularily useful, because the full discretisation on a D dimensional element can be obtained by the tensor product these polynomials. This routine is derived on pg. 304 of Andreas' book. | def generating_polynomial_lagrange(N, coordinate, gll_points):
# Symbols.
coord, phi, d_phi = sym.symbols('{} Phi dPhi'.format(coordinate))
# Equation A.19
phi = 1
for x in gll_points:
phi *= (coord - x)
# Get derivative in coordinate direction
d_phi = sym.diff(phi, coord)
# Equation A.20
generators = []
for x in gll_points:
generators.append((1 / (coord - x)) * (phi / d_phi.subs(coord, x)))
return sym.Matrix(generators) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lagrange_poly(x, xp, fp):\n\n f = 0.0\n \n # sum over points\n m = 0\n while (m < len(xp)):\n\n # create the Lagrange basis polynomial for point m \n l = None\n\n n = 0\n while (n < len(xp)):\n if n == m:\n n += 1\n continue\n\n if l == None:\n l = (x - xp[n])/(xp[m] - xp[n])\n else:\n l *= (x - xp[n])/(xp[m] - xp[n])\n\n n += 1\n\n \n f += fp[m]*l\n\n m += 1\n\n return f",
"def lagrange(x, w):\n M = len(x)\n p = poly1d(0.0)\n for j in xrange(M):\n pt = poly1d(w[j])\n for k in xrange(M):\n if k == j:\n continue\n fac = x[j]-x[k]\n pt *= poly1d([1.0, -x[k]])/fac\n p += pt\n return p",
"def Hcurl_polynomials(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n assert domain_dim == range_dim\n if domain_dim == 2:\n return [VectorFunction((\n variables[0] ** (order - 1 - j) * variables[1] ** (j + 1),\n -variables[0] ** (order - j) * variables[1] ** j,\n )) for j in range(order)]\n if domain_dim == 3:\n poly: typing.List[VectorFunction] = []\n poly += [VectorFunction((\n variables[0] ** (m - 1) * variables[1] ** n * variables[2] ** (order - m - n + 1),\n 0, -variables[0] ** m * variables[1] ** n * variables[2] ** (order - m - n)\n )) for n in range(order) for m in range(1, order + 1 - n)]\n poly += [VectorFunction((\n 0, variables[0] ** m * variables[1] ** (n - 1) * variables[2] ** (order - m - n + 1),\n -variables[0] ** m * variables[1] ** n * variables[2] ** (order - m - n)\n )) for m in range(order) for n in range(1, order + 1 - m)]\n poly += [VectorFunction((\n variables[0] ** (order - n) * variables[1] ** n,\n -variables[0] ** (order + 1 - n) * variables[1] ** (n - 1), 0\n )) for n in range(1, order + 1)]\n return poly\n raise ValueError(f\"Unsupported dimension: {domain_dim}\")",
"def _lagrange2(x, y):\n\n def P(x_ip):\n total = 0\n n = len(x)\n for i in range(0, n):\n\n def g(i, n):\n tot_mul = 1\n for j in range(0, n):\n if i == j:\n continue\n if x[i] == x[j]:\n log.fatal(\n f\"Leads to division by zero (x = {x[i]}). Identical values given in x array. \"\n \"For example by using Lagrange interpolation for precise orbit, \"\n \"check if identical observation epochs are given in SP3 file\"\n )\n tot_mul *= (x_ip - x[j]) / float(x[i] - x[j])\n return tot_mul\n\n total += y[i] * g(i, n)\n return total\n\n return P",
"def lagr_poly(ni, n):\n N = len(ni)\n h = np.zeros(N)\n for m in range(N):\n nm = ni[m]\n idx = np.concatenate([np.arange(0, m), np.arange(m + 1, N)])\n h[m] = np.prod((n - ni[idx]) / (nm - ni[idx]))\n return h",
"def lagrange(self,K, L):\n return lagrange(K, L)",
"def general_poly(L):\n def evaluate(x):\n length=len(L)-1\n value=0\n for i in L:\n value+=i*(x**length)\n length-=1\n return value\n return evaluate",
"def general_poly (L):\n def to_apply (x):\n n = 0\n for i in L:\n n = x*n + i\n return n\n return to_apply",
"def L(order=4):\n dim_sh = dimension(order)\n L = np.zeros((dim_sh, dim_sh))\n for j in range(dim_sh):\n l = sh_degree(j)\n L[j, j] = - (l * (l + 1))\n return L",
"def pyramid_polynomial_set_1d(\n dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[ScalarFunction]:\n assert dim == 3\n if order == 0:\n return [ScalarFunction(1)]\n\n poly = polynomial_set_1d(3, order)\n for d in range(order):\n for i in range(d + 1):\n for j in range(d + 1 - i):\n p = variables[0] ** i * variables[1] ** j * variables[2] ** (d - i - j)\n p *= (variables[0] * variables[1] / (1 - variables[2])) ** (order - d)\n poly.append(ScalarFunction(p))\n\n return poly",
"def gen_points(lo, hi, N):\n\treturn np.linspace(lo, hi, num=N)\n\t\n\t## a = np.array(range(0, N))\n\t## return lo + (a * (hi-lo)/float(N))",
"def lagrange(xint, yint, points):\n l_basis = []\n for i, x in enumerate(xint):\n denom = np.prod([x - xk for xk in xint if xk != x])\n numer = 1\n for xk in xint:\n if xk != x:\n numer *= (points - xk)\n l_basis.append(numer/denom)\n p = sum([y*l_basis[i] for i,y in enumerate(yint)])\n\n return p",
"def _lagrange(x, y):\n x_col, x_row = np.meshgrid(x, x)\n diff_x = x_col - x_row + np.eye(len(x))\n n = len(x)\n indices = np.eye(n) == 0\n\n def P(x_ip):\n l = np.zeros((n, x_ip.shape[0]))\n for idx, idxs in enumerate(indices):\n l[idx] = np.prod((x_ip[:, None] - x[idxs]) / diff_x[idxs, idx], axis=1)\n\n return np.sum(y * l.T, axis=1)\n\n return P",
"def Pol_Lagrange(x,datos_x,datos_y):\n puntos=[]\n for p in x: \n n=datos_x.shape[0] - 1 #n=numero de punto o datos menos 1 .\n l_s=[]\n for k in range(datos_x.shape[0]):\n producto=1\n for j in range(datos_x.shape[0]):\n if k!=j:\n producto = producto*((p-datos_x[j])/(datos_x[k]-datos_x[j]))\n l_s.append(producto)\n l_s=np.array(l_s)\n puntos.append(datos_y@l_s)\n puntos=np.array(puntos)\n return puntos",
"def general_poly (L):\r\n\r\n def secondFunc(x):\r\n total = 0\r\n listLength = len(L)-1\r\n for i in L:\r\n total += i * x**listLength\r\n listLength -= 1\r\n return(total)\r\n return secondFunc",
"def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )",
"def genpoly(sum_count=10, deg=5, cof=10, min_count=1):\n\n p = Polynome([0], '')\n d_prev = -1\n while p.length < min_count:\n p.reset()\n for j in range(sum_count):\n d = randrange(deg)\n c = randrange(-cof, cof)\n while d == d_prev and c != 0:\n d = randrange(deg)\n c = randrange(-cof, cof)\n d_prev = d\n p.plus(c, d)\n return p",
"def polygen(count=10, sum_count=10, deg=5, cof=10):\n\n s = enumi_beg\n ans = enumi_beg\n\n for i in range(count):\n s += item_beg\n ans += item_beg\n p = genpoly(sum_count, deg, cof)\n ans += p.print_out()\n s += p.rep + item_end\n ans += item_end\n s += enumi_end\n ans += enumi_end\n return s, ans",
"def general_poly (L):\n def inside(x):\n result = 0\n pwr = len(L) - 1\n for l in L:\n result = result + l * x ** pwr\n pwr -= 1\n return result\n return inside",
"def generate_scheme(self):\n x = self.implementation.add_input_variable(\"x\", self.precision)\n #\n [a, s], [limbs, _, _] = implementpoly(self.function, self.interval, None, self.epsilon, \\\n precision = self.precision.get_precision()+1, binary_formats = [24, 53])\n #\n p = implementpoly_multi_node_expand(s, x, self.precision, limbs, mem_map = {})\n self.implementation.set_output_format(p.precision)\n #\n return Return(p)",
"def make_DL_reg_lp_le_quad_func(n, x_0, node_num):\n def quad_func(xi, eta, nodes):\n x = geo.linear_interp(xi, eta, nodes)\n S = geo.stresslet_n(x, x_0, n)\n phi = geo.shape_func_linear(xi, eta, node_num)\n return phi * S\n return quad_func",
"def ddL(n):\n\n\tif (n==0):\n\t\treturn lambda x: 0.0\n\n\telif (n==1):\n\t\treturn lambda x: 0.0\n\n\telse:\n\n\t\t# approximate by fitting polynomial and taking derivatives\n\t\tc_om1 = coef_approximation(L(n), n)\n\t\tc_prime = polynomial_derivative(c_om1)\n\t\tc_double_prime = polynomial_derivative(c_prime)\n\n\t\t# [TODO]: fix own method from Hw 3\n\t\treturn lambda x: np.polyval(c_double_prime, x)",
"def lagrange_coefficients_ASTME2022(interval=10, interval_type='inner'):\n\n global _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE\n if _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE is None:\n _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE = CaseInsensitiveMapping()\n\n name_lica = ', '.join((str(interval), interval_type))\n if name_lica in _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE:\n return _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE[name_lica]\n\n r_n = np.linspace(1 / interval, 1 - (1 / interval), interval - 1)\n d = 3\n if interval_type.lower() == 'inner':\n r_n += 1\n d = 4\n\n lica = _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE[name_lica] = (\n as_float_array([lagrange_coefficients(r, d) for r in r_n]))\n\n return lica",
"def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)",
"def get_laplace_coefficients(n_step: int, analytic: bool = False):\n v = np.zeros(n_step + 1, dtype=int)\n v[1] = 1\n\n if analytic:\n A = Matrix([[0] * v.size] * v.size)\n v = Matrix(v)\n for n, m in itertools.product(*[range(n_step + 1)] * 2):\n A[m, n] = 1 / analytic_factorial(2 * m) * (-1) ** m * n ** (2 * m)\n\n gamma = np.array(A.LUsolve(v)).flatten().astype(float)\n\n else:\n nn, mm = np.meshgrid(*[np.arange(n_step + 1)] * 2)\n A = 1 / factorial(2 * mm) * (-1) ** mm * nn ** (2 * mm)\n\n gamma = np.linalg.solve(A, v)\n\n coeffs = {}\n for n, coeff in enumerate(gamma):\n if n == 0:\n coeffs[n] = -coeff\n else:\n coeffs[+n] = -coeff / 2\n coeffs[-n] = -coeff / 2\n\n return coeffs",
"def get_l_n_u_inegral(ppc, lower_bound, upper_bound, Nhrs=2):\n \"\"\"either lower and upper bound must be positive\"\"\"\n gens_hrs = ppc['gen'][:, 0]\n gens_hrs = np.sort(gens_hrs)\n \n n_buses = set_n_buses(ppc, Nhrs)\n n_gens = len(gens_hrs) // 2 \n l = np.zeros(n_buses)\n u = np.zeros(n_buses)\n for i in range(len(l)):\n if (i+1) in gens_hrs:\n l[i] = lower_bound\n u[i] = upper_bound\n else:\n l[i] = -np.inf\n u[i] = np.inf\n return l, u",
"def polyrecur(order,dist,trunc=1):\n\n printer(0,'Computing polynomials ...')\n if not isinstance(dist,Joint): dist = Joint(dist)\n\n nbrPoly = order+1\n dim = dist[:].shape[0]\n expo = np.arange(nbrPoly)\n coef = np.zeros((nbrPoly,nbrPoly))\n norm = np.ones((dim,nbrPoly))\n\n coef[0,0] = 1\n polyList = []\n\n # Creates the univariate polynomial basis\n\n for i in range(dim):\n\n polyList.append(Polynomial(expo,coef))\n AB = dist[i].coef(nbrPoly)\n\n for j in range(1,nbrPoly):\n\n norm[i,j] = norm[i,j-1]*AB[1,j]\n polyList[i][j] = np.roll(polyList[i][j-1],1,axis=0)\n polyList[i][j] -= AB[0,j-1]*polyList[i][j-1]+AB[1,j-1]*polyList[i][j-2]\n\n # Normalization and tensor product\n\n for i in range(dim): polyList[i][:] /= np.sqrt(norm[i,:,None])\n poly = tensdot(polyList,order,trunc)\n\n printer(1,'Computing polynomials 100 %')\n return poly",
"def metoda_lagrange(X, Y, pointx):\n\n n = X.shape[0]\n aprox_value = 0\n\n for i in range(n):\n aprox_value += Y[i] * L_k(i, pointx, X)\n\n return aprox_value",
"def construct_poly(data, power):\n return np.power(data, power)",
"def lap(self):\n\n gr = self.grid\n phi = gr.phi\n\n lapphi = gr.scratch_array()\n\n ib = gr.ilo\n ie = gr.ihi\n\n lapphi[ib:ie+1] = \\\n (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2\n\n return lapphi"
]
| [
"0.67642653",
"0.65641516",
"0.6269907",
"0.6085851",
"0.59619576",
"0.5891837",
"0.5783319",
"0.5757106",
"0.5720937",
"0.56095165",
"0.5596212",
"0.5561844",
"0.5558487",
"0.5481505",
"0.54707587",
"0.5437779",
"0.54326445",
"0.53526294",
"0.53392595",
"0.5330124",
"0.5308051",
"0.5298482",
"0.5297448",
"0.52744853",
"0.5262169",
"0.525022",
"0.5231568",
"0.5190661",
"0.5169794",
"0.5159534"
]
| 0.7209638 | 0 |
Loads the given json schema file | def load_json_schema(filename):
relative_path = join('../schema', filename)
absolute_path = join(dirname(__file__), relative_path)
base_path = dirname(absolute_path)
base_uri = 'file://{}/'.format(base_path)
with open(absolute_path) as schema_file:
return jsonref.loads(
schema_file.read(), base_uri=base_uri, jsonschema=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())",
"def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema",
"def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema",
"def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)",
"def read_json_schema(schema_file_path):\n with open(schema_file_path) as f:\n schema = json.load(f)\n return schema",
"def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)",
"def get_schema(filename: str) -> dict:\n return _load_json_schema(filename)",
"def load_schema(name):\r\n\r\n data = pkgutil.get_data(__package__, \"schemas/{0}.json\".format(name))\r\n return json.loads(data.decode(\"utf-8\"))",
"def schema_load(filename):\n print(uc.schema_load(filename))",
"def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)",
"def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)",
"def load_schema(schema_path):\n with open(schema_path) as schema_file:\n return Utils.parse(schema_file.read())",
"def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out",
"def load_local_schema(filename):\n # Confirm that schema file exists\n if not os.path.isfile(filename):\n log.warning(\"Schema file '{}' does not exist.\".format(filename))\n raise FileNotFoundError\n\n # Read schema file and return the schema as a dictionary\n schema_f = open(filename, 'r')\n schema = yaml.load(schema_f)\n assert isinstance(schema, dict), \"Failed to load schema file '{}'. \" \\\n \"Not a dictionary.\".format(filename)\n\n return schema",
"def parse_schema_from_file(schema_path):\n with open(schema_path) as f:\n return parse_schema_from_string(f.read())",
"def load_schemas():\n schemas = {}\n for filename in os.listdir(get_abs_path('schemas')):\n path = get_abs_path('schemas') + '/' + filename\n file_raw = filename.replace('.json', '')\n with open(path) as file:\n schemas[file_raw] = Schema.from_dict(json.load(file))\n return schemas",
"def load_schema(self, schema_file):\n with open(schema_file) as fp:\n for line in io.lines_in(fp):\n parts = line.strip().split('\\t')\n if len(parts) != 3:\n raise ValueError('invalid type declaration %r' % line.strip())\n self.declare_relation(parts[0], parts[1], parts[2])",
"def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):\r\n\r\n # Only one of file_name or schema_obj must be set\r\n assert bool(file_name) != bool(schema_obj)\r\n\r\n if path_prefix:\r\n spec_path = os.path.join(spec_path, \"APIs/schemas/\")\r\n base_path = os.path.abspath(spec_path)\r\n if not base_path.endswith(\"/\"):\r\n base_path = base_path + \"/\"\r\n if os.name == \"nt\":\r\n base_uri_path = \"file:///\" + base_path.replace('\\\\', '/')\r\n else:\r\n base_uri_path = \"file://\" + base_path\r\n\r\n loader = jsonref.JsonLoader(cache_results=False)\r\n\r\n if file_name:\r\n json_file = str(Path(base_path) / file_name)\r\n with open(json_file, \"r\") as f:\r\n schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n elif schema_obj:\r\n # Work around an exception when there's nothing to resolve using an object\r\n if \"$ref\" in schema_obj:\r\n schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n else:\r\n schema = schema_obj\r\n\r\n return schema",
"def read_json(json_file):\n with open(json_file) as schema:\n val = json.load(schema)\n\n return val",
"def load_yaml(fname, schema=None):\n with open(fname) as fh:\n data = yaml.safe_load(fh.read())\n if schema:\n import jsonschema\n jsonschema.validate(data, schema=schema)\n return data",
"def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema",
"def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def load_definition_file(self, json_file: str):\n with open(json_file, 'r') as j:\n self.load_definitions(json.load(j))",
"def load_schema(path, collection, readonly):\n return JSONStorage(path, collection, readonly)",
"def load_validation_schema(self) -> t.Dict[str, t.Any]:\n if self._schema is None:\n try:\n self._schema = json.loads(self.schema())\n except KeyError:\n device_type_striped = self._device_type.lower().rstrip(string.digits)\n with open(_CT_FILES[device_type_striped], encoding=\"utf-8\") as file_:\n self._schema = json.load(file_)\n return self._schema # type: ignore",
"def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n name=json_object.get('name'),\n type=json_object.get('type').upper(),\n mode=UpperOrNone(json_object.get('mode')))\n for json_object in json.load(f)]\n return bigquery_messages.TableSchema(fields=field_schemas)\n except ValueError as e:\n raise bigquery.SchemaError(\n 'Error decoding JSON schema from file {0}: {1}.'.format(\n schema_file, e))\n else:\n raise bigquery.SchemaError(\n 'Error reading schema: File \"{0}\" was not found.'.format(schema_file))",
"def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)",
"def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)",
"def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)",
"def load_json_document(f):\n return json.load(f)"
]
| [
"0.84585667",
"0.8389199",
"0.83260584",
"0.8293592",
"0.77692807",
"0.7753466",
"0.75813144",
"0.7514916",
"0.7409711",
"0.7408482",
"0.7398229",
"0.7358662",
"0.7209835",
"0.71893764",
"0.708138",
"0.70652896",
"0.69564265",
"0.69377166",
"0.68508154",
"0.6835237",
"0.6827011",
"0.68120104",
"0.6763664",
"0.67109936",
"0.6698591",
"0.6672221",
"0.66594374",
"0.66304344",
"0.6628056",
"0.6590035"
]
| 0.839412 | 1 |