Dataset columns:
  query            string (length 9 to 9.05k)
  document         string (length 10 to 222k)
  metadata         dict
  negatives        list (30 entries per row)
  negative_scores  list (30 entries per row)
  document_score   string (length 4 to 10)
  document_rank    string (2 distinct values)
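The columns above describe the rows that follow: a natural-language query, one positive code document, 30 mined negative documents with per-negative scores, and the positive's own score and rank. As a minimal sketch of loading and inspecting rows with this schema, assuming they are published as a Hugging Face dataset (the dataset id below is a placeholder, not the real identifier):

from datasets import load_dataset

# Placeholder dataset id -- substitute the actual location of these rows.
rows = load_dataset("org/code-retrieval-triplets", split="train")

for row in rows.select(range(3)):
    print("query:         ", row["query"])
    print("document:      ", row["document"][:80], "...")
    print("negatives:     ", len(row["negatives"]), "candidates")  # 30 per row, per the schema
    print("document_score:", row["document_score"])                # score assigned to the positive
    print("document_rank: ", row["document_rank"])                 # rank of the positive (2 distinct values)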
Delete all game objects in database.
def delete_all(sid):
    Game.objects.all().delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def delete_all_games(self):\n all_games = await ex.conn.fetch(\"SELECT gameid FROM blackjack.games\")\n for games in all_games:\n game_id = games[0]\n await self.delete_game(game_id)", "def delete_all(cls):\n cls.dbm().modelclass_deleteall(cls)", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()", "def delete_db():\n db.drop_all()", "def delete_all():\n answer = ['YES', 'NO']\n str = rs.GetString(\"Delete all objects?\", 'YES', answer)\n\n if str == 'YES':\n obs = rs.ObjectsByType(0)\n rs.DeleteObjects(obs)\n elif str == 'NO':\n pass\n else:\n sys.exit()", "def delete_all_games(self):\r\n\r\n # Deletes all rows from the database table\r\n conn_2 = sqlite3.connect('scribble_database.db')\r\n sql_2 = 'DELETE FROM paintings'\r\n cur_2 = conn_2.cursor()\r\n cur_2.execute(sql_2)\r\n conn_2.commit()\r\n\r\n # clears screen\r\n self.action_label.destroy()\r\n self.painting_name.destroy()\r\n self.w['bg'] = 'White'\r\n self.w.delete(ALL)\r\n\r\n # clears all guesses/available game ids from their menus\r\n self.menu_bar.destroy()\r\n self.action_label = Message(self.master, text='Database emptied', bg='Black', fg='orange', font=20,\r\n width=330)\r\n self.action_label.pack()\r\n self.action_label.place(x=500, y=440)\r\n self.menu_bar = Menu(self.master)\r\n self.guess_menu = Menu(self.menu_bar)\r\n self.file_menu = Menu(self.menu_bar, tearoff=1)\r\n self.menu_bar.add_cascade(label=\"Available game IDs: \", menu=self.file_menu)\r\n self.menu_bar.add_cascade(label=\"Guesses of displayed games: \", menu=self.guess_menu)\r\n self.master.config(menu=self.menu_bar)", "def drop_all():\n db.drop_all()", "def clear_all() -> None:\n datastore.db.client.drop_database(DATABASE_NAME)\n ClassifierCache.clear_all()", "def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def deletePlayers():\n conn, cur = connect()\n query = \"TRUNCATE players CASCADE;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered deleting all players\")\n conn.commit()\n conn.close()", "def delete_all(self):\n models.CourseLearningOutcome.objects.all().delete()\n #models.CoreLearningOutcome.objects.all().delete()\n #models.CreditType.objects.all().delete()\n models.Course.objects.all().delete()\n models.DegreeProgram.objects.all().delete()\n models.DPCourseSpecific.objects.all().delete()\n models.DPCourseGeneric.objects.all().delete()\n models.DPCourseSubstituteSpecific.objects.all().delete()\n models.DPCourseSubstituteGeneric.objects.all().delete()", "def clear_db(self):\n self.cursor.execute(\"DELETE FROM 
TrackPoint\")\n self.cursor.execute(\"DELETE FROM Activity\")\n self.cursor.execute(\"DELETE FROM User\")\n self.db_connection.commit()", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def clear(self, saveDatabase = True):\r\n debug.write(\"[SourceRPG] Clearing database\", 1)\r\n players.clearList()\r\n self.execute(\"DROP TABLE Player\")\r\n self.execute(\"DROP TABLE Skill\")\r\n if saveDatabase:\r\n self.save()\r\n self.__init__(self.path)\r\n for player in es.getUseridList():\r\n players.addPlayer(player)\r\n debug.write(\"[SourceRPG] Database cleared\", 1)", "def delete_all(self):\n self.session.query(TodoItem).delete()\n self.session.query(TodoList).delete()", "def delete_all_entities(self):\n self._delete_all_acls()\n self._delete_all_containers()\n self._delete_all_orders()\n self._delete_all_secrets()", "def deletePlayers():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"DELETE FROM players;\"\"\")\n conn.commit()", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def delete_all_users(self):\n\n User.query.delete()", "def deletePlayers():\n DB = dbc()\n DB.cursor().execute('DELETE FROM players')\n DB.commit()\n DB.close()", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def deletePlayers():\n #deletes the contents of table players\n DB().execute(\"DELETE FROM players\", True)", "def clear(self):\n for project in Project.objects:\n project.delete()", "def deletePlayers():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from players;\")\n db_conn.commit()\n db_conn.close()", "def deletePlayers():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM players\")\n dbconnection.commit()\n dbconnection.close()", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()" ]
[ "0.7558716", "0.7451959", "0.7310933", "0.72619975", "0.72330093", "0.72307104", "0.7189395", "0.702915", "0.70258915", "0.7018561", "0.70178777", "0.69849235", "0.69767076", "0.6947214", "0.69419783", "0.6908184", "0.68683773", "0.68426365", "0.68415046", "0.6840984", "0.68383753", "0.6837618", "0.6837282", "0.6835973", "0.6809716", "0.6791783", "0.6788157", "0.6777727", "0.6757328", "0.67570376" ]
0.80226487
0
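The metadata of the example above marks it for a triplet objective over (query, document, negatives). Below is a sketch of one plausible way to turn such a row into (query, positive, hard negative) training triplets, assuming negative_scores are similarity scores aligned one-to-one with negatives and that higher means harder; the helper name and top_k parameter are illustrative choices, not part of the dataset.

def build_triplets(row, top_k=5):
    # Assumes row["negative_scores"][i] scores row["negatives"][i];
    # higher scores are treated as harder negatives.
    scored = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [
        (row["query"], row["document"], negative)
        for negative, _score in scored[:top_k]
    ]

Negatives whose scores sit just below document_score are presumably the most informative for a contrastive loss, which would explain why per-negative scores are stored alongside the text.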
Determine if a repository is a Firefox repository.
def is_firefox_repo(repo):
    try:
        if len(repo) and repo[0].hex() == FIREFOX_ROOT_NODE:
            return True
    except error.FilteredRepoLookupError:
        pass

    # Backdoor for testing.
    return repo.vfs.exists('IS_FIREFOX_REPO')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def identify_repo(repo):\n repo_root = repo.ui.config('mozilla', 'repo_root', '/repo/hg/mozilla')\n if not repo_root.endswith('/'):\n repo_root += '/'\n\n d = {\n 'firefox': is_firefox_repo(repo),\n 'thunderbird': is_thunderbird_repo(repo),\n 'publishing': repo.ui.configbool('phases', 'publish', True),\n }\n\n if repo.root.startswith(repo_root):\n d['hosted'] = True\n d['path'] = repo.root[len(repo_root):]\n d['user_repo'] = d['path'].startswith('users/')\n\n else:\n d['hosted'] = False\n d['path'] = repo.root\n d['user_repo'] = False\n\n # We could potentially exclude more Firefox repos from this list. For now,\n # be liberal in what we apply this label to.\n d['firefox_releasing'] = (\n d['firefox']\n and d['publishing']\n and not d['user_repo'])\n\n return d", "def is_docker_hub(repository):\n return not repository or (\n repository.lower().replace(\" \", \"\").replace(\"-\", \"\").replace(\"_\", \"\")\n == \"dockerhub\"\n )", "def is_thunderbird_repo(repo):\n try:\n if len(repo) and repo[0].hex() == THUNDERBIRD_ROOT_NODE:\n return True\n except error.FilteredRepoLookupError:\n pass\n\n # Backdoor for testing.\n return repo.vfs.exists('IS_THUNDERBIRD_REPO')", "def has_firefox():\n if get_firefox_exe() is not None:\n return True\n\n try:\n subprocess.check_output(['firefox', '--version'])\n return True\n except Exception:\n return False", "def __has_repo(repo_name):\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n sql = ('SELECT id '\n 'FROM repos '\n \"WHERE repo_name='%s' \"\n 'LIMIT 1' % repo_name)\n\n cur.execute(sql)\n\n return bool(cur.fetchone() is not None)", "def check_repository(self, repo_type_key, value):\n def remove_tail(v, tail):\n if v.endswith(tail):\n v = v[:-len(tail)]\n return v\n\n for v in self.c.repositories.get(repo_type_key, ()):\n if remove_tail(v, '.git') == remove_tail(value, '.git'):\n return True\n return False", "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def repo_exists_on_gf_server(p4, repo_name):\n return get_server_repo_config_rev(p4, repo_name) != '0'", "def _handle_repository(self, repo):\n\n logger.debug(\"Loading configuration for repository: '%s' from '%s'.\"\n % (repo['name'],\n 'repositories-%s' % self._type))\n\n if 'id' in repo:\n logger.warning(\"Repository '%s' is defined as plain. 
It must be available \"\n \"inside the image as Cekit will not inject it.\"\n % repo['name'])\n return False\n\n if 'content_sets' in repo:\n self._fetch_repos = True\n return self._prepare_content_sets(repo)\n\n elif 'rpm' in repo:\n self._prepare_repository_rpm(repo)\n return False\n\n elif 'url' in repo:\n return True\n\n return False", "def is_repository(cls, directory: Path) -> bool:\n\n git_directory = directory / \".git\"\n return git_directory in filter(Path.is_dir, directory.iterdir())", "def test_is_firefox_file(self):\r\n good_file = self._get_file()\r\n\r\n self.assertTrue(\r\n FBookmarkImporter.can_handle(good_file),\r\n \"FBookmarkImporter should handle this file\")\r\n\r\n good_file.close()", "def test_fetch_valid_github_repo(self):\n url = 'https://github.com/ivacf/archi'\n repo = GitHubRepoFetcher().fetch(url)\n self.assertEqual('archi', repo['name'])", "def _is_firefox_format(json, can_handle):\r\n if json['type'] == FBookmarkImporter.MOZ_CONTAINER:\r\n can_handle = True\r\n\r\n return can_handle", "def is_git_repo(directory):\n files = os.listdir(directory)\n if '.git' in files:\n return True\n return False", "def repository_type(self) -> str:\n return pulumi.get(self, \"repository_type\")", "def is_git():\n return exists('.git') and not islink('.git')", "def test_get_repo_false(self):\n repo = Repository(\"https://repo.com/fake.git\")\n self.assertFalse(repo.get_repo())\n if (os.path.exists(MEDIA_ROOT+\"/fake\")):\n shutil.rmtree(MEDIA_ROOT+\"/fake\")", "def identify_branch(self, gecko_branch):\n\n # Retrieve the name of the repository\n branch = re.search('.*/([\\S\\.]+$)', gecko_branch).group(1)\n\n # Supported branches: mozilla-aurora, mozilla-beta, mozilla-release, mozilla-esr*\n # All other branches (mozilla-central, mozilla-inbound, birch, elm, oak etc.) should fallback to the 'default' branch\n # This will work with Firefox and Thunderbird\n if not re.match(r'.*/releases/', gecko_branch):\n branch = \"default\"\n\n return branch", "def is_git_link():\n return islink('.git')", "def test_get_repo_name_OK(self):\n repo_name = doxi.get_repo_name(TestDoxygenIFX.repo_url)\n self.assertEqual(repo_name,'repository-name')", "def exists(self):\n return self._repository is not None", "def can_view_repo(session, repo):\n return repo.id in viewable_repos(session)", "def test_base_repository_supported_repo_types() -> None:\n assert BaseRepository.list_supported_repository_types() == []", "def is_repo_root(path: str) -> bool:\n return os.path.isdir(os.path.join(path, \".repo\"))", "def is_within_repository(context):\n\n return bool(filter(IRepositoryRoot.providedBy, aq_chain(context)))", "def test_get_repo_url_OK(self):\n repo_url = doxi.get_repo_url()\n self.assertEqual(repo_url,TestDoxygenIFX.repo_url)", "def get_default_repo(self):\n for repo in self.get_repos():\n if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):\n return repo\n return False", "def test_if_fedora_software_is_reachable(self):\n # Kathe is the user of our example. She has heard about a new\n # exciting software for her Fedora Workstation. She visits it\n # via Firefox.\n\n # She notices the page title mentions 'Fedora-Software'\n self.assertIn(\"Welcome to Fedora Software\", self.browser.title)", "def test_firefox(firefox_browser):\n firefox_browser.get(\"https://habr.com/ru/\")\n print(firefox_browser.title)\n pass" ]
[ "0.7308661", "0.64443415", "0.6425791", "0.6104476", "0.60261166", "0.6015556", "0.59261096", "0.59261096", "0.57900804", "0.57796836", "0.5663567", "0.55708253", "0.5556107", "0.5546782", "0.55137646", "0.54955333", "0.5491645", "0.539184", "0.53543836", "0.53132266", "0.52919286", "0.5256687", "0.5245865", "0.5222251", "0.51695246", "0.5166526", "0.51005757", "0.50801027", "0.5059931", "0.5036158" ]
0.8278516
0
Determine if a repository is a Thunderbird repository.
def is_thunderbird_repo(repo):
    try:
        if len(repo) and repo[0].hex() == THUNDERBIRD_ROOT_NODE:
            return True
    except error.FilteredRepoLookupError:
        pass

    # Backdoor for testing.
    return repo.vfs.exists('IS_THUNDERBIRD_REPO')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def is_docker_hub(repository):\n return not repository or (\n repository.lower().replace(\" \", \"\").replace(\"-\", \"\").replace(\"_\", \"\")\n == \"dockerhub\"\n )", "def is_firefox_repo(repo):\n try:\n if len(repo) and repo[0].hex() == FIREFOX_ROOT_NODE:\n return True\n except error.FilteredRepoLookupError:\n pass\n\n # Backdoor for testing.\n return repo.vfs.exists('IS_FIREFOX_REPO')", "def check_repository(self, repo_type_key, value):\n def remove_tail(v, tail):\n if v.endswith(tail):\n v = v[:-len(tail)]\n return v\n\n for v in self.c.repositories.get(repo_type_key, ()):\n if remove_tail(v, '.git') == remove_tail(value, '.git'):\n return True\n return False", "def __has_repo(repo_name):\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n sql = ('SELECT id '\n 'FROM repos '\n \"WHERE repo_name='%s' \"\n 'LIMIT 1' % repo_name)\n\n cur.execute(sql)\n\n return bool(cur.fetchone() is not None)", "def is_repository(cls, directory: Path) -> bool:\n\n git_directory = directory / \".git\"\n return git_directory in filter(Path.is_dir, directory.iterdir())", "def is_git():\n return exists('.git') and not islink('.git')", "def repo_exists_on_gf_server(p4, repo_name):\n return get_server_repo_config_rev(p4, repo_name) != '0'", "def exists(self):\n return self._repository is not None", "def repository_type(self) -> str:\n return pulumi.get(self, \"repository_type\")", "def is_git_repo(directory):\n files = os.listdir(directory)\n if '.git' in files:\n return True\n return False", "def _handle_repository(self, repo):\n\n logger.debug(\"Loading configuration for repository: '%s' from '%s'.\"\n % (repo['name'],\n 'repositories-%s' % self._type))\n\n if 'id' in repo:\n logger.warning(\"Repository '%s' is defined as plain. It must be available \"\n \"inside the image as Cekit will not inject it.\"\n % repo['name'])\n return False\n\n if 'content_sets' in repo:\n self._fetch_repos = True\n return self._prepare_content_sets(repo)\n\n elif 'rpm' in repo:\n self._prepare_repository_rpm(repo)\n return False\n\n elif 'url' in repo:\n return True\n\n return False", "def identify_repo(repo):\n repo_root = repo.ui.config('mozilla', 'repo_root', '/repo/hg/mozilla')\n if not repo_root.endswith('/'):\n repo_root += '/'\n\n d = {\n 'firefox': is_firefox_repo(repo),\n 'thunderbird': is_thunderbird_repo(repo),\n 'publishing': repo.ui.configbool('phases', 'publish', True),\n }\n\n if repo.root.startswith(repo_root):\n d['hosted'] = True\n d['path'] = repo.root[len(repo_root):]\n d['user_repo'] = d['path'].startswith('users/')\n\n else:\n d['hosted'] = False\n d['path'] = repo.root\n d['user_repo'] = False\n\n # We could potentially exclude more Firefox repos from this list. 
For now,\n # be liberal in what we apply this label to.\n d['firefox_releasing'] = (\n d['firefox']\n and d['publishing']\n and not d['user_repo'])\n\n return d", "def is_svn():\n return (exists('.svn') and isdir('.svn'))", "def is_within_repository(context):\n\n return bool(filter(IRepositoryRoot.providedBy, aq_chain(context)))", "def test_heads_contains_true(repository: Repository) -> None:\n assert repository.head.name in repository.heads", "def is_git_link():\n return islink('.git')", "def is_legacy_landinglog_branch_present(repo):\n legacy_landinglog_name = phlgitu_ref.Name(_LEGACY_LANDINGLOG_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return legacy_landinglog_name.fq in remote_ref_names", "def is_repo_root(path: str) -> bool:\n return os.path.isdir(os.path.join(path, \".repo\"))", "def check_svn_repo(case_dict, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"check_svn_repo\")\n\n repo_exists = False\n svn_repo = \"{0}/trunk\".format(case_dict[\"svn_repo_url\"])\n cmd = [\"svn\", \"list\", svn_repo, \"--username\", username, \"--password\", password]\n result = \"\"\n try:\n result = subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n msg = \"SVN repo does not exist for this case. A new one will be created.\"\n logger.warning(msg)\n\n if re.search(\"README.archive\", result):\n repo_exists = True\n\n return repo_exists", "def is_remote_reserve_branch_present(repo):\n reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return reserve_name.fq in remote_ref_names", "def is_stone_backend(cls, path):\n path_without_ext, _ = os.path.splitext(path)\n _, second_ext = os.path.splitext(path_without_ext)\n return second_ext == cls.backend_extension", "def test_base_repository_supported_repo_types() -> None:\n assert BaseRepository.list_supported_repository_types() == []", "def is_redhat(distro):\n name = distro.lower()\n return name in red_hat", "def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True", "def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0", "def check_in_repo():\n if not os.path.isfile(\"setup.py\"):\n return \"Not in root-level PyTorch repo, no setup.py found\"\n with open(\"setup.py\") as f:\n s = f.read()\n if \"PyTorch\" not in s:\n return \"Not in PyTorch repo, 'PyTorch' not found in setup.py\"", "def isDBReleaseFile(dbh, lfn):\n\n if dbh:\n return dbh.extractVersion(lfn)\n else:\n return False", "def test_heads_contains_false(repository: Repository) -> None:\n assert \"branch\" not in repository.heads" ]
[ "0.6485536", "0.6485536", "0.63009167", "0.6284621", "0.62410414", "0.62088764", "0.60371786", "0.57730424", "0.57722294", "0.5746184", "0.57141906", "0.5656342", "0.5609316", "0.54819727", "0.54176277", "0.53619325", "0.52856714", "0.5241295", "0.51687944", "0.5166422", "0.51442075", "0.5139074", "0.50961775", "0.50814575", "0.5051376", "0.5031375", "0.50231725", "0.5012007", "0.5008", "0.49653503" ]
0.8249476
0
Check GRID proxy >>> hasGridProxy ()
def hasGridProxy():
    import os
    from subprocess import Popen, PIPE
    arguments = 'dirac-proxy-info --checkvalid'
    arguments = ['dirac-command'] + arguments.split()
    logger.verbose ( 'hasGridProxy:use Popen(%s)' % arguments)
    p = Popen(arguments, stdout=PIPE, stderr=PIPE)
    (cout, cerr) = p.communicate()
    #
    if 0 != p.returncode:
        return False
    #
    if py3 :
        cout = cout.decode ( 'utf-8' ) if cout else cout
        cerr = cerr.decode ( 'utf-8' ) if cerr else cerr
    #
    if 'expired' in cout : return False
    if 'Insane' in cout : return False
    if 'Error' in cout : return False
    #
    return 0 == p.returncode and cout and not cerr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proxy_check(self, proxy):", "def proxies_get(self) -> bool:\n return True", "def is_bound(proxy):\n try:\n current_object(proxy)\n except UnboundProxyError:\n return False\n else:\n return True", "def test_need_proxy(self):\n os.environ['no_proxy'] = 'blah.com,blah2.com'\n self.assertTrue(dockerv2.need_proxy('proxy.blah3.com'))\n self.assertFalse(dockerv2.need_proxy('proxy.blah.com'))", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def check_proxy():\n proxy_envs = ['HTTP', 'HTTPS', 'http', 'https']\n dataloop_urls = ['dev-gate.dataloop.ai',\n 'gate.dataloop.ai',\n 'dataloop-development.auth0.com',\n 'dataloop-production.auth0.com']\n if True in [env in os.environ for env in proxy_envs]:\n # check if proxy exists\n if True in [env in os.environ for env in ['no_proxy', 'NO_PROXY']]:\n # check if no_proxy exists\n if 'no_proxy' in os.environ:\n # check if dataloop urls in no_proxy\n if True not in [url in os.environ['no_proxy'] for url in dataloop_urls]:\n # no dataloop url exists in no_proxy\n logger.warning('Proxy is used, make sure dataloop urls are in \"no_proxy\" environment variable')\n else:\n # check if dataloop urls in no_proxy\n if True not in [url in os.environ['NO_PROXY'] for url in dataloop_urls]:\n # no dataloop url exists in no_proxy\n logger.warning('Proxy is used, make sure dataloop urls are in \"no_proxy\" environment variable')\n else:\n logger.warning('Proxy is used, make sure dataloop urls are in \"no_proxy\" environment variable')", "def proxy_enabled(self) -> ConfigNodePropertyBoolean:\n return self._proxy_enabled", "def is_proxy(elt):\n\n if ismethod(elt):\n elt = get_method_function(elt)\n\n result = hasattr(elt, __PROXIFIED__)\n\n return result", "def IsProxyBypassed(self, tab):\n info = GetProxyInfoFromNetworkInternals(tab)\n if not info['enabled']:\n raise ChromeProxyMetricException, (\n 'Chrome proxy should be enabled. proxy info: %s' % info)\n\n bad_proxies = [str(p['proxy']) for p in info['badProxies']].sort()\n proxies = [self.effective_proxies['proxy'],\n self.effective_proxies['fallback']].sort()\n return bad_proxies == proxies", "def use_proxy(self, request):\n # if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n # return False\n # i = random.randint(1, 10)\n # return i <= 2\n return True", "def checkProxy(fname):\n try:\n import M2Crypto\n cert = M2Crypto.X509.load_cert(fname)\n try:\n cert.get_ext('proxyCertInfo')\n except LookupError:\n # Really, there shouldn't be an undefined extension.\n print \"Warning: You seem to be using a pre-RFC proxy.\"\n print \"Try doing grid-proxy-init -rfc\"\n except ImportError:\n print \"Warning: Cannot load M2Crypto. Not able to check proxy\"\n print \" If you are getting errors, perhaps you are not using\"\n print ' an RFC compliant proxy. 
Did you do \"grid-proxy-init -rfc\"?'\n print \"To enable proxy checking, install m2crypto (CentOS, RedHat),\"\n print \"python-m2crypto (Debian) or py25-m2crypto (MacPorts)\"", "def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None", "def use_proxy(self, request):\n #if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n # return False\n #i = random.randint(1, 10)\n #return i <= 2\n return True", "def can_proxy_restclient(request, service, url):\n if not hasattr(request, \"can_proxy_restclient\"):\n request.can_proxy_restclient = is_admin()\n return request.can_proxy_restclient", "def isTrustProxy(self):\n pass", "def before_search(self):\n # check proxies first before anything\n if self.config.get('check_proxies', True) and self.proxy:\n if not self.proxy_check(proxy=self.proxy):\n self.startable = False", "def check_proxy(q):\n\n\n if not q.empty():\n\n proxy = q.get(False)\n proxy = proxy.strip()\n\n\n try:\n \n is_working = False\n\n if not is_bad_proxy(proxy):\n proxys_working_list.update({proxy: proxy})\n\n \n\n print(bcolors.OKGREEN + \" --[+] \", proxy, \" | PASS\" + bcolors.ENDC)\n\n else:\n print(\" --[!] \", proxy, \" | FAILED\")\n \n \n\n except Exception as err:\n print(\" --[!] \", proxy, \" | FAILED | \" + str(err))", "def use_random_public_proxy(self, val=True, test_proxy=False):\n if not val:\n self.random_proxy_bag = False\n return False\n self.random_proxy_bag = True\n\n if not self.proxy_bag:\n self.logger.debug(\"Proxy Bag already built, not getting more.\")\n self.proxy_bag = self.get_public_proxies()\n\n self.reset_proxy_from_bag()\n if not test_proxy:\n return True\n\n if self.test_public_proxy():\n return True\n\n return False", "def is_grid_search(self) -> bool:\r\n return os.path.exists(self._grid_search_path)", "def test_proxyable(self):\n\n @proxyable\n def is_proxyable():\n pass\n self.assertTrue(hasattr(is_proxyable, 'proxyable'))\n\n def not_proxyable():\n pass\n self.assertFalse(hasattr(not_proxyable, 'proxyable'))", "def use_proxy(self, request):\n if self.adsl:\n return True\n if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n return False\n i = random.randint(1, 10)\n return i <= 2", "def no_ext_grid(net):\n\n if net.ext_grid.in_service.sum() + (net.gen.slack & net.gen.in_service).sum() == 0:\n return True", "def test_public_proxy(self, retry_on_failure=True):\n logging.info(\"Testing Proxy: %s (%s)\" % (self.proxy_bag[0][\"ip\"], self.proxy_bag[0][\"country\"]))\n self.use_skip_ssl_verify()\n self.headers = {\"Content-Type\": \"application/json\"}\n test_url = self.remote_service_api.replace(\"api\", \"test\")\n\n test_response = self.get(test_url)\n self.use_skip_ssl_verify(False)\n\n # if not test_response:\n # logging.error(\"Could not find a working proxy.\")\n # return False\n\n logging.debug(\"Registered Proxy %s (%s) Test Request Took: %s\" % (\n self.proxy_bag[0][\"ip\"],\n self.proxy_bag[0][\"country\"],\n test_response.roundtrip))\n\n return True", "def has_grid(obj, is_json=False):\n if is_json:\n try:\n # this is the case where we're in a pure dict\n tv_grid = obj.get(constants.TV_GRID)\n except:\n # Thi is the case where we're in the data object\n tv_grid = obj.tv_grid\n\n return not is_empty_grid(tv_grid)\n else:\n tv_grid = obj.tv_grid\n if tv_grid and tv_grid.width != 0 or tv_grid.height != 0 or tv_grid.position != 0 or tv_grid.device_ids:\n return True\n return False", "def isuserproxy(model):\n from django.contrib.contenttypes.models import ContentType\n upk = 
ContentType.objects.get_for_model(User)\n mpk = ContentType.objects.get_for_model(model)\n return upk == mpk", "def check_proxy_status(proxy_ip):\n try:\n status = subprocess.check_output([\"ping\", \"-c\",\"1\", proxy_ip]).decode('utf-8')\n if status.find(\"1 received\") > -1:\n return True\n except subprocess.CalledProcessError as e:\n return False\n\n return False", "def is_result_proxy(obj):\n # Use type() instead of isinstance() to avoid the evaluation of the\n # ResultProxy if the object is indeed a proxy.\n return type(obj) is ResultProxy", "def is_python_fastproxy():\n import cpp_parameters\n # Note: _swig_new_instance_method is only generated when using -fastproxy\n return hasattr(cpp_parameters, \"_swig_new_instance_method\")", "def check_grid(grid: List):\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n return False\n return True", "def no_network_access_check(user):\n return not user.has_property(\"network_access\")" ]
[ "0.72894657", "0.6674798", "0.66601354", "0.6262091", "0.6189779", "0.6188301", "0.60288423", "0.59987336", "0.59448695", "0.59432656", "0.5932499", "0.5919514", "0.5895336", "0.5844185", "0.5822261", "0.57646286", "0.5723686", "0.56801885", "0.5607211", "0.5580686", "0.5574831", "0.55596733", "0.5487962", "0.5441852", "0.54410034", "0.5430961", "0.54145217", "0.5381069", "0.53771675", "0.53758013" ]
0.771145
0
Get the files from Bookkeeping DB valid GRID proxy is needed! >>> request = ... >>> files = filesFromBK ( request )
def filesFromBK(request):
    if not hasGridProxy():
        logger.error('filesFromBK: No Grig proxy!')
        return []

    if isinstance ( request , tuple ) : request = BKRequest ( *request )
    elif isinstance ( request , dict ) : request = BKRequest ( **request )

    path = request.path
    nmax = request.nmax
    first = request.first
    last = request.last
    grid = request.grid
    accessURL = request.accessURL
    SEs = request.SEs

    arguments = 'dirac-command %s' % which ( 'get_files_from_BK' )
    arguments += " %s " % path

    if nmax < 0: nmax = 1000000
    if last < 0: last = 1000000
    if nmax < 1000000 : arguments += ' --Max %d' % nmax
    if 0 < first : arguments += ' --First %d' % first
    if last < 1000000 : arguments += ' --Last %d' % last
    if accessURL : arguments += ' -a True '
    #
    if grid and isinstance ( grid , str ) : arguments += ' --Sites %s ' % grid
    elif grid and 1 == len(grid):
        arguments += ' --Sites %s ' % grid[0]
    elif grid:
        sg = ','.join(grid)
        arguments += ' --Sites %s ' % sg

    if SEs and isinstance ( SEs , str): arguments += ' --SEs %s ' % grid
    elif SEs and 1 == len ( SEs ) :
        arguments += ' --SEs %s ' % SEs [ 0 ]
    elif SEs:
        sg = ','.join(SEs)
        arguments += ' --SEs %s ' % sg

    ## arguments += ' "%s" ' % path

    ## convert to DIRAC
    import os
    from subprocess import Popen, PIPE
    arguments = arguments.split()
    logger.verbose('filesFromBK:use Popen(%s)' % arguments)
    p = Popen(arguments, stdout=PIPE, stderr=PIPE)
    (cout, cerr) = p.communicate()

    if 0 != p.returncode :
        logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )
        return []

    if py3 :
        cout = cout.decode ( 'utf-8' ) if cout else cout
        cerr = cerr.decode ( 'utf-8' ) if cerr else cerr

    if cerr :
        logger.error ( 'filesFromBK: error from Popen: %d/%s' % (p.returncode, cerr ) )
        return []

    cout = cout.split('\n')
    cout = cout [2:]
    cout = ' '.join ( cout )
    try:
        lst = eval ( cout )
        if not isinstance(lst, list):
            raise TypeError("Invalid list type")
        logger.debug( 'filesFromBK: %s ' % lst )
        return lst
    except:
        logger.error ("filesFromBK: can't interpret: %s" % cout)
        return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_files(self):\n return self.ebook_file.get_files()", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def fetch_files():\n props = util.get_properties()\n # get atmosfn\n atmosfn = \"%s/rwis.txt\" % (INCOMING, )\n try:\n ftp = ftplib.FTP('165.206.203.34')\n except TimeoutError as _exp:\n print(\"process_rwis FTP Server Timeout\")\n sys.exit()\n ftp.login('rwis', props['rwis_ftp_password'])\n ftp.retrbinary('RETR ExpApAirData.txt', open(atmosfn, 'wb').write)\n # Insert into LDM\n pqstr = \"plot ac %s rwis.txt raw/rwis/%sat.txt txt\" % (GTS, GTS)\n subprocess.call((\"/home/ldm/bin/pqinsert -i -p '%s' %s \"\n \"\") % (pqstr, atmosfn), shell=True)\n\n # get sfcfn\n sfcfn = \"%s/rwis_sf.txt\" % (INCOMING, )\n ftp.retrbinary('RETR ExpSfData.txt', open(sfcfn, 'wb').write)\n ftp.close()\n # Insert into LDM\n pqstr = \"plot ac %s rwis_sf.txt raw/rwis/%ssf.txt txt\" % (GTS, GTS)\n subprocess.call((\"/home/ldm/bin/pqinsert -i -p '%s' %s \"\n \"\") % (pqstr, sfcfn), shell=True)\n\n return atmosfn, sfcfn", "def get_dna_bank_files(request):\n\n user = User.objects.get(username=DEFAULT_USERNAME)\n directory = Directory.objects.get(user=user)\n\n dna_files = []\n\n dna_files_query_set = DNAFile.objects.filter(\n directory=directory, is_available=True)\n\n for dna_file in dna_files_query_set:\n dna_files.append(dna_file.get_file_details())\n\n response = {\"dna_files\": dna_files}\n\n return Response(response, status=HTTP_200_OK)", "def get_files(self, step):\n dht = get_remote_node(self.dht_ip, self.dht_port)\n files = dht.get(get_hash(filestep + \"|\" + str(step)))\n return files", "def get_files(self):\n\n cur = self.app.conn.cursor()\n sql = \"select distinct case_text.fid, source.name from case_text join source on case_text.fid=source.id where \"\n sql += \"caseid=? 
order by lower(source.name) asc\"\n cur.execute(sql, [self.case['caseid'], ])\n self.casefiles = cur.fetchall()\n sql = \"select id, name, fulltext, mediapath, memo, owner, date, av_text_id from source order by source.name asc\"\n cur.execute(sql)\n self.allfiles = cur.fetchall()\n msg = _(\"Files linked: \") + str(len(self.casefiles)) + \" / \" + str(len(self.allfiles))\n self.ui.label_files_linked.setText(msg)", "async def fetch_file_list(client, bucket) -> List:\n # pylint: disable=invalid-name\n PG_HOSTNAME = config('PG_HOSTNAME')\n PG_DATABASE = config('PG_DATABASE')\n folder = f'backup/{PG_HOSTNAME}_{PG_DATABASE}'\n result = await client.list_objects_v2(Bucket=bucket, Prefix=folder)\n contents = result.get('Contents', None)\n file_list = list([])\n if contents:\n for content in contents:\n file_list.append(content.get('Key'))\n return file_list", "def _get_files_in_db(self):\r\n query = 'SELECT DISTINCT file_name FROM {0};'.format(\r\n self.tables['measurements'])\r\n self.cursor.execute(query)\r\n result = self.cursor.fetchall()\r\n files = [ele[0] for ele in result if ele[0] is not None]\r\n return files", "def download_files(self):", "def get_all_files_to_instrument():\n sql=\"SELECT * FROM files\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def get_files(self):\n\n self.files = []\n retriever_methods = [\n m\n for m in rtorrent9.file.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # 2nd arg can be anything, but it'll return all files in torrent\n # regardless\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"f.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n offset_method_index = retriever_methods.index(\n rtorrent9.rpc.find_method(\"f.offset\")\n )\n\n # make a list of the offsets of all the files, sort appropriately\n offset_list = sorted([r[offset_method_index] for r in results])\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n # get proper index positions for each file (based on the file\n # offset)\n f_index = offset_list.index(results_dict[\"offset\"])\n\n self.files.append(\n File(self._rt_obj, self.info_hash, f_index, **results_dict)\n )\n\n return self.files", "def gridfs_files(self):\n return self[\"files\"]", "def getAllFileRecordsIter(fs_name):\n files = None\n session = Queries.createSession()\n try:\n fs_db = session.execute(sqlalchemy.select([FileSpace]).where(FileSpace.storage_name == fs_name)).fetchone()\n catalog = session.execute(sqlalchemy.select([Catalog]).where(Catalog.fs_id == fs_db.id)).fetchone()\n files = session.query(FileTable).filter_by(catalog_id=catalog.id)\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return files", "def api_files():\n files = FileWrapper.get_files(g.user.id)\n return jsonify([f.serialize() for f in files])", "def getGlobusFiles(self):\n\t\treturn self.transfer_client.operation_ls(self.transfer_client.endpoint_search(DATA_ENDPOINT_NAME)[0]['name'])", "def get_files(self):\r\n return self._filelist", "def get_files(self, block):\n \n raise 
NotImplementedError('get_files')", "def _get_files(self, paths: List[str]) -> List[Tuple[str, bytes]]:\n pool = multiprocessing.dummy.Pool(self._processes)\n return pool.map(self._get_file, paths) # type: ignore", "def files():\n return get_cached(\"files.json\")", "async def fetch_files(self, tag=None, n=100):\n\n logging.debug(\"Fetching files (tag is %s)\" % tag)\n\n params = {\"n\": n}\n if tag is not None:\n params.update({\"tag\": tag})\n\n files = await self.client.request.get(\"/files\", params=params)\n return [FileBase.build_file(\n self.client, file, self.loop) for file in files[\"data\"]]", "def get_files_from_minio(bucket_name, file_prefix, dc_years=None):\n minio_client = get_minio_client()\n ls_minio_files = list(minio_client.list_objects(bucket_name, file_prefix))\n ls_files_to_download = []\n for obj in (ls_minio_files):\n # On charge le fichier seulement si\n # - dc_years est renseigné ET que l'objet est dedans\n # - OU que dc_years n'est pas renseigné\n if (\n (\n dc_years is not None and \n obj.object_name in dc_years\n ) or\n dc_years is None\n ):\n ls_files_to_download.append(obj.object_name)\n # get_file_fromMinio(bucket_name, minio_object_path, local_object_path)\n return ls_files_to_download", "def list_files():\n try:\n return jsonify(os.listdir(env(\"FILES_DIRECTORY\"))), 200\n except:\n return {\"msg\": \"Não há arquivos a serem listados.\"}, 404", "def _fetch_files(self, snapshot: Bug, filepaths: List[str]) -> None:\n bgz = self.__bugzoo\n container = bgz.containers.provision(snapshot)\n try:\n for filepath in filepaths:\n key = (snapshot.name, filepath)\n try:\n if key in self.__cache_file_contents:\n continue\n self.__cache_file_contents[key] = \\\n bgz.files.read(container, filepath)\n except KeyError:\n logger.exception(\"Failed to read source file, '%s/%s': file not found\", # noqa: pycodestyle\n snapshot.name, filepath)\n raise FileNotFound(filepath)\n finally:\n del bgz.containers[container.uid]", "def getfiles(self, path, ext=None, start=None, stop=None, recursive=False):\n from .utils import connection_with_anon, connection_with_gs\n\n parse = BotoClient.parse_query(path)\n\n scheme = parse[0]\n bucket_name = parse[1]\n\n if scheme == 's3' or scheme == 's3n':\n conn = connection_with_anon(self.credentials)\n bucket = conn.get_bucket(parse[1])\n elif scheme == 'gs':\n conn = connection_with_gs(bucket_name)\n bucket = conn.get_bucket()\n else:\n raise NotImplementedError(\"No file reader implementation for URL scheme \" + scheme)\n\n keys = BotoClient.retrieve_keys(\n bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive)\n keylist = [key.name for key in keys]\n if ext:\n keylist = [keyname for keyname in keylist if keyname.endswith(ext)]\n keylist.sort()\n keylist = select(keylist, start, stop)\n\n return scheme, bucket.name, keylist", "def cb_filelist(args):\n req = args[\"request\"]\n\n pyhttp = req.getHttp()\n config = req.getConfiguration()\n pathinfo = pyhttp[\"PATH_INFO\"]\n\n if not pathinfo.startswith(\"/\" + TRIGGER):\n return\n\n logger = tools.getLogger()\n\n data = req.getData()\n data[INIT_KEY] = 1\n datadir = config[\"datadir\"]\n data['root_datadir'] = config['datadir']\n wikidir = config.get(\"wikidir\", config['datadir'])\n\n # convert the / to os.sep so that we can use os.path stuff.\n wikidir = wikidir.replace(\"/\", os.sep)\n if not wikidir.endswith(os.sep):\n wikidir = wikidir + os.sep\n\n page_name = pathinfo[len(\"/\" + TRIGGER)+1:]\n\n if not page_name:\n return\n\n page_name = page_name.replace(\"/\", 
os.sep)\n\n if not page_name:\n return\n\n if page_name.endswith(os.sep):\n page_name = page_name[:-1]\n\n # if the page has a flavour, we use that. otherwise\n # we default to the wiki flavour\n page_name, flavour = os.path.splitext(page_name)\n if flavour:\n data[\"flavour\"] = flavour[1:]\n\n # wikifile should hold the absolute path on the file system to\n # the wiki file we're looking at. if it's in a parent directory\n # of wikidir, then we abort. \n wikifile = os.path.normpath(os.path.join(wikidir, page_name))\n if not wikifile.startswith(wikidir):\n logger.info(\"wiki file requested '%s' is not in wikidir.\" % wikifile)\n return []\n\n # we build our own config dict for the fileentry to kind of\n # fake it into loading this file correctly rather than\n # one of the entries.\n newdatadir = wikidir\n\n ext = tools.what_ext(data[\"extensions\"].keys(), wikifile)\n\n if not ext:\n logger.info(\"wiki file '%s' does not exist.\" % wikifile)\n return []\n\n data['root_datadir'] = page_name + '.' + ext\n data['bl_type'] = 'file'\n wikifile = wikifile + \".\" + ext\n\n if not os.path.isfile(wikifile):\n return []\n\n fe = FileEntry(req, wikifile, wikidir)\n\n # now we evaluate python code blocks\n body = fe.getData()\n body = eval_python_blocks(req, body)\n body = \"<!-- STATIC PAGE START -->\\n\\n%s\\n<!-- STATIC PAGE END -->\\n\" % body\n\n # now we evaluate for wikilinks\n body = connect_links(config[\"base_url\"],\n data[\"extensions\"].keys(),\n wikidir,\n body)\n\n fe.setData(body)\n\n fe[\"absolute_path\"] = TRIGGER\n fe[\"fn\"] = page_name\n fe[\"file_path\"] = TRIGGER + \"/\" + page_name\n fe[\"template_name\"] = \"wiki\"\n\n data['blog_title_with_path'] = \"%s : %s\" % \\\n (config.get(\"blog_title\", \"\"), fe.get(\"title_escaped\", \"\"))\n\n # set the datadir back\n config[\"datadir\"] = datadir\n\n return [fe]", "def test_get_files_list(self):\n files = self.download.get_files_list()\n self.assertTrue(len(files) > 0)" ]
[ "0.6558046", "0.6558046", "0.6558046", "0.64741325", "0.64492", "0.64468104", "0.63758737", "0.6245564", "0.6214236", "0.6200066", "0.61985886", "0.6178122", "0.6109232", "0.61078346", "0.6092919", "0.6060358", "0.6049451", "0.6029149", "0.6018365", "0.5991038", "0.5925975", "0.5921891", "0.58837557", "0.5855249", "0.5842926", "0.58377534", "0.583344", "0.58235127", "0.5821157", "0.5808959" ]
0.78951913
0
Initialises the node with children and a handler
def __init__(self, handler=None):
    self.children = defaultdict(RouteTrieNode)
    self.handler = handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())", "def __init__(self, handler=None):\n self.root = RouteTrieNode(handler=handler)", "def __init__(self, node_id):\n # Assign ID and update class-counter\n self.id = node_id\n\n # Initialize\n self.is_sequence_end = False\n self.children = {}", "def __init__(self):\n self.root = SimpleNode()", "def _init_child(self,child,path):\n pass", "def __init__(self):\n self.root = Node('')", "def __init__(self):\n self._child_key = None\n self._child_val = None\n self._children = None\n self._element = None\n self._elements = None", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = self.Node()", "def __init__(self, item=None):\n self.item = item\n self.children = [] #List to hold all child nodes of this Node", "def __init__(self, root_handler=None):\n self.root = RouteTrieNode(root_handler)", "def __init__(self):\n self.__root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self, children):\n self.children = children\n self.grammar = None", "def __init__(self):\n self.root = self.Node(None)", "def setup_children(self):\n # Only generate new children if there are none\n if len(self.children) == 0:\n # Create the encoder and decoder genes\n encoder = EncoderGene(name='encoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n self.children = [encoder]\n\n decoder = DecoderGene(name='decoder',\n parent=self,\n spatial_scale=self.hyperparam(\n 'spatial_scale'))\n\n self.children.append(decoder)\n\n pass", "def __init__(self, board, parent, children, hn):\n\t\tsuper(Node, self).__init__()\n\t\tself.board = []\n\t\tself.parent = parent\n\t\tself.children = []\n\t\tself.hn = hn", "def __init__(self, l_child, r_child):\n self.l_child = l_child\n self.r_child = r_child", "def __initChild(self):\n if self.__child is None:\n self.__child = []\n self._populateChild()", "def initialize_children(self, bibs):\n\t\tpass", "def __init__(self):\n self.root = Node(None)", "def __init__(self):\n self.root = TridNode()", "def __init__(self):\n self.root = self.get_new_node();", "def __init__(self, tree_node=None):\n self.root = tree_node", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) for c in children]", "def __init__(self, label, *children):\n self.__label = label;\n self.__children = \\\n [ c if type(c) is Tree else Tree(c) \n for c in children]", "def __init__(self,element,parent=None,children=None, state=None, player=None):\n self._element = element\n self._parent = parent\n if children is None:\n self._children = []\n self._state = state\n self._player = player" ]
[ "0.6854232", "0.66364855", "0.6558508", "0.65346444", "0.6506634", "0.6495653", "0.6488797", "0.6487924", "0.6487924", "0.6480149", "0.64634603", "0.644207", "0.6433212", "0.6403547", "0.6403547", "0.6403547", "0.63985157", "0.6396257", "0.6388987", "0.6380935", "0.637384", "0.6336809", "0.6322163", "0.63198835", "0.6319639", "0.6282585", "0.6277266", "0.6207367", "0.61962706", "0.618786" ]
0.6989251
1
Initialises trie with a root node and root handler
def __init__(self, root_handler=None):
    self.root = RouteTrieNode(root_handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, handler=None):\n self.root = RouteTrieNode(handler=handler)", "def __init__(self):\n self.root = self.TrieNode(0)", "def __init__(self):\n self.root = self.TrieNode(None)", "def __init__(self):\n self.root=TrieNode()", "def __init__(self):\n self.root=TrieNode()", "def __init__(self):\r\n self.root=Trie_Node()", "def __init__(self):\n self.root = Solution2.TrieNode()", "def __init__(self, root_handler=\"root handler\", not_found_handler=\"404 page not found\"):\n self.trie = RouteTrie(handler=root_handler)\n self.not_found_handler = not_found_handler", "def __init__(self):\n self.root = TrieNode()\n # self.root = {}", "def __init__(self):\n\t\tself.root = TrieNode('*')", "def __init__(self):\n self.root = TrieNode(\"st\")", "def __init__(self):\n self.root = TrieNode(\".\")", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n #initialize the root of Trie\n self._dict = TrieNode('/')", "def __init__(self, handler=None):\n self.children = defaultdict(RouteTrieNode)\n self.handler = handler" ]
[ "0.80115473", "0.78774226", "0.7809918", "0.77427226", "0.77427226", "0.77102625", "0.7547998", "0.75354844", "0.7342355", "0.7339628", "0.73328793", "0.7313797", "0.7303609", "0.7303609", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7302819", "0.7287032", "0.72774506" ]
0.81519413
0
Adds a handler for a path
def add_handler(self, path, handler) -> None:
    if self.__test_path(path) and self.__test_path(handler):
        path_parts = self.__split_path(path)  # Splits parts into constituent components
        self.route_trie.insert(path_parts, handler)  # Passes parts on for addition to the trie
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)", "def register_handler(self, method, path, fn):\n if not(method in self.handlers):\n self.handlers[method] = {}\n self.handlers[method][path] = fn", "def add_handler(self, handler):\n pass", "def publish(self, path, handler):\n path = ensure_slash(path)\n self.handlers[path] = handler", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def addHandler(self, fn):\n self.handlers.append(fn)", "def register_handler(self, handler):\r\n self.handler = handler", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def insert(self, path, handler=None):\n if path in self.children: # Return None if path already exists\n print('Warning: Path already exists!')\n return\n else:\n self.children[path] = RouteTrieNode(handler) # Add character if it doesn't exist", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def add_route(\n app: bottle.Bottle, path: str, method: str, handler: Callable, apply: list = None\n):\n\n if apply is None:\n apply = []\n if hasattr(handler, \"args\"):\n apply.append(use_args(handler.args))\n app.route(path, method, handler, apply=apply)", "def add_path(self, path, path_item):\n if path not in self._swagger:\n self._swagger[path] = path_item\n else:\n for method, definition in path_item.items():\n if definition is not None:\n setattr(self._swagger[path], method, definition)", "def register(self, command: str, handler: Any):\n\n if not command.startswith(\"/\"):\n command = f\"/{command}\"\n\n LOG.info(\"Registering %s to %s\", command, handler)\n self._routes[command].append(handler)", "def route(self, method, pattern, handler):\n pass", "def register_handler(self, handler):\n if handler.key in self.handlers.keys():\n raise ValueError(f'Key {handler.key} already registered')\n self.handlers[handler.key] = handler", "def register_handler(self, regex, handler):\n regex = re.compile(\"^\" + regex + \"$\")\n self.handlers.append((regex, handler))", "def add_path(self, path: Path):\n self._paths.append(path)\n path.set_end_of_line_callback(self.end_of_line)", "def register(self, handler):\n self.handlers.add(handler)\n return self", "def register_handler(self, token, handler):\r\n self._handlers[token] = handler", "def set_added_handler(self, handler):\n self._added_handler = handler", "def add_path_for_monitoring(self, path, prefix):\n pass", "def add_handler(self, handler, backtrack = False):\n\n # Add Handler\n self._handlers.append(handler)\n logger.debug(\"%s: handler %s added.\" % \\\n (self.__class__.__name__, handler.__name__))\n \n # Backtrack\n if backtrack:\n for message in self.get_waiting(): handler(message)\n logger.debug(\"%s: handler %s backtracked.\" % \\\n (self.__class__.__name__, handler.__name__))", "def add_quest(self, method: str, route: str, handler):\n\n self.aiohttp.router.add_route(method, route, handler)", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def RegisterHttpRouteHandler(method, route, handler_cls):\n http_routing.HTTP_ROUTING_MAP.add(routing.Rule(\n route, methods=[method],\n endpoint=handler_cls))", "def set_handler(self, handler):\n self._handler = 
handler", "def handle(self, path, method='GET'):\r\n depr(\"This method will change semantics in 0.10. Try to avoid it.\")\r\n if isinstance(path, dict):\r\n return self._handle(path)\r\n return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})", "def set_handler(self, handler):\n self.next_handler = handler" ]
[ "0.8366175", "0.7557731", "0.7365812", "0.7264261", "0.71948415", "0.6904948", "0.6904236", "0.6826481", "0.6768612", "0.67261535", "0.65624624", "0.65400296", "0.6414063", "0.6391095", "0.6358848", "0.6326726", "0.6206793", "0.6205492", "0.61878026", "0.61793965", "0.6146285", "0.6139611", "0.6134876", "0.6113124", "0.60778344", "0.6071499", "0.6066905", "0.60232294", "0.59947836", "0.59866685" ]
0.85792696
0
Lookup path and return its handler
def lookup(self, path) -> str:
    if self.__test_path(path):
        path_parts = self.__split_path(path)  # Splits parts into constituent components
        handler = self.route_trie.find(path_parts)  # Stores result of path search
        return handler if handler else self.path_not_found  # Returns handler if there's a match, else 404 error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, path):\n\n path_list = self.split_path(path)\n return_handler = self.trie.find(prefix=path_list)\n\n if return_handler:\n return return_handler\n else:\n return self.not_found_handler", "def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)", "def match(self, operation: str, path: str):\n for reg, handler in self.handlers:\n if re.match(reg, path):\n print(\"Found matching handler for\", operation, path)\n method = getattr(handler, operation)\n return method\n raise Exception(f\"No handler for {operation} at {path}\")", "def find(self, path_parts):\n current_node = self.root # Start at the root\n for part in path_parts: # Cycle through the path parts\n if part not in current_node.children: # If path doesn't exist...\n return # ..return None\n current_node = current_node.children[part] # Else, traverse onwards\n return current_node.handler # Return its handler", "def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)", "def find(self, prefix):\n\n node = self.root\n\n for path_step in prefix:\n if path_step in node.children:\n node = node.children[path_step]\n else:\n return RouteTrieNode().handler # a default empty page handler\n\n return node.handler", "def add_handler(self, path, handler) -> None:\n if self.__test_path(path) and self.__test_path(handler):\n path_parts = self.__split_path(path) # Splits parts into constituent components\n self.route_trie.insert(path_parts, handler) # Passes parts on for addition to the trie", "def _get_handler(self, name):\n\n if name not in self._handlers:\n raise CLICoreTemplateHandlerNotFoundError('Command [{name}] is not valid. 
'\n 'available commands: {commands}.'\n .format(name=name,\n commands=\n list(self._handlers.keys())))\n\n return self._handlers[name][0]", "def _parsed_path(self, handler_name='handler', suffix=''):\r\n return urlparse(self.runtime.handler_url(self.block, handler_name, suffix=suffix)).path", "def find_handler(url):\n for handler in __all__:\n # Get the symbol for handler\n mod = globals()[handler]\n # Ask handler if it can handle the url\n if getattr(mod, \"can_handle\")(url):\n return mod\n return None", "def find(self, route):\n curr = self.root\n for part in route:\n if part not in curr.children:\n return None\n curr = curr.children[part]\n return curr.handler", "def _parsed_path(self, handler_name='handler', suffix=''):\r\n return urlparse(handler_url(self.block, handler_name, suffix=suffix)).path", "def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)", "def getHandler(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def _handle_path(path: str) -> Callable:\n parts = Path(path).parts\n\n result = _cogs\n for part in parts:\n result = result[part]\n\n return result", "def get_handler(logger, handler):\n handlers = {'TRFH': TRFH,\n 'StdH': StdH}\n levels = logbook.base._reverse_level_names\n try:\n requested_handler = handlers[handler]\n except KeyError:\n logger.error((f'Desired Handler [\\'{requested_handler}\\'] is not'\n f' a valid handler type.'\n f' Valid types are {list(levels.keys())}.'))\n return\n for handler in logger.handlers:\n if isinstance(handler, requested_handler):\n return handler", "def gethandlerfunc(modname):\n try:\n # Import the module\n mod = __import__(modname)\n except ImportError:\n # No module with this name\n raise404(\"Couldn't import module \" + modname)\n\n try:\n # Find the handler function\n handler = mod.handler\n except AttributeError:\n # No handler function\n raise404(\"Couldn't find handler function in module \" + modname)\n\n if not callable(handler):\n # It's not a function\n raise404(\"Handler is not callable in module \" + modname)\n\n return handler", "def __call__(self, request: Request):\n\t\tnormalize = request.normalize()\n\t\tif normalize is not None: return normalize\n\t\t\n\t\t# OK, that test passed. Now go find the most applicable handler.\n\t\t# A not-too-complicated back-tracking search. 
I anticipate that\n\t\t# real applications won't stress this too hard.\n\t\tpath, node, i, found, best, backtrack = request.path, self.root, 0, None, -1, []\n\t\twhile True:\n\t\t\tif node.entry is not None and i > best: found, best = node, i\n\t\t\tif i<len(path) and self.WILDCARD in node.kids: backtrack.append((node.kids[self.WILDCARD], i + 1))\n\t\t\tif i<len(path) and path[i] in node.kids: node, i = node.kids[path[i]], i + 1\n\t\t\telif backtrack: node, i = backtrack.pop()\n\t\t\telif found is None: return Response.generic(code=404)\n\t\t\telse:\n\t\t\t\trequest.mount_depth = best\n\t\t\t\thandler, wildcards = found.entry\n\t\t\t\trequest.args = [path[i] for i in wildcards]\n\t\t\t\treturn handler(request)", "def get_handler_for_level(level):\n for hdl in root_handlers():\n if hdl.level <= level:\n return hdl", "def _maybe_handle(self, prefix, handler, path, params, data=None):\r\n if path.startswith(prefix):\r\n relpath = path[len(prefix):]\r\n if data:\r\n handler(relpath, params, data)\r\n else:\r\n handler(relpath, params)\r\n return True\r\n else:\r\n return False", "def GetHandlerForHttpRequest(request):\n\n matcher = http_routing.HTTP_ROUTING_MAP.bind(\n \"%s:%s\" % (request.environ[\"SERVER_NAME\"],\n request.environ[\"SERVER_PORT\"]))\n try:\n match = matcher.match(request.path, request.method)\n except werkzeug_exceptions.NotFound:\n raise api_call_handlers.ApiCallHandlerNotFoundError(\n \"No API handler was found for (%s) %s\" % (request.path,\n request.method))\n\n handler_cls, route_args = match\n return (handler_cls(), route_args)", "def get_routine(self, method, version, path):\n routines = self._routines.get((method, version))\n if not routines:\n return None\n for r in routines:\n if r.match_path(path):\n return r\n return None", "def get_errorhandler(self, exc: typing.Union[HTTPException, int]) -> typing.Union[None, Route]:\n if isinstance(exc, HTTPException):\n exc = exc.code\n\n try:\n return self.errorhandlers[exc]\n except KeyError:\n try:\n return self._parent.get_errorhandler(exc)\n except (KeyError, AttributeError):\n return None", "def get_handler(self):\n return self._Handler(self)", "def get_handler(self, name):\n return self.params[name].value_handler", "def delegate(func):\n @functools.wraps(func)\n def wrapped(self, *args, **kwargs):\n path = args[0]\n handler = self.router.match(func.__name__, path)\n return handler(*args, **kwargs)\n return wrapped", "def _find_handler(self):\n exc = self.exception\n\n for block in self.exc_handlers:\n for leader in block.leaders:\n if (leader.opcode == ops.exc_catch and\n self._exc_match(leader.args)):\n return leader", "def test_get_handler(self):\n class DummyHandler(handlers.BaseHandler):\n pass\n\n route = RouteFactory.build()\n route.handler_class = DummyHandler\n\n handler = route.get_handler()\n self.assertIsInstance(handler, DummyHandler)\n self.assertEqual(handler.route, route)", "def route(self, method, pattern, handler):\n pass", "def get_task_handler_from_job_dir(job_dir):\n task_proc_dir = os.path.abspath(os.path.join(job_dir, \"..\", \"proc\"))\n return get_task_handler_from_task_proc_dir(task_proc_dir)" ]
[ "0.8171178", "0.73559314", "0.68021876", "0.6668805", "0.6506093", "0.64750767", "0.64577895", "0.6412674", "0.64054465", "0.6383391", "0.63511366", "0.63122034", "0.6276047", "0.61854106", "0.61347514", "0.6107696", "0.5994779", "0.5990019", "0.59125984", "0.5912008", "0.59106475", "0.5896144", "0.5853439", "0.58179235", "0.578479", "0.5757347", "0.57347053", "0.5714386", "0.57101643", "0.5706835" ]
0.82154274
0
Splits path into its constituent parts
def __split_path(path: str) -> List[str]: return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def splitPath(self, path):\n return os.path.split(path)", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def split_path(self, path):\n path = path.strip(\"/\")\n return path.split(\"/\") if len(path) > 0 else []", "def zenpathsplit(self, path):\n return zenpathsplit(path)", "def split_all(path):\r\n components = []\r\n path = path.lstrip('/')\r\n while path:\r\n head, tail = os.path.split(path)\r\n if tail:\r\n components.insert(0, tail)\r\n elif head == path:\r\n components.insert(0, head)\r\n break\r\n path = head\r\n return components", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def split_path(path):\n parts = []\n path, end = os.path.split(path)\n while end:\n parts.append(end)\n path, end = os.path.split(path)\n\n if path:\n parts.append(path)\n parts.reverse()\n return parts", "def splitPath(path):\n return tuple(\n element for element in os.path.split(path.rstrip(os.path.sep)) if element\n )", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def path_split(path):\n res = []\n while path:\n path, tail = os.path.split(path)\n res.insert(0, tail)\n if path == '/':\n res.insert(0, '/')\n break\n return res", "def split(self, path):\n if not self.is_managed_path(path):\n return os.path.split(path)\n client, _ = self._get_storage(path)\n prefix, rel_path = self.parse_managed_path(path)\n return (\"%s:\" % prefix,) + client.split(rel_path)", "def split(path):\r\n if path.lower().startswith(\"smb://\"):\r\n if '/' not in path[6:]:\r\n path = path.replace(\"smb://\", \"smb:///\", 1)\r\n return path.rsplit('/', 1)\r\n else:\r\n return os.path.split(path)", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def pathsplit(path):\n stem, basename = os.path.split(path)\n if stem == '':\n return (basename,)\n if stem == path: # fixed point, likely '/'\n return (path,)\n return pathsplit(stem) + (basename,)", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list", "def pathComponents(path):\n parts = [p for p in path.split(os.path.sep) if p not in [\"\", \".\"]]\n return parts", "def _os_path_split_all(path):\n allparts = []\n while 1:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts", "def split_path(path:str):\n if path is None or len(path) == 0:\n 
return '', '', ''\n path = sanitize_path(path)\n folder, filename = os.path.split(path)\n ext = ''\n if '.' in filename:\n filename, ext = os.path.splitext(filename)\n # handle double ext, like 'mode.pth.tar'\n filename, ext2 = os.path.splitext(filename)\n ext = ext2 + ext\n else:\n folder = os.path.join(folder, filename)\n filename = ''\n return folder, filename, ext", "def split_path(self, path: str) -> List[str]:\n dirs = path.split('/')\n return list(filter(lambda x: x!='', dirs))", "def splitall(path):\n allparts = []\n while 1:\n parts = os.path.split(path)\n # sentinel for absolute paths\n if parts[0] == path:\n allparts.insert(0, parts[0])\n break\n # sentinel for relative paths\n elif parts[1] == path:\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n levels = dirname.strip('/').split(os.path.sep)[2:][-2:]\n return PATH_SPLIT.split(' '.join(levels + [fname_noext]))", "def split_all(path):\r\n result = []\r\n head = path\r\n while head:\r\n head2, tail = os.path.split(head)\r\n if head2 == head:\r\n break # reached root on Unix or drive specification on Windows\r\n head = head2\r\n result.insert(0, tail)\r\n if head:\r\n result.insert(0, head)\r\n return result", "def splitpath(self, full=False):\n path = _os.path.split(self.__str__())\n if full == True:\n return self.__str__().split(os.path.sep)\n else:\n return [getpath(path[0], custom=True), path[1]]", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def split_path(self, path):\n path = os.path.splitdrive(path)[1][1:]\n folders = []\n while 1:\n path, folder = os.path.split(path)\n if folder != \"\" and folder:\n folders.append(folder)\n if len(path) == 0:\n return folders[::-1]\n else:\n if path != \"\" and path:\n folders.append(path)\n break\n folders.reverse()\n return folders", "def split_path(path):\n items = []\n while True:\n path, folder = os.path.split(path)\n if folder != '':\n items.append(folder)\n else:\n if path != '':\n items.append(path)\n break\n items.reverse()\n return items", "def parse(path, root=True):\n if path.startswith(\"/\"):\n return path[1:], \"\"\n\n if \"/\" not in path:\n return path, \"\"\n\n if root:\n return path.split(\"/\", 1)\n else:\n return path.rsplit(\"/\", 1)", "def splitparams(path):\n if '/' in path:\n i = path.find(';', path.rfind('/'))\n else:\n i = path.find(';')\n if i < 0:\n return path, ''\n return path[:i], path[i + 1:]", "def path_elements(path):\n result = []\n (head, tail) = os.path.split(path)\n while tail != \"\":\n result.insert(0, tail)\n (head, tail) = os.path.split(head)\n result.insert(0, head)\n return result" ]
[ "0.8204391", "0.8174964", "0.8085845", "0.8003212", "0.7822543", "0.78090787", "0.7783582", "0.7781801", "0.7724184", "0.7705158", "0.7695372", "0.7639305", "0.76013774", "0.75980574", "0.75946397", "0.75852054", "0.7545167", "0.74679285", "0.73923457", "0.73623455", "0.7345628", "0.72870505", "0.72642946", "0.7236912", "0.7235237", "0.7210251", "0.71994996", "0.71643007", "0.71561354", "0.7031872" ]
0.82463825
0
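As an aside, the splitting technique in the row above is small enough to exercise directly. The sketch below is illustrative only; the standalone function name and the sample paths are assumptions, not part of the dataset row.

```python
from typing import List


def split_path(path: str) -> List[str]:
    # Same idea as the row's document: split on '/' and drop empty parts,
    # so leading, trailing, and doubled slashes are tolerated.
    return [part for part in path.split('/') if part]


assert split_path("/home//user/docs/") == ["home", "user", "docs"]
assert split_path("/") == []
```

Filtering out the empty strings is what makes this preferable to a bare `path.split('/')`, which would keep an empty segment for every extra slash.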
Checks by pattern if roll is a valid dice roll. This includes `nDy`, `ndy` and mixes with numbers `+` and `-`.
def is_dice_roll(roll: str) -> Optional[re.Match]: matcher = DICE_REGEX.match(roll) return matcher
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dice_roller():\n\n print('Use the xDy+z format to roll the dice. Example: \"2D6+4\"')\n roll = input(\"Enter the dice: \").lower()\n\n if roll.count('d') > 1:\n return \"Wrong input!\"\n\n validate = roll.replace(\"d\", \"\").replace(\"+\", \"\").replace('-', \"\")\n\n # \"validate\" variable is made just for checking if input is correct, by trying to convert itself into a integer.\n # This allows us to find if there are any unwanted characters, because we got rid of +, - and d which are essential for the roll.\n # Even if there are multiple of + or - signs, we can catch them either later in function or in other 3 functions.\n\n if not isinstance(roll, str):\n return \"Wrong input!\"\n\n try:\n validate = int(validate)\n except ValueError:\n return \"Wrong input!\"\n\n # Checking if the input is correct, so that user can only use xDy+z format\n # and is informed if they input something wrong.\n\n dice = dice_func(roll)\n amount_of_dice = amount_func(roll)\n modifier = mod_func(roll)\n\n if dice == \"Wrong input!\" or modifier == \"Wrong input!\":\n return \"Wrong input!\"\n\n # Checking if the other functions did not return \"Wrong input!\" statement\n\n else:\n result = 0\n for amount in range(amount_of_dice):\n result += randint(1, dice)\n \n # The last loop simulates the rolling of the dice, each one individually, just like in an RPG game.\n\n return result + modifier", "async def roll(self, ctx, roll: str):\n r = re.compile(r'\\d+[DdWw]\\d+')\n\n if r.match(roll) is None:\n await ctx.send(f'\"{roll}\" is not a valid input, please use e.g. 1d20 / 3d6 or 1w20 / 3w6')\n else:\n roll_times, roll_sides = self.rollStringToValues(roll)\n roll_values = Dice.roll_XdY(roll_times, roll_sides)\n self._last_roll = roll\n await self.send(ctx, f'Rolling {roll}:\\t{roll_values}')", "def roll_dice(roll, modifiers):\n try:\n if modifiers[\"Advantage\"] and not modifiers[\"Disadvantage\"]:\n modifiers[\"Advantage\"] = False\n return max(roll_dice(roll, modifiers), roll_dice(roll,modifiers))\n if modifiers[\"Disadvantage\"] and not modifiers[\"Advantage\"]:\n modifiers[\"Disadvantage\"] = False\n return min(roll_dice(roll, modifiers), roll_dice(roll, modifiers))\n num_dice = int(roll.split(\"D\")[0])\n if modifiers[\"Critical\"]:\n num_dice*=2\n num_dice+=modifiers[\"Brutal\"]\n die_type = roll.split(\"D\")[1]\n if die_type[0] == \"4\" or die_type[0] == \"6\" or die_type[0] == \"8\":\n die_type = int(die_type[0])\n elif die_type[:3] == \"100\" or die_type[0] == \"%\":\n die_type = 100\n elif die_type[:2] == \"10\" or die_type[:2] == \"12\" or die_type[:2] == \"20\":\n die_type = int(die_type[:2])\n else:\n die_type = 6\n roll_total = 0\n critical_success = False\n critical_failure = False\n for die in range(num_dice):\n die_result = random.randint(1,die_type)\n if die_result == 1 and modifiers[\"Lucky\"] or die_result <= 2 and modifiers[\"Great Weapon\"]:\n die_result = random.randint(1,die_type)\n if die_result < modifiers[\"Minimum Roll\"]:\n die_result = modifiers[\"Minimum Roll\"]\n if die_result == 20 and die_type == 20:\n critical_success = True\n if die_result == 1 and die_type == 20:\n critical_failure = True\n roll_total += die_result\n return roll_total\n except ValueError:\n return \"Error\"", "def roll_dice_logic(d_roll: str) -> list[int]:\n\n escaped_d_roll = re.sub(r'[-+]', '', d_roll)\n d_number, d_face = re.split(r'[dD]', escaped_d_roll)\n d_face = int(d_face)\n d_number = int(d_number) if d_number else 1\n return [random.randint(1, d_face) for _n in range(0, d_number)]", "def 
roll(dice):\n\n dice = str(dice).upper().strip()\n dice_mod = 0\n if dice == 'FLUX':\n return randint(1, 6) - randint(1, 6)\n else:\n if dice == 'GOODFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 < flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n else:\n if dice == 'BADFLUX':\n flux1 = randint(1, 6)\n flux2 = randint(1, 6)\n if flux1 > flux2:\n return flux2 - flux1\n else:\n return flux1 - flux2\n \n ichar1 = dice.find('DD')\n if ichar1 == -1:\n ichar1 = dice.find('D')\n if ichar1 == 0:\n num_dice = 1\n\n if ichar1 <> -1:\n if ichar1 <> 0:\n num_dice = int(dice[0:ichar1])\n# print 'Number of dice =', num_dice\n ichar2 = dice.find('+')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n else:\n ichar2 = dice.find('-')\n if ichar2 <> -1:\n dice_mod = int(dice[ichar2:len(dice)])\n# print 'dice mod =', dice_mod\n\n if ichar2 <> -1:\n dice_type = dice[ichar1: ichar2]\n dice_type = dice_type.rstrip()\n else:\n dice_type = dice[ichar1: len(dice)]\n# print 'dice type =', dice_type, 'Len = ', len(dice_type)\n\n if dice_type == 'D6': \n return die_rolls(6, num_dice) + dice_mod\n else:\n if dice_type == 'D66' and num_dice == 1 and dice_mod == 0:\n return randint(1, 6) * 10 + randint(1, 6)\n else:\n if dice_type == 'D100' and num_dice == 1: \n return (randint(1, 10) - 1) * 10 + randint(1, 10) + dice_mod \n else:\n if dice_type == 'D10': \n return die_rolls(10, num_dice) + dice_mod\n else: \n if dice_type == 'D20': \n return die_rolls(20, num_dice) + dice_mod\n else:\n if dice_type == 'D30': \n return die_rolls(30, num_dice) + dice_mod\n else:\n if dice_type == 'D12': \n return die_rolls(12, num_dice) + dice_mod\n else:\n if dice_type == 'D8': \n return die_rolls(8, num_dice) + dice_mod\n else:\n if dice_type == 'D4': \n return die_rolls(4, num_dice) + dice_mod\n else:\n if dice_type == 'D9': \n return die_rolls(9, num_dice) + dice_mod\n else:\n if dice_type == 'D3': \n return die_rolls(3, num_dice) + dice_mod\n else:\n if dice_type == 'DD':\n return (die_rolls(6, num_dice) + dice_mod) * 10\n \n print\n print \"** DICE ERROR! 
'%s' is unknown **\" % dice\n print \n print \"roll() is a dice rolling program.\"\n print\n print \"The types of dice to roll are (in string values):\"\n print \"roll('D6') -- roll one 6-sided die\"\n print \"roll('1D6') -- roll one 6-sided die\"\n print \"roll('2D6') -- roll two 6-sided dice\"\n print \"roll('D10') -- roll a 10-sided die\"\n print \"roll('D100') -- roll a 100-sided die (1 - 100)\"\n print \"roll('D66') -- roll for a D66 chart\"\n print \"roll('2DD+3') -- roll (2D6+3) x 10\"\n print\n print \"-/+ DMs can be added to rolls:\"\n print \"roll('3D6+6') -- add +6 DM to roll\"\n print \"roll('4D4-4') -- add -4 DM to roll\"\n print\n return 0", "async def roll(self, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await self.bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await self.bot.say(result)", "def match_manual_dice(state, rolls_string):\n\n # regex:\n # ^, $: match from start to end of string\n # \\d+: match one or more integers\n pattern = r\"^\\d+$\"\n out_list = []\n dice_count = None\n dice_max = None\n\n # attribute and fight talent tests take 1D20\n if state.selection.category in (\"attr\", \"fight_talent\", \"advantage\"):\n dice_count = 1\n dice_max = 20\n # skill and spell tests take 3D20\n elif state.selection.category in (\"skill\", \"spell\"):\n dice_count = 3\n dice_max = 20\n # misc dice roll takes whatever was specified earlier\n elif state.selection.category == \"misc\":\n dice_count = state.selection.dice_count\n dice_max = state.selection.dice_eyes\n\n # allow matches for dice separated by any number of whitespaces and\n # commas\n rolls_string = rolls_string.replace(',', ' ')\n rolls_list = rolls_string.split(' ')\n\n for item in rolls_list:\n match = re.match(pattern, item)\n if match:\n if int(item) in range(1, dice_max + 1):\n out_list.append(int(item))\n\n state.rolls = out_list\n\n if len(state.rolls) != dice_count:\n state.rolls = None\n\n return state", "def dice_func(die):\n\n viable_dice = ('d3', 'd4', 'd6', 'd8', 'd100', 'd12', 'd20', 'd10')\n dice = \"\"\n\n for d in viable_dice:\n if d in die:\n dice = d\n break\n\n if len(dice) == 0:\n return \"Wrong input!\"\n\n try:\n dice = int(dice.replace('d', \"\"))\n except (TypeError, ValueError):\n return \"Wrong input!\"\n return dice", "def testRoll(self):\n \n nsides=3\n die = BaseDie(nsides)\n lighted_die = LightedDie(nsides,colors={1:'blue',2:'yellow',3:'gold'})\n\n self.assertEqual(die.last_roll,None)\n\n die.roll()\n lighted_die.roll()\n\n for d in [die,lighted_die]:\n self.assertTrue(d.last_roll>0 and d.last_roll <= nsides)", "def roll_dice(check_double=True):\n\n roll = np.random.choice(np.arange(1, 7), 2)\n\n if check_double:\n return roll.sum(), roll[0] == roll[1]\n else:\n return roll.sum()", "def die_roll():\n roll = random.randint(1,6)\n return roll", "async def roll(ctx, dice: str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await ctx.send('Format has to be in NdN!')\n return\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await ctx.send(result)", "async def roll(self, ctx, dice : str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await ctx.send('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await ctx.send(result)", "def roll_dice(roll: str) -> tuple[int, defaultdict[str, list[int]]]:\n\n if 
roll.startswith(f'{settings.BOT_COMMAND_PREFIX}roll'):\n roll = roll.replace(f'{settings.BOT_COMMAND_PREFIX}roll', '').strip()\n\n if not is_dice_roll(roll):\n msg = _('dice roll `%(roll)s` syntax is incorrect.') % {\n 'roll': roll,\n }\n raise OilAndRopeException(msg.capitalize())\n\n # First of all we separate dice rolls since they work differently\n dice_rolls = re.findall(r'[-+]?\\d*[dD]\\d+', roll)\n dice_rolls_result = defaultdict(list)\n for d_roll in dice_rolls:\n dice_rolls_result[d_roll] = roll_dice_logic(d_roll)\n # NOTE: If we removed rolled dices is easier later to continue the logic\n roll = roll.replace(d_roll, '')\n\n # Number rolls are easier since it's just addition or subtraction\n number_rolls = re.findall(r'[-+]?\\d+', roll)\n for number in number_rolls:\n dice_rolls_result[number] = [int(number)]\n\n final_result = 0\n for key, value in dice_rolls_result.items():\n # NOTE: Doll doesn't work with negative numbers but list of positive int, we just sum and subtract\n if key.startswith('-') and len(value) >= 1:\n final_result -= sum(value)\n else:\n final_result += sum(value)\n\n return final_result, dice_rolls_result", "async def roll(dice: str):\r\n try:\r\n rolls, limit = map(int, dice.split('d'))\r\n except Exception:\r\n await rose.say('Format has to be in NdN!')\r\n return\r\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\r\n await rose.say(result)", "def parse_roll(roll_text):\n roll_text = roll_text.replace(\" \",\"\")\n rolls_total = 0\n critical_hit = False\n if CRITICAL_CODE in roll_text:\n critical_hit = True\n roll_text = roll_text.replace(CRITICAL_CODE,\"\")\n rolls = roll_text.split(\"+\")\n for roll in rolls:\n modifiers = get_modifiers(roll,critical_hit)\n roll = resolve_modifiers(roll)\n roll = resolve_naked_numbers(roll)\n d_position = roll.find(\"D\")\n if d_position == -1:\n rolls_total += int(roll)\n continue\n try:\n rolls_total += roll_dice(roll, modifiers)\n except TypeError:\n return \"Error\"\n\n return str(rolls_total)", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n \"*** YOUR CODE HERE ***\"\n count, return_sum = 0, 0\n while count < num_rolls:\n roll = dice()\n if roll == 1:\n count += 1\n while count < num_rolls:\n dice()\n count += 1\n return 1\n return_sum += roll\n count += 1\n return return_sum\n # END PROBLEM 1", "async def roll(dice : str):\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await bot.say(result)", "def roll_die(number_of_rolls, number_of_sides):\n\n roll = random.randint(1, number_of_sides) # Used recursion for this\n if number_of_rolls == 0:\n return 0 # Base case is 0. 
If it's 1, then I can roll a 7 with 6 sides\n else:\n return roll + roll_die(number_of_rolls - 1, number_of_sides) # Subtract 1 roll and keep calling function", "def roll_dice(num_rolls, dice=six_sided):\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN PROBLEM 1\n num_roll = 0\n sum = 0\n pig_out = False # Pig Out rule\n while num_roll < num_rolls:\n roll = dice()\n if roll == 1:\n pig_out = True\n sum += roll\n num_roll += 1\n if pig_out: return 1\n else: return sum\n # END PROBLEM 1", "async def roll (self, ctx, *, dice_text: str):\n if not re.search (\"^(\\d+[dD]\\d+)\", dice_text):\n await ctx.send (\"Format {} invalid\".format (dice_text))\n return\n match_dice_part = re.search (\"(\\d+[dD]\\d+)\", dice_text)\n dice_parts = match_dice_part.group()#.lower().split('d')\n print (\"dice_parts: {}\".format (dice_parts))\n elements = dice_parts.split ('d')\n print (elements)\n number = int (elements [0])\n face = int (elements [1])\n print (\"number: \")\n print (number)\n print (\"face: \")\n print (face)\n output = \"\"\n for n in range(number):\n output =output+ (\"{}\" if n ==0 else \" | {} \").format(random.randint(1, face))\n await ctx.send (output)\n \"\"\"\n match_dice_bonus = re.search (\"((\\+|-)\\d+)+\", dice_text)\n dice_bonus = 0\n print (\"match_dice_bonus\")\n print (match_dice_bonus)\n if match_dice_bonus:\n dice_bonus = eval (match_dice_bonus.group ())\n print (\"dice_bonus: {}\".format (dice_bonus))\n \"\"\"", "def roll_dice(self, dice_to_roll):\n if type(dice_to_roll) != type([]):\n raise TypeError(\"Expecting list of strings argument containing \"\n \"any combination of 'a' and 'b'.\")\n else:\n for member in dice_to_roll:\n if type(member) != type(\"\") or member not in ['a', 'b']:\n raise TypeError(\"Expecting argument containing \"\n \"any combination of 'a' and 'b'.\")\n if \"a\" in dice_to_roll:\n self.die_a.roll()\n if \"b\" in dice_to_roll:\n self.die_b.roll()", "def diceRoll():\n return random.randint(1, 6) # generates a random integer between 1 and 6 (inclusive) and returns it.", "async def dice(self, ctx, diceroll: str = '1d6'):\n times, num = diceroll.split('d')\n times = int(times) if times else 1\n num = int(num) if num else 6\n maxscore = times*num\n score = random.randint(times, maxscore)\n await ctx.send(ctx._(\"roll_result\").format(score=score, maxscore=maxscore))", "def determine_roll(self):\n dice_to_roll = []\n to_roll = input(\"Roll dice: \")\n if 'a' in to_roll:\n dice_to_roll.append(self.die_a)\n\n if 'b' in to_roll:\n dice_to_roll.append(self.die_b)\n\n return dice_to_roll", "async def rolldice(self, ctx, ndm):\n\n output = '__Dice roll:__\\n'\n\n if ndm.lower() == 'dnd' or ndm.lower() == 'd&d':\n for d in (4, 6, 8, 10, 10, 12, 20):\n output += f'd{d}: {random.randint(1, int(d))}\\n'\n else:\n n, d = ndm.lower().split('d')\n\n for i in range(1, int(n) + 1):\n output += f'd{d}: {random.randint(1, int(d))}\\n'\n\n await ctx.send(output)", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format(name=name, roll=roll)\n return roll", "def dice_roll(name):\n roll = random.randint(1,6)\n print \"{name} shook the die \\\nand rolled a {roll}.\".format( name=name, roll=roll)\n return roll", "def roll(self, mask, target, args):\n d = re.match(r'(?P<rolls>\\d#)?(?P<dice>\\d+)d(?P<sides>\\d+)(?P<math>[\\+\\-\\*]\\d+)?', args['<dice>'])\n count = 
int(d.group('dice'))\n sides = int(d.group('sides'))\n\n rolls = int(d.group('rolls')) if d.group('rolls') else None\n math = d.group('math') if d.group('math') else None\n\n if rolls and rolls > 10:\n self.bot.privmsg(target, \"That's way too many rolls.\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll %d sets of dice\" % (mask.nick, target, rolls))\n if sides > 100:\n self.bot.privmsg(target, \"That's an absurd number of sides.\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll a %d sided dice\" % (mask.nick, target, sides))\n return\n if count > 100:\n self.bot.privmsg(target, \"That's too many dice!\")\n irc3.base.logging.log(irc3.base.logging.WARN,\n \"%s in %s tried to roll %d dice\" % (mask.nick, target, count))\n return\n\n dice = []\n result = \"\"\n\n for n in range(0, count):\n dice.append(self.rng.randint(1, sides))\n\n # Sum the results\n result += (\"=> %s\" % sum(dice))\n\n # Apply any math transforms\n if math:\n result += \"%s ==> %s\" % (d.group('math'), int(eval(\"%s %s\" % (sum(dice), math))))\n\n # Concatenate the description test\n if args[\"<description_text>\"]:\n result += (\": %s\" % ' '.join(args[\"<description_text>\"]))\n\n # Shell out to shadowrun if necessary\n if args[\"-s\"]:\n result += (\" %s\" % count_shadowrun(dice))\n\n self.msg(mask, target, str(dice) + result)", "def diceRoll():\n return randint(1,6)" ]
[ "0.6716512", "0.64137495", "0.6404577", "0.639976", "0.63927776", "0.61627877", "0.6144643", "0.61264604", "0.6005426", "0.58808833", "0.58372337", "0.58366555", "0.5821892", "0.57933265", "0.5792328", "0.57741827", "0.5772054", "0.57653105", "0.5753985", "0.57494026", "0.5747776", "0.5708116", "0.56742483", "0.56707746", "0.564327", "0.56048685", "0.5596284", "0.5588063", "0.5587533", "0.5575974" ]
0.65113264
1
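The document in the row above depends on a module-level `DICE_REGEX` that the snippet does not include. The pattern below is an assumed reconstruction that fits the query's description (`nDy`/`ndy` terms mixed with `+` and `-` numbers); it is a sketch, not the dataset's actual constant.

```python
import re
from typing import Optional

# Assumed pattern: one or more dice terms and/or plain numbers joined by + or -.
DICE_REGEX = re.compile(r'^[-+]?\d*[dD]\d+([-+](\d*[dD])?\d+)*$')


def is_dice_roll(roll: str) -> Optional[re.Match]:
    return DICE_REGEX.match(roll)


assert is_dice_roll("2d6") is not None
assert is_dice_roll("1D20+4") is not None
assert is_dice_roll("2d6+1d4-1") is not None
assert is_dice_roll("not a roll") is None
```

Returning the match object rather than a plain bool keeps the function truthy in conditionals while still letting callers inspect the matched text.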
initialize zmq publisher socket
def __get_zmq_pub(self): print("Publishing to tcp://127.0.0.1:%d channel: tweets" % self.port) context = zmq.Context() socket = context.socket(zmq.PUB) socket.bind("tcp://127.0.0.1:%d" % self.port) return socket
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, ip='127.0.0.1', port='50020'):\n self.ip = ip \n self.port = port\n self.ctx = zmq.Context()\n self.socket = zmq.Socket(self.ctx, zmq.REQ) # this is pub socket", "def initzmq(self):\n\n if \"topics\" not in self.configData:\n raise Exception(\"Topics not found in %s\" % self.configPath)\n\n for topic in self.configData['topics']:\n addr = self.gen_address(topic['protocol'], topic['address'],\n topic['port'])\n socket = self.build_socket(topic['paradigm'], topic['topic'], addr)\n self.topics[topic['name']] = socket", "def setup_subscriber(publisher_address):\n print(\"Subscribing to server on {}\".format(publisher_address))\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(publisher_address)\n filter = \"\"\n # the following two lines are for Python2 compatability\n if isinstance(filter, bytes):\n filter = filter.decode(\"ascii\")\n socket.setsockopt_string(zmq.SUBSCRIBE, filter)\n return socket", "def connect(self):\n self.socket.connect(f'tcp://{self.ip}:{self.port}')\n self.socket.send_string('PUB_PORT')\n self.pub_port = self.socket.recv_string()\n self.pub_socket = zmq.Socket(self.ctx, zmq.PUB)\n self.pub_socket.connect(f\"tcp://{self.ip}:{self.pub_port}\")", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self", "def __init__(self, port=1071):\n\n context = zmq.Context()\n\n self.socket = context.socket(zmq.REP)\n self.socket.bind('tcp://*:' + str(port))\n\n self.socket.recv()", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def __init__(self, store=None):\n self.sockets = []\n self.poller = zmq.core.poll.Poller()\n self.mh = MessageHandler()\n self.store = store", "def __init__(self, creator_socket):\n self.__socket = creator_socket\n logger.info(BUNDY_SOCKCREATOR_INIT)", "def init_connections(self):\n context = zmq.Context()\n self.sock_reply = context.socket(zmq.REQ)\n self.sock_reply.connect(self.sock_consumer_url)\n # Informs 
prev_stage that I am ready to work\n self.sock_reply.send_pyobj(\"READY\")\n # Create and register poller\n self.poll = zmq.Poller()\n self.poll.register(self.sock_reply, zmq.POLLIN)\n return True", "def main(connection_file):\n\n ctx = zmq.Context.instance()\n\n with open(connection_file) as f:\n cfg = json.loads(f.read())\n\n reg_url = cfg['interface']\n iopub_port = cfg['iopub']\n iopub_url = f\"{reg_url}:{iopub_port}\"\n\n session = Session(key=cfg['key'].encode('ascii'))\n sub = ctx.socket(zmq.SUB)\n\n # This will subscribe to all messages:\n sub.SUBSCRIBE = b''\n # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout\n # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes\n # to everything from engine 1, but there is no way to subscribe to\n # just stdout from everyone.\n # multiple calls to subscribe will add subscriptions, e.g. to subscribe to\n # engine 1's stderr and engine 2's stdout:\n # sub.SUBSCRIBE = b'engine.1.stderr'\n # sub.SUBSCRIBE = b'engine.2.stdout'\n sub.connect(iopub_url)\n while True:\n try:\n idents, msg = session.recv(sub, mode=0)\n except KeyboardInterrupt:\n return\n # ident always length 1 here\n topic = idents[0].decode('utf8', 'replace')\n if msg['msg_type'] == 'stream':\n # stdout/stderr\n # stream names are in msg['content']['name'], if you want to handle\n # them differently\n print(\"{}: {}\".format(topic, msg['content']['text']))\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)", "def setup(self):\n # create the pull socket (to communicate with this actor, others\n # process have to connect a push socket to this socket)\n self.pull_socket, pull_port = self._create_socket(zmq.PULL, -1)\n\n # create the control socket (to control this actor, a process have to\n # connect a pair socket to this socket with the `control` method)\n self.control_socket, ctrl_port = self._create_socket(zmq.PAIR, 0)\n\n self.pull_socket_address = LOCAL_ADDR + ':' + str(pull_port)\n self.control_socket_address = LOCAL_ADDR + ':' + str(ctrl_port)\n\n self._pull_port.value = pull_port\n self._ctrl_port.value = ctrl_port\n self._values_available.set()", "def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)", "def 
init_client():\n init_config()\n begin_sending_packets()", "def __init__(self, factory, endpoint=None, identity=None):\n self.factory = factory\n self.endpoints = []\n self.identity = identity\n self.socket = Socket(factory.context, self.socketType)\n self.queue = deque()\n self.recv_parts = []\n self.read_scheduled = None\n\n self.fd = self.socket_get(constants.FD)\n self.socket_set(constants.LINGER, factory.lingerPeriod)\n\n if not ZMQ3:\n self.socket_set(\n constants.MCAST_LOOP, int(self.allowLoopbackMulticast))\n\n self.socket_set(constants.RATE, self.multicastRate)\n\n if not ZMQ3:\n self.socket_set(constants.HWM, self.highWaterMark)\n else:\n self.socket_set(constants.SNDHWM, self.highWaterMark)\n self.socket_set(constants.RCVHWM, self.highWaterMark)\n\n if ZMQ3 and self.tcpKeepalive:\n self.socket_set(\n constants.TCP_KEEPALIVE, self.tcpKeepalive)\n self.socket_set(\n constants.TCP_KEEPALIVE_CNT, self.tcpKeepaliveCount)\n self.socket_set(\n constants.TCP_KEEPALIVE_IDLE, self.tcpKeepaliveIdle)\n self.socket_set(\n constants.TCP_KEEPALIVE_INTVL, self.tcpKeepaliveInterval)\n\n if self.identity is not None:\n self.socket_set(constants.IDENTITY, self.identity)\n\n if endpoint:\n self.addEndpoints([endpoint])\n\n self.factory.connections.add(self)\n\n self.factory.reactor.addReader(self)\n self.doRead()", "def _init_socket_tcp(self, worker_id):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, self.port))\n if len(self.sockets) - 1 < worker_id:\n self.sockets.append(MessageSocket(sock))\n else:\n # socket was already initialized, MessageSocket implements a try:catch\n self.sockets[worker_id].close()\n self.sockets[worker_id] = MessageSocket(sock)", "def connect_to_worker():\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:5555\")\n return socket", "def start_server(self) -> None:\n with self.socket.bind(self.address):\n print(\"ZeroMQ Server listening at {}\".format(self.address))\n while True:\n payload_rx = self.socket.recv(flags=0)\n if payload_rx:\n self.decode_payload(payload_rx)\n self.socket.send_string(self.reply(), flags=0, copy=False)", "def __init__(self, topic, message_type): \n self.topic = topic\n \n # find message type\n package, message = message_type.split('/')\n m = load_pkg_module(package)\n\n m2 = getattr(m, 'msg')\n self.message = getattr(m2, message)\n self.publisher = rospy.Publisher(topic, self.message)", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def zmq_qry_pub(context):\n app.logger.info(\"zmq_qry_pub started\")\n socket = context.socket(zmq.PUB)\n socket.connect('tcp://127.0.0.1:7000')\n\n timestamps = ['0810', '0811', '0812']\n idx = EquityIndex('CAC')\n\n # for ts in cycle(timestamps):\n for ts in timestamps:\n price_data = idx.components_last_px(ts)\n\n for topic, msg_data in price_data.iteritems():\n if msg_data:\n # push the code/ticker into the dict\n msg_data['ticker'] = topic\n # reformat with a colon\n msg_data['ts'] = ts[:2] + ':' + ts[2:]\n # and jsonify....\n msg = json.dumps(msg_data)\n socket.send(msg)\n\n gevent.sleep(WAIT)\n\n app.logger.info(\"zmq_qry_pub closed\")", "def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()", "def meta_trader_connector():\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(SOCKET_LOCAL_HOST)\n return socket", "def start(self):\n self._connect()\n 
self._init_exchange()\n self._init_queue()\n self._bind_queue()", "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "def __init__(self,sub_topic=\"\",pub_topic=\"\",data_type=None,tag=\"\",alt_type=None):\n self.sub_topic=sub_topic;\n self.pub_topic=pub_topic;\n self.data_type=data_type;\n self.alt_type=alt_type;\n self.tag=tag;\n self.subscriber=rospy.Subscriber(self.sub_topic+self.tag,self.data_type, self.callback_function,queue_size=20);\n self.message_publisher=None;", "def connect(self):\n if self._zerorpc:\n return\n try:\n self._zerorpc = _ZeroRPCClient(connect_to=self._address, timeout=self._timeout)\n self._zerorpc._events.setsockopt(zmq.LINGER, 0) # when we teardown, we want to discard all messages\n except:\n self._zerorpc = None\n raise", "def __init():\n global sock, dstaddr\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n port = conf.get('plog', 'port')\n dstaddr = ('localhost', int(port))" ]
[ "0.78729033", "0.695564", "0.6900721", "0.6871261", "0.68266183", "0.6776164", "0.67308486", "0.6717901", "0.6422498", "0.6096077", "0.60760754", "0.6049589", "0.5961509", "0.5946767", "0.5928848", "0.59046185", "0.58697796", "0.583556", "0.5797219", "0.5789976", "0.5779875", "0.57580197", "0.571049", "0.56970537", "0.56929183", "0.5686143", "0.56465685", "0.5646136", "0.56154096", "0.5606366" ]
0.6983764
1
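For completeness, a subscriber counterpart to the publisher in the row above is sketched here. The fixed port number is an assumption (the original reads it from `self.port`), and the `tweets` prefix mirrors the channel named in the printed message.

```python
import zmq


def get_zmq_sub(port: int = 5556) -> zmq.Socket:
    # Assumed port; the publisher above binds to whatever self.port holds.
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://127.0.0.1:%d" % port)
    # PUB/SUB filtering is prefix-based, so this receives messages
    # whose payload starts with "tweets".
    socket.setsockopt_string(zmq.SUBSCRIBE, "tweets")
    return socket
```

This only lines up with the publisher if it sends strings prefixed with the channel name, e.g. `socket.send_string("tweets " + payload)` on the PUB side.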
Checks if a guess letter matches a target letter
def match(self, target, guess): return guess == target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def correct_guess(self): # relies on the results from replace_letter() | helper function to display_correct_guess()\n if len(self.guess) == 1:\n if self.guess in self.chosen_word:\n self.replace_letter() # replaces display_word with correctly guessed letters\n return True\n return False", "def check_guess(self, user_guess):\n return user_guess in self.active_phrase", "def check_win(secret_word, old_letters_guessed):\n j=len(secret_word)\n for i in secret_word:\n j -= 1\n if i in old_letters_guessed:\n if j == 0:\n return True\n else: \n continue\n else:\n return False", "def test_same_letter_twice(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"anntn\")\n\n gallows.main()\n\n self.xprint.assert_any_call(\"You have already guessed that letter. \"\n \"Choose again.\")", "def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)", "def is_word_guessed(secret_word, letters_guessed):\n for char in secret_word:\n flag = False\n for letter in letters_guessed:\n if char == letter:\n flag = True\n if flag == False:\n break\n\n return flag", "def check_win(secret_word, old_letters_guessed):\n for i in secret_word:\n if i not in old_letters_guessed:\n return False\n # all letters from secret_word are in old_letters_guessed - the player won\n return True", "def incorrect_guess(self,\n letter): # relies on sanitise_guess, add_previous_guess(), display_correct_guess() & draw()\n if not self.sanitize_guess(letter): # ensures that it is alphabetical input\n return False\n if not self.add_previous_guess(): # ensures that it hasn't already been guessed\n return False\n if not self.display_correct_guess(): # ensures that it is not a correct guess\n self.attempts -= 1\n\n if self.attempts <= 0:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n Donatello.turtle_focused_text(\n f\"Oh no! You ran out of attempts. The word was '{self.chosen_word.upper()}'\")\n return False\n else:\n Donatello.turtle_text(f\"Wrong guess! 
Attempts left: {self.attempts}\")\n self.draw()\n Donatello.draw_word(self.display_word)\n return False, self.attempts", "def check_win(secret_word, old_letters_guessed):\r\n if show_hidden_word(secret_word, old_letters_guessed) == secret_word:\r\n return True\r\n else:\r\n return False", "def evaluate_guess(secret_word, guesses, ip):\n if len(ip) > 1:\n return \"Only single letter guesses\", False\n if not i.islpha():\n return \"Only alphabet\", False\n if ip in guesses:\n return \"Already guessed {}\".format(ip), False\n\n return \"\", True", "def check_valid_input(letter_guessed, old_letters_guessed):\r\n\tletter_guessed = letter_guessed.lower()\r\n\tif (len(letter_guessed) > 1):\r\n\t\treturn False\r\n\tif (not letter_guessed.isalpha()):\r\n\t\treturn False\r\n\tif (letter_guessed in old_letters_guessed):\r\n\t\treturn False\r\n\treturn True", "def checkLetter():\n\tguess = False\n\twhile guess != True:\n\t\tguess = str(raw_input(\"Guess a letter: \"))\n\t\tif guess.isalpha() and len(guess) == 1 :\n\t\t\treturn guess\n\t\telif not guess.isalpha() or len(guess) > 1:\n\t\t\tprint \"The input may be one letter only!\"\n\t\telse:\n\t\t\tprint \"Error in checkLetter\"", "def correct_word(self): # is a helper function to display_correct_guess()\n if self.guess == self.chosen_word: # they guess the whole word correctly\n self.display_word = self.chosen_word # process will continue until it reaches ***\n return True, self.display_word\n else:\n return False", "def check_valid_input(letter_guessed, old_letters_guessed):\n if len(letter_guessed) >= 2 or not (letter_guessed.islower()) or letter_guessed in old_letters_guessed:\n return False\n else:\n return True", "def check_if_guessed(the_guess, word_to_guess):\n\treturn word_to_guess.find(the_guess)", "def letter_picker(self):\r\n \r\n self.letter_count = dict.fromkeys(string.ascii_lowercase, 0) # reset\r\n for word in self.valid_words:\r\n word = \"\".join(set(word)) # removes duplicate letters in valid words\r\n for letter in word:\r\n self.letter_count[letter] += 1\r\n for letter in self.guesses: # prevents repeating guesses\r\n self.letter_count[letter] = 0\r\n self.chosen_letter = max(self.letter_count, key=self.letter_count.get)\r\n self.guesses.append(self.chosen_letter)\r\n print(f\"{self.chosen_letter} is the letter I think is right\")", "def guess_word(ga, word, ch):\n flag = 0\n sd = stringDatabase\n gameword = list(word)\n gamewordlen = len(gameword)\n\n # Checks if the option/choice is 'l'\n if ch == 'l':\n gl = sd.StringDatabase.get_letter()\n if not gl:\n return\n # print('here')\n if gl in gameword:\n if gl in ga.guess_list:\n print('Already flipped the letter, Guess another letter')\n return\n n = gameword.count(gl)\n print('You found', n, 'letter/s')\n for i in range(gamewordlen):\n if gameword[i] == gl:\n ga.guess_list[i] = gl\n if ga.guess_list == gameword:\n flag = 1\n ga.status = 'Success'\n print('Correct Guess!!')\n ga.calculate_score()\n else:\n print('You found 0 new letter/s, Try Again')\n ga.missed_letter += 1\n if ga.score == 0:\n ga.score -= 1\n else:\n ga.score -= (ga.score/4)\n per = (ga.score*10)/100\n ga.score -= per\n\n # Checks if the option/choice is 'g'\n if ch == 'g':\n gl = list(sd.StringDatabase.get_word())\n # print('\\n')\n if gl == gameword:\n count = 0\n for i in range(gamewordlen):\n if gameword[i] != ga.guess_list:\n count += 1\n if count >= 1:\n ga.calculate_score()\n ga.score += 10\n ga.status = 'Success'\n print('Correct guess!!')\n flag = 1\n else:\n ga.bad_guess += 1\n print('Wrong guess, Try 
Again')\n per = (ga.score * 30) / 100\n ga.score -= per\n\n # Checks if the option/choice is 't'\n if ch == 't':\n ga.score = ga.score/2\n ga.calculate_minus_score()\n ga.status = 'Gave up'\n print('Correct word is: ' + word)\n flag = 1\n\n return flag", "def check_valid_input(letter_guessed, old_letters_guessed):\n if (len(letter_guessed) == 1) and letter_guessed.isalpha() and (letter_guessed not in old_letters_guessed):\n return True\n else:\n return False", "def is_word_guessed(secret_word, letters_guessed):\n\n for letter in secret_word:\n if letter in letters_guessed:\n pass\n else:\n return False\n return True", "def check_guess(guess, secret_word):\n\n if guess == \"\" or len(guess) > 1 and len(guess) != len(secret_word) or guess.isdigit():\n message_to_player = \"YOU MUST INPUT A LETTER OR GUESS THE ENTIRE WORD.\"\n\n elif len(guess) == len(secret_word) and guess == secret_word:\n message_to_player = \"YOU GUESSED THE WORD. IT WAS \" + secret_word + \".\"\n game_won()\n\n elif secret_word != \"\" and guess in secret_word:\n message_to_player = \"THAT LETTER IS IN THE WORD! GOOD GUESS!!\"\n player.letters_guessed.append(guess)\n\n elif guess not in player.letters_guessed:\n message_to_player = \"WRONG! TRY AGAIN!\"\n\n player.add_body_part()\n player.letters_guessed.append(guess)\n\n else:\n message_to_player = \"YOU ALREADY GUESSED THAT LETTER!!!\\n\\n\"\n\n update(secret_word, \"Letters guessed:\\n\\t\" + str(player.letters_guessed) + \"\\n\\n\" + message_to_player)", "def close(self, target, guess):\r\n return ord(target)-5 <= ord(guess) and ord(guess) <= ord(target)+5", "def check_win(secret_word, old_letters_guessed):\n for letters_guessed in secret_word:\n if letters_guessed in old_letters_guessed:\n continue\n else:\n return False\n return True", "def check_valid_input(letter_guessed, old_letters_guessed):\n # if exactly 1-char-long and is from english alphabet and hasn't been previously guessed\n # using ('a' <= letter_guessed <= 'z') because isalpha() returns True for multiple languages, not only english\n return (len(letter_guessed) == 1) and ('a' <= letter_guessed <= 'z') and (letter_guessed not in old_letters_guessed)", "def guess_letter(self):\r\n letter = input(\"# Enter a Letter :\")\r\n if not letter:\r\n print(\"Please Enter a Valid Value\")\r\n else:\r\n result = game_instance.check_letter(letter)\r\n\r\n if result == \"NOT FOUND\":\r\n print(\"WRONG. No corresponding letters found in the word. Try Again!\")\r\n else:\r\n temp = list(self.current_word)\r\n count=0;\r\n for x in result:\r\n count+=1\r\n temp[x] = letter\r\n self.current_word = \"\".join(temp)\r\n print(\"Good Job. 
You Found \"+str(count)+\" Letters.\")", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def check_guess(letter, letters_pending, letters_found):\n\n # correct guess\n if letter in letters_pending:\n letter_index = letters_pending[letter].pop(0) # return first letter's index\n letters_found.add(letter_index)\n \n # check if list is empty\n if len(letters_pending[letter]) == 0:\n del letters_pending[letter]\n \n return True\n\n return False", "def get_guess(already_guessed):\n\n while True:\n print('Guess a letter.')\n guess = (input()).lower()\n if len(guess) != 1:\n print('Please enter a single letter.')\n elif guess == ' ':\n print('Space is not a valid entry. Please enter a single letter.')\n elif guess in already_guessed:\n print('\"Already guessed the letter. Choose again.')\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\n print('Please enter a LETTER.')\n else:\n return guess", "def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")", "def is_valid_input(guess_letter):\r\n length = len(guess_letter)\r\n\r\n if length > 1 and not guess_letter.isalpha():\r\n return False\r\n elif not guess_letter.isalpha():\r\n return False\r\n elif length > 1:\r\n return False\r\n else:\r\n return True", "def check(word, guesses):\n status = \"\" # Current status of guess\n last_guess = guesses[-1]\n matches = 0 # Number of occurrences of last_guess in word\n #print(\"You guessed\", last_guess)\n\n matches = word.count(last_guess)\n \n if matches != 0:\n print(\"The word has {} {}'s.\".format(matches,last_guess))\n i = 0\n while i < len(word):\n if last_guess == word[i]:\n status += last_guess\n else:\n status += \"*\"\n i += 1\n else:\n print(\"Sorry, that appears 0 times.\")\n status += \"*\" * len(word)\n\n print(\"You've made {} guesses and gotten {}\".format(len(guesses),status))\n\n # Loop through word checking if each letter is in guesses\n # If it is, 
append the letter to status\n # If it is not, append an asterisk (*) to status\n # Also, each time a letter in word matches the last guess,\n # increment matches by 1.\n\n # Write a condition that outputs one of the following when\n # the user's last guess was \"A\":\n # 'The word has 2 \"A\"s.' (where 2 is the number of matches)\n # 'The word has one \"A\".'\n # 'Sorry. The word has no \"A\"s.'\n\n return status" ]
[ "0.724141", "0.6917954", "0.68295354", "0.6773037", "0.6772409", "0.6756728", "0.67525744", "0.67267394", "0.66725117", "0.6660826", "0.66500235", "0.6632575", "0.661038", "0.6583722", "0.6575876", "0.65603286", "0.6557356", "0.6556453", "0.6552717", "0.65468127", "0.6529176", "0.65230584", "0.6510601", "0.65029085", "0.64937234", "0.6440379", "0.6424502", "0.64132637", "0.6399337", "0.6389691" ]
0.75668406
0
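The equality check above and the `up` comparison in the row that follows read like hint helpers for a letter-guessing game. The `hint` wrapper below is an illustrative assumption showing how such checks typically combine; it is not taken from the dataset.

```python
def hint(target: str, guess: str) -> str:
    # Mirrors the two rows: exact match first, otherwise report whether
    # the guessed letter sits above or below the target in the alphabet.
    if guess == target:
        return "match"
    return "up" if ord(target) < ord(guess) else "down"


assert hint("m", "m") == "match"
assert hint("c", "t") == "up"    # guess is above the target letter
assert hint("t", "c") == "down"
```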
Checks if a letter in a guess is above the target letter in the alphabet. Returns true if up; false is assumed to be down, since this should never be called if there is a match.
def up(self, target, guess): return ord(target) < ord(guess)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self, target, guess):\r\n return ord(target)-5 <= ord(guess) and ord(guess) <= ord(target)+5", "def check_guess(self, user_guess):\n return user_guess in self.active_phrase", "def check_the_guess(guess, a_followers, b_followers):\n if a_followers > b_followers:\n return guess == \"a\"\n else:\n return guess == \"b\"", "def match(self, target, guess):\r\n return guess == target", "def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True", "def check_guess(number, guess):\r\n if number == guess:\r\n return False, \"you won.\"\r\n elif number < guess:\r\n return True, \"too high.\"\r\n else:\r\n return True, \"too low.\"", "def check_level(self):\n \n temp_flag = True\n \n for i in range(len(self.letters_in_play)):\n if self.responses_correct[i] < self.max_correct:\n levelup_flag = False\n\n if temp_flag:\n self.current_level += 1\n print(\"Level up\")\n if self.current_level > len(self.levels)-1:\n self.current_level = len(self.levels)-1\n self.update_letters_in_play\n print(\"Level up\")", "def incorrect_guess(self,\n letter): # relies on sanitise_guess, add_previous_guess(), display_correct_guess() & draw()\n if not self.sanitize_guess(letter): # ensures that it is alphabetical input\n return False\n if not self.add_previous_guess(): # ensures that it hasn't already been guessed\n return False\n if not self.display_correct_guess(): # ensures that it is not a correct guess\n self.attempts -= 1\n\n if self.attempts <= 0:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n Donatello.turtle_focused_text(\n f\"Oh no! You ran out of attempts. The word was '{self.chosen_word.upper()}'\")\n return False\n else:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n self.draw()\n Donatello.draw_word(self.display_word)\n return False, self.attempts", "def check_for_win(self, board_now, letter):\r\n if ((board_now[6] == letter and board_now[7] == letter and board_now[8] == letter) or\r\n (board_now[3] == letter and board_now[4] == letter and board_now[5] == letter) or\r\n (board_now[0] == letter and board_now[1] == letter and board_now[2] == letter) or\r\n (board_now[6] == letter and board_now[3] == letter and board_now[0] == letter) or\r\n (board_now[7] == letter and board_now[4] == letter and board_now[1] == letter) or\r\n (board_now[8] == letter and board_now[5] == letter and board_now[2] == letter) or\r\n (board_now[6] == letter and board_now[4] == letter and board_now[2] == letter) or\r\n (board_now[8] == letter and board_now[4] == letter and board_now[0] == letter)):\r\n return True\r\n return False", "def check_guess_if_previous(self): # is a helper function to add_previous_guess()\n if self.guess in self.past_guesses:\n return False\n else:\n return True", "def correct_guess(self): # relies on the results from replace_letter() | helper function to display_correct_guess()\n if len(self.guess) == 1:\n if self.guess in self.chosen_word:\n self.replace_letter() # replaces display_word with correctly guessed letters\n return True\n return False", "def check_guess(guess):\n while True:\n print(\" Was \" + str(guess) + \" too high, too low, or correct?\")\n answer = input()\n answer= answer.lower()\n \n if answer == 'too low' or answer == 'to low':\n return -1\n elif answer == 'too high' or answer == 'to high':\n return 1\n elif answer == 'correct':\n return 0\n else:\n print(\"I don't understand. 
Please enter 'too low', too high', or 'correct'.\")", "def check_answer(guess, a_followers, b_followers):\n if a_followers > b_followers:\n return guess == \"a\"\n else:\n return guess == \"b\"", "def check_answer(guess, a_followers, b_followers):\n if a_followers > b_followers:\n return guess == \"a\"\n else:\n return guess == \"b\"", "def check_valid_input(letter_guessed, old_letters_guessed):\n # if exactly 1-char-long and is from english alphabet and hasn't been previously guessed\n # using ('a' <= letter_guessed <= 'z') because isalpha() returns True for multiple languages, not only english\n return (len(letter_guessed) == 1) and ('a' <= letter_guessed <= 'z') and (letter_guessed not in old_letters_guessed)", "def check_win(secret_word, old_letters_guessed):\n j=len(secret_word)\n for i in secret_word:\n j -= 1\n if i in old_letters_guessed:\n if j == 0:\n return True\n else: \n continue\n else:\n return False", "def is_previous_order(current: str, target: str):\n c = c_major_chord_pos[current]\n t = c_major_chord_pos[target]\n if t == 7:\n t = 0\n if t + 1 == c:\n return True\n return False", "def check_valid_input(letter_guessed, old_letters_guessed):\n if len(letter_guessed) >= 2 or not (letter_guessed.islower()) or letter_guessed in old_letters_guessed:\n return False\n else:\n return True", "def is_upwards(line:tuple)->bool:\n return line[1][1] > line[0][1]", "def get_guess(self):\n guess = self.player.higher_or_lower", "def check_valid_input(letter_guessed, old_letters_guessed):\r\n\tletter_guessed = letter_guessed.lower()\r\n\tif (len(letter_guessed) > 1):\r\n\t\treturn False\r\n\tif (not letter_guessed.isalpha()):\r\n\t\treturn False\r\n\tif (letter_guessed in old_letters_guessed):\r\n\t\treturn False\r\n\treturn True", "def correct_guess(self, guess):\n \n if self.code == guess:\n return True\n return False", "def check_valid_input(letter_guessed, old_letters_guessed):\n if (len(letter_guessed) == 1) and letter_guessed.isalpha() and (letter_guessed not in old_letters_guessed):\n return True\n else:\n return False", "def checkMove(guess, xPos, yPos):\n\n\t# Return 0 if x position or y position are not valid\n\tif(xPos not in range(0, 5) or yPos not in range(0, 5)):\n\t\treturn 0\n\n\t# Return 0 f the guessed position is not water\n\tif(guess[yPos][xPos] != \"~\"):\n\t\treturn 0\n\n\treturn 1", "def guess(self, char: str) -> bool:\r\n equal_char = self.char.lower() == char.lower()\r\n\r\n if equal_char and self.was_guessed:\r\n raise ValueError(f'{char} har already been guessed')\r\n\r\n if equal_char:\r\n self.was_guessed = True\r\n return True\r\n\r\n return False", "def goal_reached(self):\r\n pos_0=self.goal[0]\r\n pos_1=self.goal[1]\r\n #self.start_score=self.string(self.start[0],self.start[1])\r\n #self.data_with_string[self.start_score]=self.start\r\n #self.goal_score=self.string(pos_0,pos_1)\r\n if self.h(self.current_score[0],self.current_score[1],self.current_score[2]) <=10 :\r\n self.goal_score=self.string(self.current_score[0],self.current_score[1],self.current_score[2])\r\n print(\"goal_reached\")\r\n #print(len(self.expanded))\r\n #print(\"self.expanded\",self.expanded)\r\n return True\r\n return False", "def is_word_complete(word, guesses):\n # go through each letter of the word\n # if the letter is not in guesses then False\n # if you don't get any false, then you can consider True\n\n for letter in word:\n if letter not in guesses:\n return False\n return True", "def check_if_guessed(the_guess, word_to_guess):\n\treturn word_to_guess.find(the_guess)", "def 
check_win(secret_word, old_letters_guessed):\n for i in secret_word:\n if i not in old_letters_guessed:\n return False\n # all letters from secret_word are in old_letters_guessed - the player won\n return True", "def leech(self) -> bool:\n return self._algorithm.is_leech(self._stat)" ]
[ "0.64126676", "0.6217245", "0.6208104", "0.6193162", "0.60669065", "0.5858622", "0.5852677", "0.5843748", "0.58351785", "0.58032477", "0.5785905", "0.5779188", "0.5771472", "0.57190967", "0.57088804", "0.56698114", "0.5663471", "0.56380683", "0.55878186", "0.55864286", "0.5575467", "0.5550489", "0.553591", "0.5506731", "0.5473053", "0.5466071", "0.5427324", "0.5415047", "0.539708", "0.5393481" ]
0.7994412
0
Check if the guess letter is within 5 letters of the target
def close(self, target, guess): return ord(target)-5 <= ord(guess) and ord(guess) <= ord(target)+5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True", "def check_win(secret_word, old_letters_guessed):\n j=len(secret_word)\n for i in secret_word:\n j -= 1\n if i in old_letters_guessed:\n if j == 0:\n return True\n else: \n continue\n else:\n return False", "def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)", "def evaluate_guess(secret_word, guesses, ip):\n if len(ip) > 1:\n return \"Only single letter guesses\", False\n if not i.islpha():\n return \"Only alphabet\", False\n if ip in guesses:\n return \"Already guessed {}\".format(ip), False\n\n return \"\", True", "def check_win(secret_word, old_letters_guessed):\n for i in secret_word:\n if i not in old_letters_guessed:\n return False\n # all letters from secret_word are in old_letters_guessed - the player won\n return True", "def letter_picker(self):\r\n \r\n self.letter_count = dict.fromkeys(string.ascii_lowercase, 0) # reset\r\n for word in self.valid_words:\r\n word = \"\".join(set(word)) # removes duplicate letters in valid words\r\n for letter in word:\r\n self.letter_count[letter] += 1\r\n for letter in self.guesses: # prevents repeating guesses\r\n self.letter_count[letter] = 0\r\n self.chosen_letter = max(self.letter_count, key=self.letter_count.get)\r\n self.guesses.append(self.chosen_letter)\r\n print(f\"{self.chosen_letter} is the letter I think is right\")", "def is_valid_input(letter_guessed):\n\n #calulate the lengh of the letters\n NumberOfCharacters = (len(letter_guessed))\n #convert input letters to Underscore\n NumberOfUnderscore = (NumberOfCharacters * \"_\")\n\n\n # All the letters in English\n EnglishLetter = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMOPQRSTUVWXYZ\"\n\n\n if NumberOfCharacters > 1:\n print(\"false\")\n\n # If the user entered English character the string will print the character a non-English character (for example, a sign such as: &, *), the string will print \"E2\n elif letter_guessed in EnglishLetter:\n print(\"true\")\n else:\n print(\"false\")", "def check_valid_input(letter_guessed, old_letters_guessed):\n if len(letter_guessed) >= 2 or not (letter_guessed.islower()) or letter_guessed in old_letters_guessed:\n return False\n else:\n return True", "def check_valid_input(letter_guessed, old_letters_guessed):\n # if exactly 1-char-long and is from english alphabet and hasn't been previously guessed\n # using ('a' <= letter_guessed <= 'z') because isalpha() returns True for multiple languages, not only english\n return (len(letter_guessed) == 1) and ('a' <= letter_guessed <= 'z') and (letter_guessed not in old_letters_guessed)", "def check_guess(self, user_guess):\n return user_guess in self.active_phrase", "def 
correct_guess(self): # relies on the results from replace_letter() | helper function to display_correct_guess()\n if len(self.guess) == 1:\n if self.guess in self.chosen_word:\n self.replace_letter() # replaces display_word with correctly guessed letters\n return True\n return False", "def check_win(secret_word, old_letters_guessed):\r\n if show_hidden_word(secret_word, old_letters_guessed) == secret_word:\r\n return True\r\n else:\r\n return False", "def up(self, target, guess):\r\n return ord(target) < ord(guess)", "def new_round(guesses, letters_guessed = letters_guessed):\n\n # print(get_guessed_word(secret_word, letters_guessed) )\n print(\"You have \" + str(guesses) + \" guesses left.\")\n print(\"Available letters: \" + get_available_letters(letters_guessed))\n ans = input(\"Please guess a letter: \")\n if ans.isalpha():\n return ans.lower()\n else:\n return None", "def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")", "def is_word_guessed(secret_word, letters_guessed):\n for char in secret_word:\n flag = False\n for letter in letters_guessed:\n if char == letter:\n flag = True\n if flag == False:\n break\n\n return flag", "def is_valid_input(guess_letter):\r\n length = len(guess_letter)\r\n\r\n if length > 1 and not guess_letter.isalpha():\r\n return False\r\n elif not guess_letter.isalpha():\r\n return False\r\n elif length > 1:\r\n return False\r\n else:\r\n return True", "def check_win(secret_word, old_letters_guessed):\n for letters_guessed in secret_word:\n if letters_guessed in old_letters_guessed:\n continue\n else:\n return False\n return True", "def main():\n answer = random_word().upper()\n dashed_word = ''\n for i in range(len(answer)):\n dashed_word += '-'\n guess_times = 0\n while True:\n if guess_times == N_TURNS:\n # This is the last chance to guess and user failed\n print('You are completely hung :\\'(')\n break\n print('The word looks like: ' + dashed_word + '\\nYou have ' + str(N_TURNS - guess_times) + ' guesses left.')\n guess = input('Your Guess: ')\n if len(guess) == 1 and guess.isalpha():\n # Legal format\n guess = guess.upper()\n if answer.find(guess) != -1:\n # The guess is correct and should uncover the dashed_word\n print('You are correct!')\n dashed_word = uncover_dash(guess, answer, dashed_word)\n if not dashed_word.find('-') > -1:\n # No dash left.\n print('You win!!')\n break\n else:\n # Wrong guess\n guess_times += 1\n print('There is no ' + guess + '\\'s in the word.')\n else:\n 
print('Illegal format')\n print('The word was: ' + answer)", "def theWinnerIs(field, letter):\r\n return (field[1] == letter and field[2] ==letter and field[3] == letter\r\n or field[4] == letter and field[5] == letter and field[6] == letter\r\n or field[7] == letter and field[8] == letter and field[9] == letter\r\n or field[1] == letter and field[4] == letter and field[7] == letter\r\n or field[2] == letter and field[5] == letter and field[8] == letter\r\n or field[3] == letter and field[6] == letter and field[9] == letter\r\n or field[1] == letter and field[5] == letter and field[9] == letter\r\n or field[3] == letter and field[5] == letter and field[7] == letter)", "def check_valid_input(letter_guessed, old_letters_guessed):\r\n\tletter_guessed = letter_guessed.lower()\r\n\tif (len(letter_guessed) > 1):\r\n\t\treturn False\r\n\tif (not letter_guessed.isalpha()):\r\n\t\treturn False\r\n\tif (letter_guessed in old_letters_guessed):\r\n\t\treturn False\r\n\treturn True", "def is_word_guessed(secret_word, letters_guessed):\n\n for letter in secret_word:\n if letter in letters_guessed:\n pass\n else:\n return False\n return True", "def incorrect_guess(self,\n letter): # relies on sanitise_guess, add_previous_guess(), display_correct_guess() & draw()\n if not self.sanitize_guess(letter): # ensures that it is alphabetical input\n return False\n if not self.add_previous_guess(): # ensures that it hasn't already been guessed\n return False\n if not self.display_correct_guess(): # ensures that it is not a correct guess\n self.attempts -= 1\n\n if self.attempts <= 0:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n Donatello.turtle_focused_text(\n f\"Oh no! You ran out of attempts. The word was '{self.chosen_word.upper()}'\")\n return False\n else:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n self.draw()\n Donatello.draw_word(self.display_word)\n return False, self.attempts", "def game_code(user_input, secret_word, my_letters, guess_count):\n#if str.isalpha(myinput1) == True and myinput1 not in my_letters and guess_count > 0:\n if user_input in secret_word and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Good guess: ' + mytempstr1)\n return 0\n elif user_input in ['a','e','i','o','u'] and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 1\n elif len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! 
That letter is not in my word: ' + mytempstr1)\n return 2", "def minion_game(string):\n vowels = sum(len(string) - i for i, c in enumerate(string) if c in 'AEIOU')\n consonants = sum(len(string) - i for i, c in enumerate(string) if c not in 'AEIOU')\n\n if vowels == consonants:\n print(DRAW)\n elif vowels > consonants:\n print(f'{PLAYER_VOWELS} {vowels}')\n else:\n print(f'{PLAYER_CONSONANTS} {consonants}')", "def match(self, target, guess):\r\n return guess == target", "def check_valid_input(letter_guessed, old_letters_guessed):\n if (len(letter_guessed) == 1) and letter_guessed.isalpha() and (letter_guessed not in old_letters_guessed):\n return True\n else:\n return False", "def check_win(secret_word, old_letters_guessed):\n returned_list = list(secret_word)\n for i in range(len(secret_word)):\n if secret_word[i] in old_letters_guessed:\n returned_list[i] = secret_word[i] + ' '\n else:\n returned_list[i] = '_ '\n returned_str = ''.join(returned_list)\n\n string_no_spaces = returned_str.replace(\" \", \"\")\n if string_no_spaces == secret_word:\n return True\n else:\n return False", "def minion_game(string):\n vowels = ('A', 'E', 'I', 'O', 'U')\n kevin = 0\n stuart = 0\n length = len(string)\n for start in range(0, length):\n for end in range(start + 1, length + 1):\n substring = s[start:end]\n if substring[0] in vowels:\n kevin += 1\n else:\n stuart += 1\n\n # Find and print the winner and score or Draw\n if stuart > kevin:\n print('Stuart {}'.format(stuart))\n elif kevin > stuart:\n print('Kevin {}'.format(kevin))\n else:\n print('Draw')", "def check_for_win(self, board_now, letter):\r\n if ((board_now[6] == letter and board_now[7] == letter and board_now[8] == letter) or\r\n (board_now[3] == letter and board_now[4] == letter and board_now[5] == letter) or\r\n (board_now[0] == letter and board_now[1] == letter and board_now[2] == letter) or\r\n (board_now[6] == letter and board_now[3] == letter and board_now[0] == letter) or\r\n (board_now[7] == letter and board_now[4] == letter and board_now[1] == letter) or\r\n (board_now[8] == letter and board_now[5] == letter and board_now[2] == letter) or\r\n (board_now[6] == letter and board_now[4] == letter and board_now[2] == letter) or\r\n (board_now[8] == letter and board_now[4] == letter and board_now[0] == letter)):\r\n return True\r\n return False" ]
[ "0.6635689", "0.62288874", "0.60756695", "0.60500956", "0.59797996", "0.59784883", "0.59515333", "0.5945887", "0.5917881", "0.59072113", "0.587977", "0.5826533", "0.581762", "0.58094823", "0.580905", "0.5776484", "0.57689565", "0.5765766", "0.5741309", "0.57358634", "0.5733256", "0.5729564", "0.5721629", "0.57115155", "0.5706125", "0.56882185", "0.5676334", "0.5666979", "0.56387657", "0.5637493" ]
0.6528206
1
Get the names of all test programs by evaluating the SConscript file
def resolve_test_progs(sconscript_filename): reprg = re.compile(r"""^env.Program\(["'](.*?)['"]""") progs = [] for line in open(sconscript_filename): m = reprg.match(line.strip()) if m: progs.append(m.group(1)) return progs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_test_cases(program):\n\n return list(INFO[program].test_cases)", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def extract_programs(outputf):\t\n programs = []\n with open(outputf,'r') as f:\n\t combo_lines = f.readlines()\n for combo_line in combo_lines:\n combo = combo_line.split(' ',1)[1]\n\t programs.append(combo)\n return programs", "def list_tests(self, executable):\n # This will return an exit code with the number of tests available\n try:\n output = subprocess.check_output(\n [executable, \"--list-test-names-only\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n\n result = output.strip().split(\"\\n\")\n\n return result", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret", "def list_programs():\n return list(INFO)", "def main():\n cpp = read_file('studenci_cpp.txt')\n python = read_file('studenci_python.txt')\n return [student for student in cpp if student in python]", "def tests(c):\n results = [test(c, i) for i, test_path in enumerate(TEST_PATHS)]\n print('\\n\\n\\n############## SUMMARY ##############')\n for i, test_path in enumerate(TEST_PATHS):\n print(i, test_path, 'PASSED' if result[i] == 0 else 'FAILED')", "def _get_resource_test_names(self):\n\t\ttests = []\n\t\tfor resource in self.resources:\n\t\t\tpath = os.path.join(self.history_path, '*', '*', resource)\n\t\t\tself.logger.info(\"Looking for Inca tests in %s\" % path)\n\t\t\ttests.extend(glob.glob(path))\n\t\treturn tests", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def main():\n for filename in sys.argv[1:]:\n test(filename)", "def run(self, uname):\n\n out = []\n\n sol_dir = get_ex_solution(self.exname, uname)\n exec_path = join(sol_dir, \"solution\")\n test_out = join(sol_dir, \"test_out\")\n\n if not isfile(exec_path):\n return\n\n for test in self.testcases:\n in_path = get_test_in_path(test)\n out_path = get_test_out_path(test)\n\n test_proc = subprocess.Popen([exec_path],\n stdin=open(in_path),\n stdout=open(test_out, \"w+\"))\n try:\n test_proc.wait(timeout=self.timeout)\n except:\n out.append({\n 'comment': \"Execution timed out after {0}s.\".format(self.timeout),\n 'percentage': 0,\n 'suggestion': True,\n 'description': []\n })\n\n # Compare output to expected output\n with open(test_out) as actual_out:\n with open(out_path) as expected_out:\n res = list(unified_diff(\n list(actual_out), list(expected_out),\n fromfile=\"actual\", tofile=\"expected\"))\n\n if res:\n out.append({\n 'comment': \"Incorrect output in testcase {0}.\".format(test),\n 'percentage': 30,\n 'suggestion': False,\n 'description': res\n })\n\n\n\n return out", "def get_gtest_testlist_raw(path_to_executable: str):\n command_vector = [path_to_executable, '--gtest_list_tests']\n with subprocess.Popen(\n command_vector, 
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE) as process:\n out = process.communicate()[0]\n return out.decode('UTF-8')", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def extract_program_text(filename: str) -> List[str]:\n with open(filename) as conf:\n if filename.endswith('.tfstate'):\n configs = json.loads(conf.read())\n program_text = []\n resources = configs['modules'][0]['resources']\n for resource in resources:\n pattern = re.compile(\"signalform_detector.*\")\n if pattern.match(resource) is not None:\n program_text.append(\n re.sub(r'\\n +', '\\n', resources[resource]['primary']['attributes']['program_text']),\n )\n return program_text\n else:\n configs = conf.read()\n pattern = re.compile(r'program_text:.+(?:=>)?\\s+\\\"(.+)\\\"')\n return [re.sub(r'\\\\n +', '\\n', pattern_match) for pattern_match in re.findall(pattern, configs)]", "def _get_tests(cls, suite: dict):\n suite_file_name = \"{}.py\".format(str(suite[\"id\"]).replace('.', os.path.sep))\n with open(suite_file_name) as f:\n file_contents = f.read()\n module = ast.parse(file_contents)\n functions = [node for node in module.body if isinstance(node, ast.FunctionDef)]\n docs = [ast.get_docstring(f) for f in functions if f.name.startswith(cls.TEST_PREFIX)]\n return docs", "def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')", "def test_get_execution_list_noname(self):\n q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)\n qc = q_program.get_circuit()\n qr = q_program.get_quantum_register()\n cr = q_program.get_classical_register()\n qc.h(qr[0])\n qc.cx(qr[0], qr[1])\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n qobj = q_program.compile()\n result = q_program.get_execution_list(qobj, print_func=self.log.info)\n self.assertEqual(len(result), 1)", "def get_exec_names(wcl):\n\n execnamesarr = []\n exec_sectnames = intgmisc.get_exec_sections(wcl, pfwdefs.IW_EXECPREFIX)\n for sect in sorted(exec_sectnames):\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"section %s\" % sect)\n if 'execname' not in wcl[sect]:\n print \"Error: Missing execname in input wcl. 
sect =\", sect\n print \"wcl[sect] = \", miscutils.pretty_print_dict(wcl[sect])\n miscutils.fwdie(\"Error: Missing execname in input wcl\", pfwdefs.PF_EXIT_FAILURE)\n\n execnamesarr.append(wcl[sect]['execname'])\n\n return ','.join(execnamesarr)", "def visitTests(tests, grepStr=''):\n\n # First flatten the list of tests.\n testsFlat = []\n toCheck = [t for t in tests]\n while toCheck:\n test = toCheck.pop()\n if isinstance(test, unittest.TestSuite):\n toCheck += [t for t in test]\n else:\n if grepStr in str(type(test)):\n testsFlat.append(test)\n testsFlat.sort()\n\n # Follow the flattened list of tests and show the module, class\n # and name, in a nice way.\n lastClass = None\n lastModule = None\n \n grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)\n\n for t in testsFlat:\n moduleName, className, testName = t.id().rsplit('.', 2)\n \n # If there is a failure loading the test, show it\n if moduleName.startswith('unittest.loader.ModuleImportFailure'):\n print red(moduleName), \" test:\", t.id()\n continue\n\n if moduleName != lastModule:\n lastModule = moduleName\n print(\" - From %s.py (to run all use --allPrograms)\"\n % '/'.join(moduleName.split('.')) + grepPrint)\n\n\n if className != lastClass:\n lastClass = className\n print(\" ./xmipp test %s\" % className)", "def runTestSuites(self):\n \n self.testsuitesToXML()\n \n\n tss = []\n jobStatus = {}\n for t in self.testsuites:\n d = t.testsuitedir\n runner = os.path.join(self.basepath, 'testSuiteRunner.py')\n tdir = os.path.join(d, 'testsuite.out')\n cmd = 'python %s %s>& %s' % (runner, d,tdir)\n #print 'about to popen the cmd: %s' % cmd\n tss.append((t.name, popen2.Popen3(cmd)))\n jobStatus[t.name] = ('running', nowSecs())\n ntests = len(tss)\n printJobStatus(jobStatus)\n\n while tss:\n toRemove = [p for p in tss if p[1].poll() != -1]\n if toRemove:\n [tss.remove(p) for p in toRemove]\n for p in toRemove:\n jobStatus[p[0]] = ('completed', nowSecs())\n\n printJobStatus(jobStatus)\n time.sleep(10)\n\n print 'all %d tests have completed' % ntests", "def main():\n import argparse\n\n # parse sys.argv\n parser = argparse.ArgumentParser(description='stylecheck')\n parser.add_argument('-v', '--version', action='version',\n version=('%(prog)s ' + __version__))\n parser.add_argument('-r', '--root_dir', type=str, default='../../ken3/',\n help='root dir path (default: \\'../../ken3/\\')')\n args = parser.parse_args()\n\n # run each test\n result = list(run(pick_names(args.root_dir), args.root_dir))\n if result:\n print(result)\n return len(result)", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def testsuite():\n return unittest.TestLoader().discover(os.path.dirname(__file__))", "def test_case_5():\n print(\"*********Test_case_5***********\")\n result = find_files('.c', \"\")\n print(result)", "def get_cases(test_suite, test_name_regex):\n cases = []\n for 
test_case_name in dir(test_suite):\n test_case = getattr(test_suite, test_case_name)\n if callable(test_case) and re.match(test_name_regex, test_case_name):\n cases.append(test_case_name)\n\n return cases", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files" ]
[ "0.63810515", "0.6367894", "0.591205", "0.58999985", "0.5880722", "0.5872503", "0.5852329", "0.5840184", "0.58356047", "0.5792614", "0.5738614", "0.5733553", "0.5706752", "0.5701584", "0.56863666", "0.5659798", "0.5630291", "0.55575275", "0.554375", "0.5530323", "0.55250484", "0.55125153", "0.5466997", "0.5448855", "0.54475933", "0.5444608", "0.5426956", "0.54251057", "0.54214716", "0.5421069" ]
0.6682637
0
Counts the number of ways to run up a staircase with n steps
def count_ways(n): if n < 0: return 0 elif n == 0: return 1 else: total = 0 for i in range(1, min(n, 3) + 1): total += count_ways(n - i) return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_stair_ways(n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n return count_stair_ways(n - 1) + count_stair_ways(n - 2)", "def num_of_ways(n):\n if n == 0 or n == 1:\n return 1\n \n n_minus_2_step = 1\n n_minus_1_step = 1\n n_step = None\n\n #num_of_ways(n) = num_of_ways(n-1) + num_of_ways(n-2)\n for i in range(n-1):\n n_step = n_minus_1_step + n_minus_2_step\n n_minus_2_step = n_minus_1_step\n n_minus_1_step = n_step\n \n return n_step", "def number_of_ways(n):\r\n return number_of_ways_helper([1, 5, 10, 25], n)", "def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric", "def backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def count_paths_staircase(length_staircase, possible_actions):\n path = [0] * length_staircase\n # First we add our possible_actions to our path count\n for i in possible_actions:\n path[i - 1] = 1\n # Compute number of path combinations to every step\n for i in range(length_staircase):\n for j in possible_actions:\n k = i + j\n if k >= length_staircase:\n continue\n path[k] += path[i]\n return path", "def count(steps: List[int]):\n # this needs two passes but does them with a builtin\n # the factor 2x should be much smaller than the Python vs Builtin factor\n return steps.count(1), steps.count(3)", "def number_of_steps(self) -> int:\n return len(self.step_points)", "def num_trials(self):", "def number_of_iterations(self) -> int:\n pass", "def amount_of_stairs(n):\n\n matrix = [[0] * n for i in range(n)]\n\n for i in range(0, n):\n for j in range(1, i):\n matrix[i][j] = sum(matrix[i - j - 1][:j])\n matrix[i][i] = 1\n\n # print_matrix(matrix)\n return sum(matrix[n-1])", "def num_steps(self):\n return self.torsoStepCount() + 1", "def get_steps_num():\n return 0", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def n_timesteps(self) -> int:\n return len(self.time)", "def num_steps(self) -> int:\n return self._num_steps", "def countArrangement(self, n: int) -> int:\n def iter_digit(n):\n while n:\n yield n % 2\n n //= 2\n\n @lru_cache(None)\n def dfs(i, remains):\n if i == n+1:\n return 1\n cnt = 0\n for j, d in enumerate(iter_digit(remains)):\n if d == 0:\n continue\n if j%i == 0 or i%j == 0:\n remains ^= 2**j\n cnt += dfs(i+1, remains)\n remains ^= 2**j\n return cnt\n\n # starting from 11..10 (length is n+1)\n return dfs(1, 2**(n+1)-2)", "def total_steps(self):\n return self.turns + (self.rounds*self.game_length)", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def climbing_stairs(n):\n\tif n < 2:\n\t\treturn 1\n\tif n == 2:\n\t\treturn 2\n\treturn climbing_stairs(n-1) + climbing_stairs(n-2)", "def total_steps(self) -> global___Expression:", "def countArrangement(self, n: int) -> int:\n def dfs(i, remains: List[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in range(1, n+1):\n if remains[j] is None and (i%j == 0 or j%i == 0):\n remains[j] = i\n cnt += dfs(i+1, remains)\n remains[j] = None\n return cnt\n\n return dfs(1, [None]*(n+1))", "def numberOfSteps(num):\n steps = 0\n \n while num != 0:\n if num % 2 == 0:\n num /= 2\n steps += 1\n else:\n num -= 1\n steps += 1\n return steps", "def nb_triples(self) -> int:\n 
return 0", "def number_of_steps(molecule):\n # Thanks https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju\n elements = [el.group() for el in re.finditer(r'[A-Z][a-z]?', molecule)]\n rn_or_ar = [el for el in elements if el == 'Rn' or el == 'Ar']\n y_elements = [el for el in elements if el == 'Y']\n\n steps = len(elements) - len(rn_or_ar) - 2*len(y_elements) - 1\n\n return steps", "def get_num_pairs(seq):\n n = len(seq)\n return int(n * (n-1)/2) # sum of arphmetic progression (n-1)...1", "def countArrangement(self, n: int) -> int:\n @lru_cache(None)\n def dfs(i, remains: Set[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in remains:\n if i%j == 0 or j%i == 0:\n cnt += dfs(i+1, remains - {j})\n return cnt\n\n return dfs(1, frozenset(range(1, n+1)))", "def hailstone_steps(num):\n steps = 0\n\n while num > 1:\n steps = steps + 1\n if num % 2 == 1:\n num = int(num * 3 + 1)\n else:\n num = int(num / 2)\n\n return steps", "def test_step_count(self):\n inp = [(0, 0), (1, 1), (1, 2)]\n expected = 2\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)", "def count_turns(spline):\n func = spline(spline._data[0])\n turns = sum(abs(diff(sign(diff(func))))) / 2\n return turns" ]
[ "0.7937585", "0.74169403", "0.7050495", "0.6761067", "0.6650301", "0.6604587", "0.65189266", "0.64957833", "0.6475029", "0.6464334", "0.64047503", "0.63859785", "0.63114244", "0.6301743", "0.6287839", "0.6278423", "0.6262987", "0.62302816", "0.622684", "0.6208675", "0.6200962", "0.61644", "0.61360615", "0.6106511", "0.6104732", "0.60955375", "0.60953844", "0.6029742", "0.60147715", "0.5999575" ]
0.75719696
1
Similar to the passing cars problem, the passing fish eat each other. Each downstream fish is pushed onto the downstream stack. Upstream fish are considered survivors if not eaten. When an upstream fish is encountered, the downstream fish in its path are popped. Each fish size is compared and the smaller discarded.
def solution(A, B): downstream = [] survivor = [] for i in range(0, len(A)): print(f'pass {i}') if B[i]: # If a fish is swimming downstream place him in that stack downstream.append(A[i]) # print(f'survivor: <--{survivor}, downstream: {downstream}--> {A[i]} is A[{i}] -- Downstream encountered') continue elif downstream: # If the fish is swiming upstream and there are fish in the downstream while downstream: if downstream[-1] < A[i]: # This fish is compared to the downstream fish. # print(f'survivor: <--{survivor}, downstream: {downstream}--> {A[i]} is A[{i}]') downstream.pop() else: break # When this current fish is eaten by a downstream fish else: # All the downstream fish are eaten by the current upstream fish survivor.append(A[i]) # print(f'survivor: <--{survivor}, downstream: {downstream}-->') else: # All the downstream fish are eaten survivor.append(A[i]) # print(f'survivor: <--{survivor}, downstream: {downstream}-->') # print(f'survivor: <--{survivor}, downstream: {downstream}-->') return len(survivor+downstream)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PantrySorterEmptyShelf(Shelf: Shelf, stackableFood, unstackableFood):\n print(stackableFood, unstackableFood)\n unstackableFood = sorted(unstackableFood, reverse=True)\n stackableFood = sorted(stackableFood, reverse=True)\n print(\"sorted\\n{}\\n{}\".format(unstackableFood, stackableFood))\n for foodList in [unstackableFood, stackableFood]:\n for food in foodList:\n if(food.height >= Shelf.height):\n #can never be added because doesn't fit on Shelf\n print(\"Could not add {} due to exceeding height of Shelf.\".format(food))\n foodList.remove(food)\n iShelf = 0\n remainingWidth = Shelf.width\n\n while (not Shelf.isFull() and (len(stackableFood) != 0 or len(unstackableFood) != 0)) :\n\n Shelf.createStack()\n print(\"stack created\")\n # here I am trying to get the ith stack on the shelf. but it didnt work for me. It was an object of type list(?)\n curStack = Shelf.stacks[iShelf]\n for food in stackableFood:\n # adds as many stackable food items to stack as possible\n if Shelf.height > curStack.height + food.height:\n # if this is not true, it cant be stacked anyways\n if not curStack.items:\n # just first item in stack\n if food.depth < remainingWidth:\n curStack.addItem(food)\n print(\"adding {}\".format(food))\n else:\n curStack.addItem(food)\n remainingWidth -= food.depth\n for food in curStack.items:#remove items that have been shelved\n stackableFood.remove(food)\n removeThisFromUnstackable = None\n for food in unstackableFood:\n # adds as a non stackable food item to stack if possible\n if curStack.stackable and Shelf.height > curStack.height + food.height:\n # if this is not true, it cant be stacked anyways\n if not curStack.items:\n # just first item in stack\n if food.depth < remainingWidth:\n remainingWidth -= food.depth\n curStack.addItem(food, False)\n unstackableFood.remove(food)\n break\n else:\n curStack.addItem(food, False)\n unstackableFood.remove(food)\n break\n\n iShelf += 1 \n try:\n if(len(stackableFood) != 0 and remainingWidth < stackableFood[-1].depth):\n if(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n Shelf.setFull(True)\n elif(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n if(len(unstackableFood) != 0 and remainingWidth < unstackableFood[-1].depth):\n Shelf.setFull(True) \n # checks if not even the smallest items in both lists fit onto shelf. 
\n except:\n print(\"avoided something\")\n print(\"end\")\n return Shelf", "def __call__(self, stack: Sequence[Dep], queue: Sequence[Dep]) -> Action:\n\n #Left-arc condition\n if len(stack)>=2 and stack[-1].id==stack[-2].head and len(self.dependencies[stack[-2].id] - self.connected_childs) < 1:\n self.connected_childs.add(stack[-2].id)\n self.actions.append(Action.LEFT_ARC)\n self.features_list.append(feature_extraction(stack,queue))\n return Action.LEFT_ARC\n\n #Right-arch condition\n elif len(stack)>=2 and stack[-1].head==stack[-2].id and len(self.dependencies[stack[-1].id] - self.connected_childs) < 1:\n self.actions.append(Action.RIGHT_ARC)\n self.connected_childs.add(stack[-1].id)\n self.features_list.append(feature_extraction(stack,queue))\n return Action.RIGHT_ARC\n\n #Ensure Queue has elements for Shift\n elif len(queue)>0:\n self.actions.append(Action.SHIFT)\n self.features_list.append(feature_extraction(stack,queue))\n return Action.SHIFT\n\n else:\n #ensureq queue has elements for shift\n if len(queue)>0:\n self.actions.append(Action.SHIFT)\n self.features_list.append(feature_extraction(stack,queue))\n return Action.SHIFT\n #dummy action\n elif not (len(queue)==0 and len(stack)==1):\n self.features_list.append(feature_extraction(stack,queue))\n self.actions.append(Action.LEFT_ARC)\n return Action.LEFT_ARC", "def __siftup(heap, nodes, pos, stopPos = 0):\n # Loop until past stopping position\n while pos > stopPos:\n # Set parent position\n parentPos = (pos - 1) >> 1\n\n # Swap if child less than parent\n if heap[pos][0] < heap[parentPos][0]:\n Graph.__swapHeapNodes(heap, nodes, pos, parentPos)\n pos = parentPos\n \n # End sift if child's first tuple is greater than or equal to parent\n else: break", "def sift_up(heap, start, end):\n # Swap last node with parents until no longer greater.\n i = end - 1\n heaped = False\n while i > start and not heaped:\n parent = (i - 1) // 2\n if compare(heap[i], heap[parent]) > 0:\n heap[i], heap[parent] = heap[parent], heap[i]\n i = parent\n else:\n heaped = True", "def depthFirstSearch(problem):\n #\"*** YOUR CODE HERE ***\"\n\n \"\"\"\n Pseudocode:\n function G RAPH-S EARCH ( problem) returns a solution, or failure\n initialize the frontier using the initial state of problem\n initialize the explored set to be empty\n loop do\n if the frontier is empty then return failure\n choose a leaf node and remove it from the frontier\n if the node contains a goal state then return the corresponding solution\n add the node to the explored set\n expand the chosen node, adding the resulting nodes to the frontier\n only if not in the frontier or explored set\n\n \"\"\"\n frontier = util.Stack()\n #print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n #print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n #print 'Remove',repr(currNode.state)\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. 
Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n #print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored):\n # Si hacemos estas verificaciones entonces cuando se encuentra que un estado que se quiere expandir ya esta en la frontera\n # eliminamos ese estado de la frontera y lo expandimos ahora. Osea, damos prioridad a los nodos nuevos\n if(succNode.state in frontierSet):\n # Recurso'i:\n for frontierNode in frontier.list:\n if frontierNode.state == succNode.state:\n frontier.list.remove(frontierNode)\n frontierSet.remove(frontierNode.state)\n # if ((succNode.state not in explored) and (succNode.state not in frontierSet)): \n # Alternativa segun el libro. Lo que se hace es que se da prioridad a los nodos viejos.\n\n # Aca no verificaba si ya esta en la frontera porque alteraba el orden en el que se visitan los nodos.\n # Por ejemplo cuando esta pendiente (se genero pero no se expandio) un hijo con un estado,\n # pero en un nivel mas profundo se vuelve a generar el mismo estado y se tiene que expandir.\n # Si seguimos el DFS creo que tendriamos que expandir ese nodo ahi y no en la primera llamada donde quedo pendiente.\n \n frontier.push(succNode)\n #print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)\n\n #util.raiseNotDefined()", "def __sift_up(self, i: int):\n while i > 0:\n parent = (i - 1) // 2\n if self.__heap[i][0] < self.__heap[parent][0]:\n tmp = self.__heap[parent]\n self.__heap[parent] = self.__heap[i]\n self.__heap[i] = tmp\n i = parent", "def Queues__TalesOfTwoStacks():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/ctci-queue-using-two-stacks\n \"\"\"The solution below passes all test cases.\"\"\"\n\n class MyQueue(object):\n def __init__(self):\n self.pop_index = 0\n self.first = []\n self.second = []\n\n def peek(self):\n if self.pop_index == 0:\n return self.first[0]\n else:\n return self.first[self.pop_index]\n\n def pop(self):\n ret_val = self.first[self.pop_index]\n self.pop_index += 1\n if self.pop_index == len(self.first): # index is out of array.\n self.first = self.second\n self.second = []\n self.pop_index = 0\n return ret_val\n\n def put(self, value):\n if self.pop_index == 0:\n self.first.append(value)\n else:\n self.second.append(value)\n\n queue = MyQueue()\n t = int(input())\n for line in range(t):\n values = list(map(int, input().split()))\n\n if values[0] == 1:\n queue.put(values[1])\n elif values[0] == 2:\n queue.pop()\n else:\n print(queue.peek())\n\n ## Simple solution that simply moves stacks along. Not efficient. Fails some test cases.\n # class MyQueue(object):\n # def __init__(self):\n # self.first = []\n # self.second = []\n #\n # def peek(self):\n # return self.first[0]\n # # O(n). 
Not efficient.\n # def pop(self):\n # for _ in xrange(len(self.first) -1):\n # self.second.append(self.first.pop())\n # ret_val = self.first.pop()\n # for _ in xrange(len(self.second)):\n # self.first.append(self.second.pop())\n # return ret_val\n #\n # def put(self, value):\n # self.first.append(value)\n # ...", "def solve(deck):\n fringe = collections.deque()\n seen_states = dict()\n fringe.append(State.initial_state(deck))\n while fringe:\n state = fringe.popleft()\n if state.is_tableau_empty():\n return path(state, seen_states)\n for next_state in state.successors():\n if next_state not in seen_states:\n seen_states[next_state] = state\n fringe.append(next_state)\n return []", "def size(s: Stack) -> int:\n side_stack = Stack()\n count = 0\n # Pop everything off <s> and onto <side_stack>, counting as we go.\n while not s.is_empty():\n side_stack.push(s.pop())\n count += 1\n # Now pop everything off <side_stack> and back onto <s>.\n while not side_stack.is_empty():\n s.push(side_stack.pop())\n # <s> is restored to its state at the start of the function call.\n # We consider that it was not mutated.\n return count", "def breadthFirstSearch(problem):\n\n frontier = util.Queue()\n # print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n # print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n # print 'Remove',repr(currNode.state)\n # print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n # print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored) and (succNode.state not in frontierSet):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. 
Entonces los nodos que estan en la lista\n necesariamente van a ser verificados antes de que se vuelva a insertar otro.\n \"\"\"\n frontier.push(succNode)\n # print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)", "def __sift_down(self, i: int):\n while (2 * i + 1) <= self.__len__() - 1:\n\n child_idx = self.__get_smallest_child(i)\n\n if self.__heap[i][0] > self.__heap[child_idx][0]:\n tmp = self.__heap[i]\n self.__heap[i] = self.__heap[child_idx]\n self.__heap[child_idx] = tmp\n i = child_idx", "def bfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = deque([s])\n while len(boundary) > 0:\n v = boundary.popleft()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop(0)\n visited += [v]\n for w in neighbours(v, graph):\n if w not in goals:\n if w not in visited and w not in boundary:\n boundary.append(w)\n else:\n if w not in visited and w not in boundary:\n boundary.append(w)\n v = boundary.pop(0)\n visited += [v]\n break\n return visited\n \"\"\"", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def uniformCostSearch(problem):\n\n frontier = util.PriorityQueue()\n #print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node, initial_node.path_cost)\n #print 'Push ',repr(initial_node.state)\n frontierSet = set([(initial_node.state, initial_node.path_cost)])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove((currNode.state, currNode.path_cost))\n #print 'Remove',repr(currNode.state)\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n #print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. 
Entonces los nodos que estan en la lista necesariamente van a ser\n verificados antes de que se vuelva a insertar otro, cumpliendo con el algoritmo.\n \"\"\"\n\n StateInFrontierSet = False\n ExistsBetterPriority = False\n for frontierSet_node in frontierSet:\n if (succNode.state == frontierSet_node[0]):\n StateInFrontierSet = True\n if (succNode.path_cost < frontierSet_node[1]):\n ExistsBetterPriority = True\n frontierSet.remove(frontierSet_node)\n #print 'Remove ',repr((frontierSet_node[0], frontierSet_node[1]))\n\n #Recurso'i:\n for prio, count, frontierNode in frontier.heap:\n if frontierNode.state == succNode.state:\n frontier.heap.remove((prio, count, frontierNode))\n \"\"\"\n Recurso'i. Hay que cambiar la estructura de los nodos para que contenga solo el action_cost, en lugar del path_cost\n y para guardar la solucion tener una estructura aparte a la que se le van appendeando las acciones,\n o capaz seguir la implementacion del libro y hacer una funcion con el nodo como parametro y calcula la solucion,\n o hacer que frontier solo tenga los estados?\n frontier.update(succNode, succNode.path_cost) con esta operacion deberia de bastar\n \"\"\"\n break\n \n if not (StateInFrontierSet and not ExistsBetterPriority): # El caso en que no se hace nada es cuando ya esta en la frontera\n # pero con una mejor o igual prioridad\n frontier.push(succNode, succNode.path_cost)\n #print 'Push ',repr((succNode.state, succNode.path_cost))\n frontierSet.add((succNode.state, succNode.path_cost))", "def traverseBishop(self):\n\t\tmoves = np.empty(14, dtype=object)\n\t\tcnt = [0]\n\t\tPiece.traverse(self, cnt, moves, -1, -1)\n\t\tPiece.traverse(self, cnt, moves, -1, 1)\n\t\tPiece.traverse(self, cnt, moves, 1, -1)\n\t\tPiece.traverse(self, cnt, moves, 1, 1)\n\t\treturn moves[:cnt[0]]", "def dfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n if w in goals:\n v = boundary.pop()\n visited += [v]\n return visited\n\"\"\"", "def grow(self):\n # expansion - get all fanins of this gate, except for ones already in\n next_frontier = set()\n added = 0\n remove = set()\n for g in self.frontier:\n new_fin = len((self.ckt[g].fins - self.members)) - 1\n if (new_fin + self.w) < self.max_w:\n print \"Adding\", g, \"to partition\"\n # add this to the partition\n self.members.add(g)\n next_frontier |= self.ckt[g].fins - self.members\n self.w += new_fin + 1\n else:\n remove.add(g)\n self.frontier = next_frontier\n if len(self.frontier) == 0:\n return None\n else:\n return True", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def bfs(self, initialSt, goalSt): # Breadth­First Search\n self.__reset_all_variables()\n\n start = time.perf_counter()\n\n frontier = deque() # deque will be treated as a queue\n frontier.append(initialSt)\n explored = set()\n frontier_U_explored = set() # for fasten up the lookup time\n\n max_frontier_size = 0\n max_ram_used = psutil.virtual_memory().used\n\n while len(frontier) != 0:\n currentState = frontier.popleft()\n explored.add(currentState)\n frontier_U_explored.add(currentState)\n\n if goalSt == currentState:\n end = time.perf_counter()\n self.__success(initialSt,\n currentState,\n len(explored)-1,\n len(frontier),\n max_frontier_size,\n frontier[-1].depth,\n end-start,\n max_ram_used,\n \"bfs\")\n return True\n\n for child in currentState.children():\n if child not in frontier_U_explored:\n frontier.append(child)\n\n max_frontier_size = len(frontier) if len(\n frontier) > max_frontier_size else max_frontier_size\n max_ram_used = psutil.virtual_memory().used if psutil.virtual_memory(\n ).used > max_ram_used else max_ram_used\n return False", "def TowerOfHanoi(fromStack, toStack, tempStack, toMove):\n\n if toMove == 1:\n toStack.append(fromStack.pop())\n else:\n TowerOfHanoi(fromStack, tempStack, toStack, toMove - 1)\n toStack.append(fromStack.pop())\n TowerOfHanoi(tempStack, toStack, fromStack, toMove - 1)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. 
[s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def _sift_up(self, i):\n while i > 0:\n p = (i-1)//2\n if self._heap[i] < self._heap[p]:\n self._swap(i, p)\n i = p\n else:\n break", "def percolate_down(self, i):\n while (i * 2) <= self.size:\n max_child = self.max_child(i)\n if self.heap_list[max_child] > self.heap_list[i]:\n tmp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_child]\n self.heap_list[max_child] = tmp\n i = max_child", "def sift_up(self, index):\n if self.size() == 1:\n return\n parent_index = self.parent(index)\n # sift up if it is larger than its parent\n while index > 0 and self.heap[index] > self.heap[parent_index]:\n self.heap[index], self.heap[parent_index] = self.heap[parent_index], self.heap[index]\n # update index\n index = parent_index\n parent_index = self.parent(index)", "def _sift_down(self, i):\n mini = i\n l = 2*i + 1\n if l < self._size and\\\n self._heap[l] < self._heap[mini]:\n mini = l\n r = 2*i + 2\n if r < self._size and\\\n self._heap[r] < self._heap[mini]:\n mini = r\n if mini != i:\n self._swap(i, mini)\n self._sift_down(mini)", "def BFS(self,s,t,parent):\n #mark all vertices as not visited\n visited = [False]*(self.ROWS);\n # initialize a queue\n queue = []\n # add source to q and mark it visited\n queue.append(s)\n visited[s] = True\n #Breadth-first-search\n while queue:\n n = queue.pop(0)\n for index,val in enumerate(self.graph[n]):\n if visited[index] == False and val>0:\n queue.append(index)\n visited[index] = True\n parent[index] = n\n #return True if sink was visted\n if visited[t]:\n return True\n else:\n return False", "def ECMPBFS(self, flow, edge_mark):\n\t\t# Distance flag for each node\n\t\td = {v:float('inf') for v in self.topo.nodes}\n\t\t# Parent node for each node\n\t\tpa = {v:[] for v in self.topo.nodes}\n\t\t# Request info\n\t\ts = flow[0]\n\t\tt = flow[1]\n\n\t\t# BFS to find a min-hop path\n\t\tqueue = [s]; hdr = 0; d[s] = 0\n\t\twhile hdr < len(queue):\n\t\t\tu = queue[hdr]\n\t\t\thdr += 1\n\n\t\t\tfor v in self.topo.topo.neighbors(u):\t# This is directed neighbors in the context\n\t\t\t\tif edge_mark[(u, v)] or d[v] < d[u] + 1:\n\t\t\t\t\tcontinue\n\t\t\t\tif d[v] > d[u] + 1:\n\t\t\t\t\tqueue.append(v)\n\t\t\t\t\tpa[v] = [u]\n\t\t\t\telif d[v] == d[u] + 1:\n\t\t\t\t\tpa[v].append(u)\n\t\t\t\td[v] = d[u] + 1\n\n\t\tif d[t] == float('inf'):\n\t\t\treturn False\n\n\t\t# Iteratively find all paths until there is no\n\t\tp_lst = []\n\t\twhile True:\n\t\t\tp = [t]; v = t; branch = None; branch_idx = None\n\t\t\twhile v != s and v != -1:\n\t\t\t\tif len(pa[v]) > 0:\n\t\t\t\t\tif pa[v] > 1:\n\t\t\t\t\t\tbranch = v\n\t\t\t\t\t\tbranch_idx = 0\n\t\t\t\t\tv = pa[v][0]\n\t\t\t\t\tp.append(v)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\tif v == s:\n\t\t\t\tp.reverse()\n\t\t\t\tp_lst.append(p)\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\tif branch:\n\t\t\t\tpa[branch].pop(branch_idx)\n\n\t\treturn p_lst", "def remove_big(s: Stack) -> None:\n #\n temp = Stack()\n while not s.is_empty():\n val = s.pop()\n # Only keep values less than or equal to five.\n if val <= 5:\n 
temp.push(val)\n\n # Restore the original stack.\n while not temp.is_empty():\n s.push(temp.pop())", "def dfs(self, starting_vertex, destination_vertex):\n \"\"\" LIFO\n Create a stack\n Create a set to store visited\n PUSH starting vertex into an array (STACK)\n While the STACK is NOT empty \n get((pop) first PATH vertex\n get Vertex from END of PATH\n check if NOT visited\n mark as visited\n check if vertex is destination_vertex\n If TRUE, return path \n PUSH path to ALL of neighbors\n make copy of current path\n add neighbor to path copy\n PUSH path copy\n \"\"\" \n s = Stack() # Create a stack\n s.push([starting_vertex]) # PUSH starting vertex into an array (STACK)\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the STACK is NOT empty\n path = s.pop() # get(pop) first PATH vertex)\n v = path[-1] # get Vertex from END of PATH \n\n while v not in visited: # check if NOT visited\n visited.add(v) # mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path \n\n for n in self.get_neighbors(v): # PUSH path to ALL of neighbors\n path_c = path[:] # make copy of current path\n # path_c.extend([n]) # add neighbor to path copy\n path_c.append(n) # add neighbor to path copy\n s.push(path_c) # PUSH path copy", "def size(stk: Stack) -> int:\n side_stack = Stack()\n count = 0\n # Pop everything off <stk> and onto <side_stack>, counting as we go.\n while not stk.is_empty():\n side_stack.add(stk.remove())\n count += 1\n # Now remove everything off <side_stack> and back onto <stk>.\n while not side_stack.is_empty():\n stk.add(side_stack.remove())\n # <stk> is restored to its state at the start of the function call.\n # We consider that it was not mutated.\n return count" ]
[ "0.63782394", "0.56742114", "0.5668357", "0.5521426", "0.5511266", "0.54922324", "0.54876566", "0.542339", "0.5420904", "0.5413091", "0.53986907", "0.53806275", "0.5353128", "0.5341598", "0.5308404", "0.5303239", "0.5296814", "0.52952766", "0.52823764", "0.5276772", "0.5268221", "0.52633375", "0.52473366", "0.52424324", "0.5232118", "0.5222923", "0.51877403", "0.51758385", "0.51724166", "0.5170096" ]
0.7700281
0
Load a package from a filesystem path. Infer the name and version of the package from the path.
def package_from_path(path): partial_path, version = os.path.split(path) base_path, name = os.path.split(partial_path) p = Package(name, version, path) return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _from_path(cls, path):\n with open(path, encoding='utf-8') as open_file:\n pkg = cls._load(open_file)\n return pkg", "def load_from_path(path):\n module, attr = path.rsplit('.', 1)\n mod = importlib.import_module(module)\n return getattr(mod, attr)", "def load(path):\n pass", "def load_datamodule(cls, path: Union[str, Path]):\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError(f\"{path} does not exist.\")\n datamodule = joblib.load(path)\n return datamodule", "def load_module(path: os.PathLike):\n path = Path(path)\n pwd = Path(os.getcwd())\n os.chdir(path.parent)\n try:\n mod = import_module(path.stem)\n except ModuleNotFoundError as err:\n raise err\n finally:\n os.chdir(pwd)\n return mod", "def load(self, path: str):\n pass", "def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module", "def load(path):\n _, ext = os.path.splitext(path)\n\n if ext == '.json':\n return JSONRFile(path)\n elif ext == '.root':\n # NOTE: import is here to make dependency on uproot runtime optional\n # pylint: disable=import-outside-toplevel\n from .root_file import ROOTFile\n return ROOTFile(path)\n\n raise ValueError(\"Umknown file extension '%s'\" % (path, ))", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load(self, path):\n\t\tmname = os.path.splitext(os.path.split(path)[-1])[0]\n\t\tmodule = imp.load_source(mname, path)\n\t\tif hasattr(module, \"Plugin\"):\n\t\t\tinst = module.Plugin(self)\n\t\telse:\n\t\t\treturn None\n\t\t\n\t\tinst.set_info()\n\t\t\n\t\tfor c in inst.name:\n\t\t\tif not c in string.ascii_lowercase+\"_\":\n\t\t\t\treturn None\n\t\tfor c in inst.require:\n\t\t\tif not c in string.ascii_lowercase+string.digits+\"_:,\":\n\t\t\t\treturn None\n\t\tif not type(inst.version) == int:\n\t\t\treturn None\n\t\t\n\t\tself.__plugins[inst.name] = inst\n\t\tself.__plugins[inst.name].path = path\n\t\t\n\t\tif not self.__plugins[inst.name].start():\n\t\t\treturn None\n\t\t\n\t\treturn inst.name", "def import_from_path(module: str, path: str, name: str):\n\n spec = importlib.util.spec_from_file_location(module, path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return getattr(foo, name)", "def get_version(path=VERSION_PATH):\n namespace = {}\n exec(read(path), namespace)\n return namespace['get_version'](short=True)", "def import_package(name):\r\n mod = __import__(name)\r\n components = name.split('.')\r\n for comp in components[1:]:\r\n mod = getattr(mod, comp)\r\n return mod", "def import_load(pkg, name):\n def loader():\n mod = importlib.import_module(pkg)\n return getattr(mod, name)\n return loader", "def _import_pkg(self, package, package_config):\n\n # If package path defined, add to Python path\n path = '{0}_path'.format(package)\n if path in package_config:\n if not os.path.exists(package_config[path]):\n print('No such path to package {0}: {1}'.format(package, package_config[path]))\n return\n if path not in sys.path:\n sys.path.append(package_config[path])\n\n # Prepare import. 
PyDPI is not in Cinfony\n package_name = 'cinfony.{0}'.format(package)\n if package == 'pydpi':\n\n # RDKit needed for pydpi\n if 'rdk' not in self:\n print('Cannot load PyDPI, RDKit not available')\n return\n package_name = 'cheminfo_pydpi'\n\n # Try package import, report errors\n try:\n self[package] = importlib.import_module(package_name)\n except ImportError as e:\n print('Import error for package {0}: {1}'.format(package, e))\n except SyntaxError as e:\n print('Syntax error on import of package {0}: {1}'.format(package, e))\n except KeyError as e:\n print('Package {0}: not found.'.format(package))\n except Exception:\n print('Unexpected error for package {0}: {1}'.format(package, sys.exc_info()[0]))", "def import_module(name, path):\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module", "def run_import(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n [sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def require(path,className=None):\n (dirname, basename) = os.path.split(path)\n packageName = dirname.replace('/','.')\n moduleName = basename.rstrip('.py')\n\n logging.getLogger().debug(\"Loading: %s.%s[%s]\" %(packageName,moduleName,className))\n\n mod = __import__(packageName+'.'+moduleName, globals(), locals(), [className])\n if className:\n return getattr(mod, className)\n\n return mod", "def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()", "def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path", "def load_resource(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def _import_by_path(path):\n module_path, attr_name = path.rsplit('.', 1)\n module = import_module(module_path)\n return getattr(module, attr_name)", "def load_from_disk(cls, path: Path) -> DirManifest:\n package = DirManifest()\n\n package.path = path\n packagepathstr = str(path)\n paths: List[str] = []\n\n # Simply return empty manifests if the given path isn't a dir.\n # (the server may intend to create it and is just asking what's\n # there already)\n if path.is_dir():\n # Build the full list of package-relative paths.\n for basename, _dirnames, filenames in os.walk(path):\n for filename in filenames:\n fullname = os.path.join(basename, filename)\n assert fullname.startswith(packagepathstr)\n paths.append(fullname[len(packagepathstr) + 1:])\n\n import hashlib\n from concurrent.futures import ThreadPoolExecutor\n from 
multiprocessing import cpu_count\n\n def _get_file_info(filepath: str) -> Tuple[str, DirManifestFile]:\n sha = hashlib.sha256()\n fullfilepath = os.path.join(packagepathstr, filepath)\n if not os.path.isfile(fullfilepath):\n raise Exception(f'File not found: \"{fullfilepath}\"')\n with open(fullfilepath, 'rb') as infile:\n filebytes = infile.read()\n filesize = len(filebytes)\n sha.update(filebytes)\n return (filepath,\n DirManifestFile(filehash=sha.hexdigest(),\n filesize=filesize))\n\n # Now use all procs to hash the files efficiently.\n with ThreadPoolExecutor(max_workers=cpu_count()) as executor:\n package.files = dict(executor.map(_get_file_info, paths))\n\n return package", "def import_module(path, package=None):\n if path.startswith('.'):\n if not package:\n raise TypeError(\"Relative imports require the 'package' argument\")\n start = 0\n while path[start] == \".\" or start < len(path):\n start += 1\n path = _resolve_name(path[start:], package, start)\n __import__(path)\n\n return sys.modules[path]", "def load_module(module_name: str, module_path: str) -> object:\n spec = module_util.spec_from_file_location(module_name, module_path)\n module = module_util.module_from_spec(spec)\n spec.loader.exec_module(module) # type: ignore\n return module", "def parse_package(package_path):\n\n if DEBUG: print \"Parsing package\",package_path\n\n package_path = os.path.normpath(package_path)\n dir,file = os.path.split(package_path)\n if dir == \"\":\n dir = \".\"\n return parse_subpackage(dir,file)", "def importFromPath(filename):\n try:\n path, name = os.path.split(filename)\n name, ext = os.path.splitext(name)\n file, filename, data = imp.find_module(name, [path])\n importedModule = imp.load_module(name, file, filename, data)\n except Exception as ae:\n raise Exception('Importing module '+ filename + ' at ' + path + os.sep + name + ' failed with error '+ str(ae))\n return importedModule", "def load_module_from_path(module_path):\n module_names = [os.path.splitext(os.path.basename(module_path))[0]]\n d = os.path.dirname(module_path)\n\n while os.path.exists(os.path.join(d, '__init__.py')):\n module_names.append(os.path.basename(d))\n d = os.path.dirname(d)\n\n d = [d]\n\n module = None\n full_module_name = ''\n for package_name in reversed(module_names):\n if module:\n d = module.__path__\n full_module_name += '.'\n r = imp.find_module(package_name, d)\n full_module_name += package_name\n module = imp.load_module(full_module_name, *r)\n return module", "def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod" ]
[ "0.72946215", "0.6350694", "0.63120604", "0.62729806", "0.6185737", "0.60219616", "0.6021483", "0.5967492", "0.5905551", "0.5905551", "0.5899823", "0.5853805", "0.58521056", "0.5845826", "0.583879", "0.58262366", "0.581041", "0.5792163", "0.5784363", "0.5765604", "0.57580024", "0.5686246", "0.56728214", "0.5671111", "0.5662788", "0.56504995", "0.5643704", "0.5641877", "0.5641039", "0.5632371" ]
0.73681134
0
Load a package from a yaml receipt file.
def package_from_yaml(yaml_file): with open(yaml_file) as f: yaml_rep = yaml.load(f) name = yaml_rep['name'] version = yaml_rep['version'] base_path, receipt_file = os.path.split(yaml_file) p = Package(name, version, base_path) try: p.direct_dependencies = yaml_rep['dependencies'] except KeyError: pass return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_autopkg_recipe(path):\n recipe = None\n\n if path.endswith(\".yaml\"):\n try:\n # try to read it as yaml\n with open(path, \"rb\") as f:\n recipe = yaml.safe_load(f)\n except Exception as err:\n print(\"{}: yaml parsing error: {}\".format(path, err))\n elif path.endswith(\".json\"):\n try:\n # try to read it as json\n with open(path, \"rb\") as f:\n recipe = json.load(f)\n except Exception as err:\n print(\"{}: json parsing error: {}\".format(path, err))\n else:\n try:\n # try to read it as a plist\n with open(path, \"rb\") as f:\n recipe = plistlib.load(f)\n except Exception as err:\n print(\"{}: plist parsing error: {}\".format(path, err))\n\n return recipe", "def load_yaml(cls, file=None):\n if file is None:\n file = f'{cls.base_path}rcp_{rcp.stage}.yml'\n try:\n with open(file, 'r') as f:\n recipe = yaml.load(f, Loader=yaml.FullLoader)\n rcp.__dict__ = recipe\n return rcp\n except FileNotFoundError:\n print(\"Recipe file doesn't exist.\")\n raise", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def LoadYaml(path):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n fp = None\n try:\n fp = open(path)\n \n data = yaml.load(fp)\n \n finally:\n if fp:\n fp.close()\n \n return data", "def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def from_path(cls, path: str) -> Any:\n cls._check_yaml()\n with open(path) as f:\n return yaml.safe_load(f)", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def get_ymal_load(yamlfile):\n with open(yamlfile, 'r', encoding='utf-8') as fr:\n filedata = fr.read()\n yamldata = yaml.full_load(filedata)\n return yamldata", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def load_yaml(file: Text):\n with open(file) as fp:\n return yaml.load(fp, yaml.FullLoader)", "def load(text: str, options: Dict[str, str]) -> object:\n raise LoaderMissingError(\"Yaml is not installed on the system\") from e", "def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def _load_yaml_file(yaml_file):\n with io.open(yaml_file, 'r', encoding='utf-8') as stream:\n yaml_content = yaml.load(stream)\n FileUtils._check_format(yaml_file, yaml_content)", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def load(cls, filename):\n with open(filename) as f:\n d = yaml.load(f.read(), Loader=yaml.SafeLoader)\n return Portfolio.from_dict(d)", "def load_yaml(filename):\n try:\n f = file(filename, 'r')\n data = yaml.load(f)\n return data\n except (IOError, OSError) as e:\n err = e[0]\n reason = e[1]\n 
error = 'load_yaml: Failed to open {filename}: {reason} {err}'.format(filename=filename, reason=reason, err=err)\n raise IOError(error)", "def load_yaml_file(self, path):\n with path.open('r') as handle:\n data = load_yaml(handle)\n\n self.set_all(**self.SCHEMA.load(data).data)", "def load_from_file(cls, filename):\n try:\n yaml_file = settings.APPS_DIR.path('providers').file(filename)\n cls.providers = yaml.safe_load(yaml_file.read())\n return cls\n except IOError:\n raise ImproperlyConfigured('providers.yaml MUST be provided in the providers app folder')", "def FromYAML(cls, source):\n\n # Late import to avoid a circular dependency.\n try:\n import bulletml.bulletyaml\n import yaml\n except ImportError:\n raise ParseError(\"PyYAML is not available\")\n else:\n try:\n return yaml.load(source)\n except Exception as exc:\n raise ParseError(str(exc))", "def load_yaml(file_path):\n with open(file_path) as fin:\n content = yaml.load(fin, Loader=yaml.FullLoader)\n return content", "def load(self, file):\n self.namespace['workflow'].configfile(file)\n self.updateNamespace()", "def load_yaml(file):\n with open(file, 'r') as file:\n data = yaml.safe_load(file)\n return data", "def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())", "def load(path=\".travis.yml\"):\n if not path:\n path = \".travis.yml\"\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)", "def load_yaml(input_path):\n yaml = ruamel.yaml.YAML()\n with open(input_path, 'rb') as input_file:\n return yaml.load(input_file)", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def load_yaml(fname, schema=None):\n with open(fname) as fh:\n data = yaml.safe_load(fh.read())\n if schema:\n import jsonschema\n jsonschema.validate(data, schema=schema)\n return data", "def read_from_yaml(file_path, Loader=None):\n import yaml\n if Loader is None:\n Loader = yaml.FullLoader\n if os.path.isfile(file_path):\n with open(file_path, 'r') as stream:\n data = yaml.load(stream, Loader=Loader)\n return data\n else:\n raise Exception('File: {} does not exist.'.format(file_path))" ]
[ "0.6303922", "0.6289447", "0.6278501", "0.6247403", "0.6126887", "0.60465944", "0.60452366", "0.6026429", "0.5979628", "0.596223", "0.59619045", "0.5955822", "0.5950138", "0.59431976", "0.5911328", "0.59020746", "0.5893535", "0.58713704", "0.5863882", "0.58628803", "0.5861409", "0.5856635", "0.58515364", "0.584776", "0.5838553", "0.5826002", "0.5784902", "0.57710844", "0.57562286", "0.57361734" ]
0.7605227
0
List all installed packages.
def list_packages(): shelf_dir = settings.shelf_dir package_list = os.listdir(shelf_dir) package_list.sort() return package_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])", "def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]", "def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info", "def get_all_packages(self):\n return self._package_cache.values()", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def get_installed_packages(cache=False,\n output_dir='.',\n output_filename='installed.pkgs.txt'):\n output = os.path.join(output_dir, output_filename)\n cmd = '''aptitude search '~i !~M' -F '%%p' | sort -u > %r''' % (\n output)\n ensure_file(cmd, output, shell=True, overwrite=not(cache))\n installed = list(read_lines(output))\n return installed", "def list_packages(self):\n\n # First extract loaded module names from sys.modules\n sys_modules = sys.modules.keys()\n\n packages = {}\n\n # First add moduels in sys.modules (built-ins,\n # preloads and already loaded ones)\n for name in sys_modules:\n d = self.find_package(name)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] }\n\n #import site\n # Loop through all directories in sys.path and check for modules\n # Dont iterate through <prefix>/lib directory\n libdir = os.path.join(sys.prefix, 'lib')\n\n walked = []\n for top_level in self.paths:\n if not os.path.isdir(top_level):\n continue\n\n # Dont iterate through libdir\n if os.path.abspath(top_level) == os.path.abspath(libdir):\n continue\n\n walked.append(top_level)\n for item in os.listdir(top_level):\n\n fullpath = os.path.join(top_level, item)\n if fullpath in walked: continue\n\n walked.append(fullpath)\n # Remove the extension\n idx = item.find('.')\n if idx != -1: item = item[:idx]\n d = self.find_package(item)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] } \n\n for key,item in packages.items():\n print\n print self.pkgTypeInfo(key)\n print\n\n # Print sorted\n listofitems = item.keys()\n listofitems.sort()\n\n for key2 in listofitems:\n print key2,':',item[key2]", "def getInstalledPackages(self) -> PackageContainer:\n\t\tself.getPackageManager()\n\t\tif self.package_manager == \"apt\":\n\t\t\tpackages = subprocess.check_output([\"apt\", \"list\", \"--installed\"], encoding='UTF-8', universal_newlines=True)\n\t\t\tpackages = packages.split(\"\\n\")[1:-1]\n\t\telse:\n\t\t\tlogger.error(\"Package manager not supported for extracting packages.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\n\t\t# Parse packages to self.installed_packages\n\t\tself.parsePackages(packages)\n\n\t\tlogger.info(\"Installed packages collected\")\n\t\treturn self.installed_packages", "def installed_packages():\n with open(os.path.join(_DIRECTORY, 'package.json'), 'r') as f:\n packagejson = json.load(f)\n return 
packagejson['dependencies'].keys()", "def packages(self):\n return []", "def get_all_packages(cls):\n packages = Package.query.all()\n return packages", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def list_packages(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/apiservices/%s/packages\" % (self.project_key, self.service_id))", "def getAllInstalledPackages(installedPkgPath):\n allPkgVers = []\n if os.path.exists(installedPkgPath):\n for pkg in os.listdir(installedPkgPath):\n pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))\n for pkgVersion in pkgVersions:\n pkgPath = os.path.join(installedPkgPath, pkg)\n if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):\n allPkgVers.append(os.path.join(pkgPath, pkgVersion))\n return allPkgVers", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def list_packages(pretty=False) -> Dict:\n\n packages = dict()\n lp = Commands._list_packages()\n inst_packages = lp.stdout.split('\\n')[:-1]\n\n for package in inst_packages:\n name, version = package.split('==')[0], package.split('==')[1]\n packages[name] = version\n \n if pretty:\n import json\n return json.dumps(packages, sort_keys=True, indent=4)\n return packages", "def get_all_packages(self):\n with self._conn.begin():\n return {\n rec.package\n for rec in self._conn.execute(self._packages.select())\n }", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def get_installed_packages() -> List['Package']:\n repo_packages_names = set(expac(\"-S\", ['n'], []))\n\n # packages the user wants to install from aur\n aur_names = packages_from_other_sources()[0]\n repo_packages_names -= aur_names\n\n installed_packages_names = set(expac(\"-Q\", ['n'], []))\n installed_repo_packages_names = installed_packages_names & repo_packages_names\n unclassified_installed_names = installed_packages_names - installed_repo_packages_names\n\n return_list = []\n\n # installed repo packages\n if installed_repo_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_repo_packages_names), PossibleTypes.REPO_PACKAGE)\n )\n\n # installed aur packages\n installed_aur_packages_names = set(\n [package.name for package in Package.get_packages_from_aur(list(unclassified_installed_names))]\n )\n\n # package names the user gave us must be in the aur\n for name in aur_names:\n if name not in installed_aur_packages_names:\n aurman_error(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n\n if installed_aur_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_aur_packages_names), PossibleTypes.AUR_PACKAGE)\n )\n\n unclassified_installed_names -= installed_aur_packages_names\n\n # installed not repo not aur packages\n if unclassified_installed_names:\n return_list.extend(\n Package.get_packages_from_expac(\n \"-Q\", list(unclassified_installed_names),\n PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n )\n )\n\n return return_list", "def 
list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def log_installed_modules(self):\n\n # Get python modules installed locally\n\n installed_packages = pkg_resources.working_set\n\n python_str = f'python:{sys.version}\\n'\n print_str = \"\\n\".join(f\"{package.key}: {package.version}\" for package in installed_packages)\n msg = f\"Installed packages:\\n{python_str+print_str}\"\n logger.info(msg)", "def listInstalledLibraries(self):\n calcEngine = CalcEngine.factory(self.client_session)\n result = calcEngine.listInstalledLibraries()\n return result", "def get_all_versions(cls) -> list[str]:\n\n s = run([cls.command, \"install\", \"-l\"])\n versions = s.split()\n\n return list(filter(cls._is_python_version, versions))", "def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)", "def getMissingPackages(self, language=None, all=False, packages=None, showInstalled=False):\n if self._cache.broken_count > 0:\n raise SoftwareIndexBroken\n \n self.langpack_locales = {}\n self.pkg_translations = {}\n self.pkg_writing = {}\n filter_list = {}\n blacklist = []\n show = []\n self.missing = set()\n self.installed = set()\n self.system_pkgcode = ''\n \n for l in open(self.BLACKLIST):\n l = l.strip()\n if not l.startswith('#'):\n blacklist.append(l)\n \n for l in open(self.LANGCODE_TO_LOCALE):\n try:\n l = l.rstrip()\n if ':' in l:\n (pkgcode, locale) = l.split(':')\n else:\n pkgcode = l\n locale = l\n except ValueError:\n continue\n self.langpack_locales[locale] = pkgcode\n \n for l in open(self.PACKAGE_DEPENDS):\n if l.startswith('#'):\n continue\n try:\n l = l.rstrip()\n # sort out comments\n if l.find('#') >= 0:\n continue\n (c, lc, k, v) = l.split(':')\n except ValueError:\n continue\n if (c == 'tr' and lc == ''):\n filter_list[v] = k\n elif (c == 'wa' and lc != ''):\n if '|' in lc:\n for l in lc.split('|'):\n if not l in self.pkg_writing:\n self.pkg_writing[l] = []\n self.pkg_writing[l].append((\"%s\" % k, \"%s\" % v))\n else:\n if not lc in self.pkg_writing:\n self.pkg_writing[lc] = []\n self.pkg_writing[lc].append((\"%s\" % k, \"%s\" % v))\n\n # get list of all packages available on the system and filter them\n for item in self._cache.keys():\n if item in blacklist: \n continue\n for x in filter_list.keys():\n if item.startswith(x) and not item.endswith('-base'):\n # parse language code\n langcode = item.replace(x, '')\n #print \"%s\\t%s\" % (item, langcode)\n if langcode == 'zh':\n # special case: zh langpack split\n for langcode in ['zh-hans', 'zh-hant']:\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n elif langcode in self.langpack_locales.values():\n # langcode == pkgcode\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print 
self.pkg_translations[langcode]\n else:\n # need to scan for LL-CC and LL-VARIANT codes\n for locale in self.langpack_locales.keys():\n if '_' in locale or '@' in locale:\n if '@' in locale:\n (locale, variant) = locale.split('@')\n else:\n variant = ''\n (lcode, ccode) = locale.split('_')\n if langcode in [\"%s-%s\" % (lcode, ccode.lower()),\n \"%s%s\" % (lcode, ccode.lower()),\n \"%s-%s\" % (lcode, variant),\n \"%s%s\" % (lcode, variant),\n \"%s-latn\" % lcode,\n \"%slatn\" % lcode,\n \"%s-%s-%s\" % (lcode, ccode.lower(), variant),\n \"%s%s%s\" % (lcode, ccode.lower(), variant)]:\n # match found, get matching pkgcode\n langcode = self.langpack_locales[locale]\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n break\n\n if language:\n pkgcode = ''\n if language == 'zh-hans' or language == 'zh-hant':\n self.system_pkgcode = language\n elif language in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[language]\n else:\n # pkgcode = ll\n if '_' in language:\n (self.system_pkgcode) = language.split('_')[0]\n elif '@' in language:\n (self.system_pkgcode) = language.split('@')[0]\n else:\n self.system_pkgcode = language\n\n if packages:\n self.findPackages(self.system_pkgcode, packages)\n else:\n self.findPackages(self.system_pkgcode)\n \n elif all:\n # try all available languages\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist:\n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base'):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n\n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n\n else:\n # get a list of language-packs we have already installed or are going to install\n # 1. system locale\n system_langcode = self._localeinfo.getSystemDefaultLanguage()[0]\n if system_langcode == None:\n system_langcode = 'en_US'\n if system_langcode in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[system_langcode]\n # 2. 
installed language-packs\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist: \n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base') and \\\n (self._cache[item].is_installed or \\\n self._cache[item].marked_install):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n if self.system_pkgcode and \\\n not self.system_pkgcode in pkgcodes:\n pkgcodes.append(self.system_pkgcode)\n \n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n \n if showInstalled:\n show = self.missing | self.installed\n else:\n show = self.missing\n\n return show", "def list_cmd(repo):\n click.echo('Packages and scripts installed through pipsi:')\n for venv, scripts in repo.list_everything():\n if not scripts:\n continue\n click.echo(' Package \"%s\":' % venv)\n for script in scripts:\n click.echo(' ' + script)", "def command_package_ls(*args):\n\n # Setup build, install, and data directories\n package_dirs = os.listdir(packages_path())\n\n for packname in package_dirs:\n # Filter to directories with config files\n if not os.path.isdir(package_path(packname)): continue\n if not package_load_config(packname): continue\n\n # Check for installation\n installed_flag = ''\n installdir = install_dir(packname)\n bindir = os.path.join(installdir, 'bin')\n # This is just a very basic sanity check for binaries we\n # require\n binfiles = (os.path.exists(bindir) and os.listdir(bindir)) or []\n if ( ('space' in binfiles or 'space_d' in binfiles) and\n ('cppoh' in binfiles or 'cppoh_d' in binfiles) ):\n installed_flag = '*'\n\n print packname, installed_flag\n\n return 0", "def get_packages():\n packages = []\n for repo in repositories:\n packages.extend(repo.get_packages())\n return packages", "def required_packages(cls) -> List[Text]:\n return []", "def packages(self):\r\n return self._packages" ]
[ "0.7494529", "0.72714067", "0.71055263", "0.69917494", "0.69516325", "0.68876415", "0.6830791", "0.6806516", "0.6731782", "0.6642389", "0.66308403", "0.6553867", "0.65283746", "0.65177107", "0.6454239", "0.6392868", "0.6387223", "0.6369047", "0.63391745", "0.6325736", "0.62821305", "0.6280666", "0.62698597", "0.6264208", "0.62345505", "0.62325287", "0.6229149", "0.62258255", "0.6217871", "0.6200502" ]
0.7367697
1
Load all installed packages.
def load_all_packages(): package_dict = dict((n, load_component_by_name(n)) for n in list_packages()) return package_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _LoadPackages():\n return {module.__name__.split('.')[-1]: module for module in\n import_util.LoadModulesForPath(__path__, __name__)}", "def packages(self):\n\n if self._packages:\n return self._packages\n\n self._load()\n return self._packages", "def load_plugins(self):\n self.__doing('load_plugins')\n self.__do_if_not_done('bootstrap')\n if self.env.mode in ('dummy', 'unit_test'):\n return\n for package in self.packages:\n self.add_package(package)", "def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]", "def list_packages(self):\n\n # First extract loaded module names from sys.modules\n sys_modules = sys.modules.keys()\n\n packages = {}\n\n # First add moduels in sys.modules (built-ins,\n # preloads and already loaded ones)\n for name in sys_modules:\n d = self.find_package(name)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] }\n\n #import site\n # Loop through all directories in sys.path and check for modules\n # Dont iterate through <prefix>/lib directory\n libdir = os.path.join(sys.prefix, 'lib')\n\n walked = []\n for top_level in self.paths:\n if not os.path.isdir(top_level):\n continue\n\n # Dont iterate through libdir\n if os.path.abspath(top_level) == os.path.abspath(libdir):\n continue\n\n walked.append(top_level)\n for item in os.listdir(top_level):\n\n fullpath = os.path.join(top_level, item)\n if fullpath in walked: continue\n\n walked.append(fullpath)\n # Remove the extension\n idx = item.find('.')\n if idx != -1: item = item[:idx]\n d = self.find_package(item)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] } \n\n for key,item in packages.items():\n print\n print self.pkgTypeInfo(key)\n print\n\n # Print sorted\n listofitems = item.keys()\n listofitems.sort()\n\n for key2 in listofitems:\n print key2,':',item[key2]", "def set_installed_packages():\n global INSTALLED_PACKAGES, REQUIRED_VERSION\n if INSTALLED_PACKAGES:\n return\n\n if os.path.exists(BIN_PYTHON):\n pip = subprocess.Popen(\n (BIN_PYTHON, '-m', 'pip', 'freeze'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n (stdout, stderr) = pip.communicate()\n pip.wait()\n\n INSTALLED_PACKAGES = [normalize_package_name(r.decode().split('==')[0].lower()) for r in stdout.split()]\n REQUIRED_VERSION = next((package for package in INSTALLED_PACKAGES if re.match(r'^lore[!<>=]', package)), None)\n if REQUIRED_VERSION:\n REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]", "def _install_packages(packages):\n for package in packages:\n cuisine.package_ensure(package)", "def discover_all_plugins(self):\n for v in pkg_resources.iter_entry_points('dgit.plugins'):\n m = v.load()\n m.setup(self)", "def packages():", "def install_packages(self):\n for package in self.packages:\n utils.exec_cmd('yum install -v -y {0}'.format(package))", "def get_all_packages(self):\n return self._package_cache.values()", "def install_packages():\n\n require('environment', provided_by=env.environments)\n packages_file = os.path.join(PROJECT_ROOT, 'requirements', 'packages.txt')\n system.install_packages_from_file(packages_file)", "def _load_all_modules(path, names):\n 
module_names = []\n # For each module in the current directory...\n for importer, module_name, is_package in pkgutil.iter_modules(\n [os.path.dirname(path)]\n ):\n # print(\"importing:\", names + '.' + module_name)\n # Import the module.\n importlib.import_module(names + '.' + module_name)\n module_names.append(module_name)\n\n return module_names", "def handle_loadall(bot, ievent):\n plugs.loadall(plugin_packages, force=True)\n ievent.done()", "def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)", "def getMissingPackages(self, language=None, all=False, packages=None, showInstalled=False):\n if self._cache.broken_count > 0:\n raise SoftwareIndexBroken\n \n self.langpack_locales = {}\n self.pkg_translations = {}\n self.pkg_writing = {}\n filter_list = {}\n blacklist = []\n show = []\n self.missing = set()\n self.installed = set()\n self.system_pkgcode = ''\n \n for l in open(self.BLACKLIST):\n l = l.strip()\n if not l.startswith('#'):\n blacklist.append(l)\n \n for l in open(self.LANGCODE_TO_LOCALE):\n try:\n l = l.rstrip()\n if ':' in l:\n (pkgcode, locale) = l.split(':')\n else:\n pkgcode = l\n locale = l\n except ValueError:\n continue\n self.langpack_locales[locale] = pkgcode\n \n for l in open(self.PACKAGE_DEPENDS):\n if l.startswith('#'):\n continue\n try:\n l = l.rstrip()\n # sort out comments\n if l.find('#') >= 0:\n continue\n (c, lc, k, v) = l.split(':')\n except ValueError:\n continue\n if (c == 'tr' and lc == ''):\n filter_list[v] = k\n elif (c == 'wa' and lc != ''):\n if '|' in lc:\n for l in lc.split('|'):\n if not l in self.pkg_writing:\n self.pkg_writing[l] = []\n self.pkg_writing[l].append((\"%s\" % k, \"%s\" % v))\n else:\n if not lc in self.pkg_writing:\n self.pkg_writing[lc] = []\n self.pkg_writing[lc].append((\"%s\" % k, \"%s\" % v))\n\n # get list of all packages available on the system and filter them\n for item in self._cache.keys():\n if item in blacklist: \n continue\n for x in filter_list.keys():\n if item.startswith(x) and not item.endswith('-base'):\n # parse language code\n langcode = item.replace(x, '')\n #print \"%s\\t%s\" % (item, langcode)\n if langcode == 'zh':\n # special case: zh langpack split\n for langcode in ['zh-hans', 'zh-hant']:\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n elif langcode in self.langpack_locales.values():\n # langcode == pkgcode\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n else:\n # need to scan for LL-CC and LL-VARIANT codes\n for locale in self.langpack_locales.keys():\n if '_' in locale or '@' in locale:\n if '@' in locale:\n (locale, variant) = locale.split('@')\n else:\n variant = ''\n (lcode, ccode) = locale.split('_')\n if langcode in [\"%s-%s\" % (lcode, ccode.lower()),\n \"%s%s\" % (lcode, ccode.lower()),\n \"%s-%s\" % (lcode, variant),\n \"%s%s\" % (lcode, variant),\n \"%s-latn\" % lcode,\n \"%slatn\" % lcode,\n \"%s-%s-%s\" % (lcode, ccode.lower(), variant),\n \"%s%s%s\" % (lcode, ccode.lower(), variant)]:\n # match found, get matching pkgcode\n langcode = self.langpack_locales[locale]\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % 
filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n break\n\n if language:\n pkgcode = ''\n if language == 'zh-hans' or language == 'zh-hant':\n self.system_pkgcode = language\n elif language in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[language]\n else:\n # pkgcode = ll\n if '_' in language:\n (self.system_pkgcode) = language.split('_')[0]\n elif '@' in language:\n (self.system_pkgcode) = language.split('@')[0]\n else:\n self.system_pkgcode = language\n\n if packages:\n self.findPackages(self.system_pkgcode, packages)\n else:\n self.findPackages(self.system_pkgcode)\n \n elif all:\n # try all available languages\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist:\n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base'):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n\n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n\n else:\n # get a list of language-packs we have already installed or are going to install\n # 1. system locale\n system_langcode = self._localeinfo.getSystemDefaultLanguage()[0]\n if system_langcode == None:\n system_langcode = 'en_US'\n if system_langcode in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[system_langcode]\n # 2. installed language-packs\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist: \n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base') and \\\n (self._cache[item].is_installed or \\\n self._cache[item].marked_install):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n if self.system_pkgcode and \\\n not self.system_pkgcode in pkgcodes:\n pkgcodes.append(self.system_pkgcode)\n \n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n \n if showInstalled:\n show = self.missing | self.installed\n else:\n show = self.missing\n\n return show", "def load_plugins(self) -> None:\n import importlib\n import pkgutil\n import stactools\n\n # From https://packaging.python.org/guides/creating-and-discovering-plugins/#using-namespace-packages # noqa\n def iter_namespace(ns_pkg: ModuleType) -> Iterator[ModuleInfo]:\n # Specifying the second argument (prefix) to iter_modules makes the\n # returned name an absolute name instead of a relative one. 
This allows\n # import_module to work without having to do additional modification to\n # the name.\n return pkgutil.iter_modules(\n ns_pkg.__path__, # type: ignore # mypy issue #1422\n ns_pkg.__name__ + '.')\n\n discovered_plugins = {\n name: importlib.import_module(name)\n for finder, name, ispkg in iter_namespace(stactools)\n }\n\n for name, module in discovered_plugins.items():\n register_plugin = getattr(module, 'register_plugin', None)\n if register_plugin:\n register_plugin(self)", "def LoadInstallations(counter):\n process = subprocess.Popen([\"pip\", \"list\", \"--format=json\"],\n stdout=subprocess.PIPE)\n output, _ = process.communicate()\n installations = json.loads(output.decode())\n for i in installations:\n counter.labels(i[\"name\"], i[\"version\"]).inc()", "def load_packages(locker: Locker, lock_data: _TOMLDocument) -> List[Package]:\n locker._lock_data = lock_data\n repository = locker.locked_repository(with_dev_reqs=True)\n activate_dependencies(repository.packages)\n return repository.packages # type: ignore[no-any-return] # noqa: F723", "def add_packages(self, packages):\n for p in packages:\n self.add_package(p)", "def load_all(**options):\n\n return get_component(CachingPackage.COMPONENT_NAME).load_all(**options)", "def get_packages(self):\n raise NotImplementedError(\"get_packages is not implemented\")", "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "def getsitepackages():\n\tpass", "def get_all_packages(paths, extensions=['.py', '.ipynb'],\n include_imported_dependencies=False):\n if isinstance(paths, str):\n paths = [paths]\n\n all_packages = set()\n for path in paths:\n if os.path.isfile(path):\n basename, ext = os.path.splitext(path)\n file_dict = {ext: [path]}\n\n else:\n file_dict = find_all_files(path, extensions=extensions)\n\n for ext, files in file_dict.items():\n if ext not in parser_map:\n raise ValueError('File extension \"{0}\" is not supported.'\n .format(ext))\n\n for file in files:\n _packages = parser_map[ext](file)\n all_packages = all_packages.union(_packages)\n\n if include_imported_dependencies:\n init_modules = sys.modules.copy()\n\n # Now we have a list of package names, so we can import them and track\n # what other packages are imported as dependencies. 
If requested, we add\n # those to the package list as well\n for package_name in all_packages:\n try:\n importlib.import_module(package_name)\n except ImportError:\n # here, just skip if we can't import: a warning is issued later\n pass\n\n loaded_modules = sys.modules.copy()\n diff_modules = set(loaded_modules.keys()) - set(init_modules.keys())\n\n additional_modules = set()\n for module in diff_modules:\n top_level = module.split('.')[0]\n\n if top_level.startswith('_'):\n continue\n\n additional_modules.add(top_level)\n\n all_packages = all_packages.union(additional_modules)\n\n return all_packages", "def import_packages_global():\n return \"\"", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)", "def pre_install(self, installable_pkgs):\n pass", "def import_packages(filename:str, path:str=None):\r\n logging.info(\"Importing Package List From '{}'...\".format(filename))\r\n if path is None:\r\n path = get_site_packages_path()\r\n\r\n i = 1\r\n packages = open(filename, 'r').readlines()\r\n for p in packages:\r\n package, version = p.split(\"=\")\r\n logging.info(\"Installing Package {} ({} of {})...\".format(package, i, len(packages)))\r\n install(package, version, path)\r\n i += 1\r\n logging.info(\"Finished Importing Package List From '{}'\".format(filename))" ]
[ "0.7246274", "0.7029434", "0.684438", "0.675976", "0.6676979", "0.6629688", "0.65730417", "0.6394994", "0.6333747", "0.6324303", "0.63189465", "0.6227781", "0.6209056", "0.6157023", "0.6069455", "0.59823287", "0.59752005", "0.5964119", "0.5946842", "0.59437484", "0.5934385", "0.5925212", "0.59061795", "0.5898069", "0.5890754", "0.5861168", "0.5861116", "0.5852368", "0.58479434", "0.584673" ]
0.74235123
0
Creates batch of warping coordinates.
def _make_warp(batch_size, warp_height, warp_width, dtype): x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width), np.linspace(0, warp_height - 1, warp_height)) warp = np.concatenate((x.reshape([warp_height, warp_width, 1]), y.reshape([warp_height, warp_width, 1])), 2) warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]), [batch_size, 1, 1, 1]) warp += np.random.randn(*warp.shape) return warp.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_all_locations(grid, shape):", "def prep_ships(self):\n self.ships = Group()\n for ship_number in range(self.stats.ships_left):\n ship = Ship(self.ai_settings, self.screen)\n ship.rect.x = 10 + ship_number * ship.rect.width\n ship.rect.y = 10\n self.ships.add(ship)", "def generatePolygons():", "def _generate_batch(self, tasks: List):\n x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])\n y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])\n\n return x_batch, y_batch", "def prep_spaceships(self):\n self.spaceships = Group()\n for spaceship_number in range(self.stats.spaceships_left):\n spaceship = SmallSpaceship(self.ai_game)\n spaceship.rect.x = 20 + (spaceship_number * \n spaceship.rect.width) + (spaceship_number * 10)\n spaceship.rect.y = 20\n self.spaceships.add(spaceship)", "def prep_ships(self):\r\n\t\tself.ships=Group()\r\n\t\tfor ship_number in range(self.stats.ships_left):\r\n\t\t\tship=Ship(self.ai_settings, self.screen)\r\n\t\t\tship.transform()\r\n\t\t\tship.rect.x=10+ship_number*ship.rect.width\r\n\t\t\tship.rect.y=10\r\n\t\t\tself.ships.add(ship)", "def island_procreate(self):\n for y in self.island_map:\n for cell in y:\n cell.procreate()", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def shift_point_cloud(batch_data, shift_range=0.1):\n B, N, C = batch_data.shape\n shifts = np.random.uniform(-shift_range, shift_range, (B,3))\n for batch_index in range(B):\n batch_data[batch_index,:,:] += shifts[batch_index,:]\n return batch_data", "def shot_geolocations(self) -> geopandas.array.GeometryArray:\n geolocations = np.array(\n [shapely.geometry.Point(lon, lat) for lon, lat in self.shot_lon_lat],\n dtype=shapely.geometry.Point,\n )\n\n return geopandas.array.GeometryArray(geolocations, crs=WGS84)", "def new_ships_set(self):\n\n with open(SHIPS_TEMPLATE, \"r\") as file:\n for line in file:\n size, text_cords, direction = line.strip().split(\" \")\n self.__create_ship(int(size), text_cords, direction)", "def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)", "def to_world(self, x, y, **kwargs):", "def prep_ships(self):\n\t\t\tself.ships = Group()\n\t\t\tfor ship_number in range(self.stats.ships_left):\n\t\t\t\tship = Ship(self.ai_settings, self.screen)\n\t\t\t\tship.rect.x = 10 + ship_number * ship.rect.width\n\t\t\t\tship.rect.y = 10\n\t\t\t\tself.ships.add(ship)", "def prep_ships(self):\n self.ships = Group()\n for ship_id in range(self.game.ships_remaining):\n ship = Ship(self.game)\n ship.rect.x = 10 + ship_id * ship.rect.width\n ship.rect.y = 10\n self.ships.add(ship)", "def CreateTargetGeoMap(latS, latN, lonW, lonE, latlen, lonlen):\n\n lat_grid = np.linspace(latS, 
latN, latlen)\n lon_grid = np.linspace(lonW, lonE, lonlen)\n\n return lat_grid,lon_grid", "def test_spreading_players(self):\n params = [3, 4, 11, 20]\n w = gen.generate_map(height=50, width=80, params=params)\n coords = s.spread_across_the_map(w, 4)\n for c in coords:\n x = c[0]\n y = c[1]\n self.assertNotEqual(w[x][y], 0)\n self.assertNotEqual(w[x][y], 3) # uncomment the block to see an overview\n # w[x][y] = 4\n # image = img.get_map_overview(w)\n # image2 = img.get_resized_map_overview(image, 781, 521)\n # image2.show()", "def flow_to_warp(flow):\n batch, _, ht, wd = flow.shape\n coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))\n coords = torch.stack(coords[::-1], dim=0).float()\n coords = coords[None].repeat(batch, 1, 1, 1)\n return coords + flow", "def shatter_batch(self, batch):\n return [tuple([elem[i] for elem in batch])\n for i in range(batch.size)]", "def prepare(self):\n per_col = 5\n spray_diameter = 10\n jids = []\n for i in range(self.gom_count):\n # Create JIDs\n gom_jid = f\"{settings.AGENT_NAMES['gom_base']}{i + 1}@{settings.HOST}\"\n tr_jid = f\"{settings.AGENT_NAMES['tr_base']}{i + 1}@{settings.HOST}\"\n jids.append((gom_jid, tr_jid))\n\n # Create GoM and TR positions\n y = (i % per_col) * 48 - 96\n x = int(i / per_col) * 64 - 32\n xo = random.gauss(0, spray_diameter)\n yo = random.gauss(0, spray_diameter)\n\n self.factory_map[gom_jid] = Point(x=float(x), y=float(y))\n self.tr_map[tr_jid] = Point(x=float(x + xo), y=float(y + yo))\n\n return jids", "def generate_world(x_size, y_size):\n\n\tdef make_blank_world():\n\t\t\"\"\"\n\t\tCreates an x-by-y list of lists of zeroes.\n\t\t\"\"\"\n\t\tblank_array = [[Blank() for j in range(y_size + 1)] for i in range(x_size + 1)]\n\t\treturn blank_array\n\n\n\tdef check_surroundings(x_coord, y_coord, value):\n\t\t\"\"\"\n\t\tIf the variable world has already been defined, it checks all x and y coords within one square (aka, checks the 8 surrounding squares) for a given value. 
If that value is present in 1 or more squares, returns True; else, False.\n\t\t\"\"\"\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\texamining = world[x_coord - 1 + i][y_coord - 1 + j]\n\t\t\t\tif examining.name == value:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\treturn False\n\n\n\tworld = make_blank_world()\n\n\tworld[random.randint(2, x_size-2)][random.randint(2, y_size-2)] = Water()\n\n\tfor i in range(x_size):\n\t\tfor j in range(y_size):\n\t\t\tseed = random.random()\n\t\t\tif check_surroundings(i, j, 'water'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Water()\n\t\t\t\telif seed >= 0.4:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telif not check_surroundings(i, j, 'tree'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telse:\n\t\t\t\tworld[i][j] = Grass()\n\treturn [row[:y_size+1] for row in world[:x_size+1]]", "def make_locations(x_width, y_height, count, x_offset):\n bottom = set()\n while len(bottom) < count:\n loc = random_location(x_offset, x_offset + x_width, 0, y_height)\n bottom.add(loc)\n return bottom", "def _create_room(new_map, room):\n for x in range(room.x1 + 1, room.x2):\n for y in range(room.y1 + 1, room.y2):\n new_map.terrain[x][y] = 1", "def generate_coordinates(coords):\n x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()\n y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))\n z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))\n\n return x, y, z", "def stamp_walls_and_make_lists(n, m, num_walls, square_size):\r\n global all_squares, free_squares, wall_squares, start_x, start_y\r\n # wall turtle stamper setup\r\n wall_stamper = turtle.clone()\r\n wall_stamper.shape(\"square\")\r\n wall_stamper.color(\"gray\")\r\n wall_stamper.hideturtle()\r\n wall_stamper.penup()\r\n # make the random wall coordinates\r\n start_x = - (m * square_size) // 2\r\n start_y = - (n * square_size) // 2\r\n # wall_coords = [(random.randint(0,n-1), random.randint(0,m-1)) for c in range(num_walls)]\r\n wall_coords = set() # to eliminate repeats\r\n while len(wall_coords) < num_walls: # to get exact number of walls\r\n random_pos = (random.randint(0,n-1), random.randint(0,m-1))\r\n wall_coords.add(random_pos)\r\n wall_coords = list(wall_coords) # easier way to deal with the coords\r\n # loop over all coordinates and construct the lists\r\n for r in range(n):\r\n for c in range(m):\r\n this_square = (start_x + (c * square_size), start_y + (r * square_size))\r\n if (r,c) in wall_coords: # if it's a wall\r\n wall_squares.append(this_square)\r\n wall_stamper.goto(this_square)\r\n wall_stamper.stamp()\r\n else: # it's a free square\r\n free_squares.append(this_square)\r\n all_squares.append(this_square)", "def place_targets():\n\n \n coords = []\n while len(coords)<self.N_targets:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y)\n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords +=[p]\n self.coordinates__targets = coords", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n 
points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def add_spawns_outside_boss_doors(self: WWRandomizer):\n \n rooms_to_add_new_spawns_to = [\n (\"M_NewD2\", 10, TGDR, None, 11),\n #(\"kindan\", 16, TGDR, None, 13), # Already has a spawn, ID 1.\n (\"Siren\", 18, TGDR, None, 13),\n (\"sea\", 1, ACTR, 1, 56),\n (\"M_Dai\", 15, TGDR, None, 17),\n (\"kaze\", 12, TGDR, None, 13),\n ]\n \n for stage_name, room_number, chunk, layer, boss_door_index in rooms_to_add_new_spawns_to:\n new_spawn_id = 27\n \n dzs = self.get_arc(\"files/res/Stage/%s/Stage.arc\" % stage_name).get_file(\"stage.dzs\", DZx)\n dzr = self.get_arc(\"files/res/Stage/%s/Room%d.arc\" % (stage_name, room_number)).get_file(\"room.dzr\", DZx)\n \n if chunk == TGDR:\n dzx_for_door = dzs\n else:\n dzx_for_door = dzr\n \n door = dzx_for_door.entries_by_type_and_layer(chunk, layer=layer)[boss_door_index]\n spawn_dist_from_door = 200\n y_rot = door.y_rot\n if door.from_room_num != room_number and door.from_room_num != 63:\n y_rot = (y_rot + 0x8000) % 0x10000\n y_rot_degrees = y_rot * (90.0 / 0x4000)\n x_offset = math.sin(math.radians(y_rot_degrees)) * spawn_dist_from_door\n z_offset = math.cos(math.radians(y_rot_degrees)) * spawn_dist_from_door\n x_pos = door.x_pos + x_offset\n y_pos = door.y_pos\n z_pos = door.z_pos + z_offset\n \n if stage_name in [\"M_Dai\", \"kaze\"]:\n # Earth and Wind temple spawns must be in the stage instead of the room or the game will crash.\n dzx_for_spawn = dzs\n else:\n dzx_for_spawn = dzr\n \n spawns = dzx_for_spawn.entries_by_type(PLYR)\n assert len([spawn for spawn in spawns if spawn.spawn_id == new_spawn_id]) == 0\n \n new_spawn = dzx_for_spawn.add_entity(PLYR)\n new_spawn.spawn_type = 0\n new_spawn.room_num = room_number\n new_spawn.x_pos = x_pos\n new_spawn.y_pos = y_pos\n new_spawn.z_pos = z_pos\n new_spawn.y_rot = y_rot\n new_spawn.spawn_id = new_spawn_id\n \n dzx_for_spawn.save_changes()", "def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(PShipCell(position))\n self._update_battle_position(self._cells)", "def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)" ]
[ "0.56328344", "0.5563046", "0.54961437", "0.54955286", "0.5456121", "0.542565", "0.53909725", "0.536948", "0.536837", "0.53514147", "0.53505296", "0.5334579", "0.5306303", "0.5297764", "0.52908945", "0.52849275", "0.5259853", "0.5256899", "0.52537173", "0.5246708", "0.52387613", "0.5217389", "0.5209394", "0.5208833", "0.51964825", "0.51928407", "0.5185849", "0.51747185", "0.51562905", "0.5136792" ]
0.5722996
0
does this graph have stereo of any kind?
def has_stereo(gra): return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connected(geo, stereo=True):\n return len(components_graph(geo, stereo=stereo)) == 1", "def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return True", "def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return False", "def process_stereo(self, image1, image2, disp1, disp2):\n return _elas.Elas_process_stereo(self, image1, image2, disp1, disp2)", "def is_vertex(self): \n return False", "def is_multigraph(G):\n return G.is_multigraph()", "def bond_parity_evaluator_to_local_stereo_(gra):\n return bond_parity_evaluator_from_local_stereo_(gra)", "def GetStereoisomerCount(m, options=...): # -> Any:\n ...", "def reflect_local_stereo(gra):\n atm_par_dct = atom_stereo_parities(gra)\n atm_par_dct = dict_.transform_values(\n atm_par_dct, lambda x: x if x is None else not x)\n gra = set_atom_stereo_parities(gra, atm_par_dct)\n return gra", "def is_simplex(self):\n return self.affine_dimension()+1==self.n_vertices()", "def atom_parity_evaluator_to_local_stereo_(gra):\n return atom_parity_evaluator_from_local_stereo_(gra)", "def EnumerateStereoisomers(m, options=..., verbose=...): # -> Generator[Unknown, None, None]:\n ...", "def components_graph(geo, stereo=True):\n return automol.graph.connected_components(graph(geo, stereo=stereo))", "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def is_vert(e) :\n f = e[0][0]\n for t in e :\n if f != t[0] :\n return False\n return True", "def is_directed(G):\n return G.is_directed()", "def __filterEdges(self):", "def graph(self):\n ...", "def is_vertex(self):\n return True", "def is_vertex(self):\n return True", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def is_vertex(self):\n return False", "def _similar_stereo(geo, geoi, arg=None):\n _ = arg # Added just to make wrapper function work\n ich = inchi(geo)\n ichi = inchi(geoi)\n return bool(ich == ichi)", "def isScalene(self):\n\t\treturn self.a != self.b != self.c", "def is_planar(G):\n result=True\n bad_minor=[]\n n=len(G.nodes())\n iterazione=0\n if n>5:\n print 'N >5'\n\n for subnodes in it.combinations(G.nodes(),6):\n iterazione+=1\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if bipartite.is_bipartite(G):# check if the graph G has a subgraph K(3,3)\n X, Y = bipartite.sets(G)\n if len(X)==3:\n result=False\n 
bad_minor=subnodes\n return result,bad_minor\n iterazione=0\n if n>4 and result:\n print 'N >4'\n\n for subnodes in it.combinations(G.nodes(),5):\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if len(subG.edges())==10:# check if the graph G has a subgraph K(5)\n result=False\n bad_minor=subnodes\n return result,bad_minor\n\n return result,bad_minor", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def is_directed(self):\n return True", "def _get_full_graph(self):", "def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()", "def stereo2square(x,y):\n # make sure we stay where half2square is fast and reliable\n msky = y <= 0.\n mskx = x <= 0.\n x = np.where(mskx, x, -x)\n y = np.where(msky, y, -y)\n x,y = half2square(stereo2half(x,y))\n x = np.where(mskx, x, 1.-x)\n y = np.where(msky, y, 1.-y)\n return x,y" ]
[ "0.5916504", "0.59065694", "0.58779186", "0.57271165", "0.5674469", "0.5657861", "0.5590091", "0.5554247", "0.555055", "0.5433868", "0.54046094", "0.535443", "0.5328535", "0.5321897", "0.5259553", "0.52561355", "0.52349555", "0.52111995", "0.51804113", "0.51804113", "0.5168713", "0.5158819", "0.51583564", "0.5144581", "0.5124784", "0.510519", "0.50939906", "0.50683564", "0.50678015", "0.5066129" ]
0.6523304
0
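A minimal self-contained sketch of the rule the has_stereo document above encodes (a graph has stereo as soon as any atom or bond parity is assigned, i.e. non-None). The dictionaries below are stand-ins, not the library's actual graph representation:

atom_parities = {0: None, 1: True, 2: None}      # assumed toy parity assignments
bond_parities = {frozenset({0, 1}): None}

def _assigned_keys(par_dct):
    return {key for key, par in par_dct.items() if par is not None}

has_any_stereo = bool(_assigned_keys(atom_parities) or _assigned_keys(bond_parities))
print(has_any_stereo)  # True: atom 1 carries a parity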
all stereomers compatible with this graph's assignments
def substereomers(gra):
    _assigned = functools.partial(
        dict_.filter_by_value, func=lambda x: x is not None)

    known_atm_ste_par_dct = _assigned(atom_stereo_parities(gra))
    known_bnd_ste_par_dct = _assigned(bond_stereo_parities(gra))

    def _is_compatible(sgr):
        atm_ste_par_dct = _assigned(atom_stereo_parities(sgr))
        bnd_ste_par_dct = _assigned(bond_stereo_parities(sgr))
        _compat_atm_assgns = (set(known_atm_ste_par_dct.items()) <=
                              set(atm_ste_par_dct.items()))
        _compat_bnd_assgns = (set(known_bnd_ste_par_dct.items()) <=
                              set(bnd_ste_par_dct.items()))
        return _compat_atm_assgns and _compat_bnd_assgns

    sgrs = tuple(filter(_is_compatible, stereomers(gra)))
    return sgrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_full_graph(self):", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def assign_passengers(self):\n\n # Update the state of the parallel server after every assignment.\n self.parallel_server.update_state()\n\n # While the assignment agent's queue is not empty and there is space\n # to assign passengers in the parallel block...\n while self.parallel_server.has_space_in_a_server_queue is True and \\\n len(self.queue) > 0:\n\n # Pop the first passenger in line and assign to the shortest queue.\n tmp = self.queue.popleft()\n self.parallel_server.min_queue.queue.append(tmp)\n\n # Update the state of the parallel server after every assignment.\n self.parallel_server.update_state()", "def initialize_assignment(self):\n # Initialize empty frozensets for each agent\n init_assignment = frozendict({a:frozenset() for a in self.agents})\n \n # Add hard assignments\n if self.hard_assignment:\n init_dict = dict(init_assignment)\n for a, t in self.hard_assignment.items():\n init_dict[a] = init_dict[a] | t\n init_assignment = frozendict(init_dict)\n \n return init_assignment", "def referents(self) -> Collection[\"BaseAssignment\"]:\n return self.__assignments", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def compare_graphs(self):\n\t\tpass", "def setAvailSystems(self):\n self.availSystems = []\n if self.toSystem != self.fromSystem:\n self.availSystems.append(self.fromSystem)\n return\n else:\n mySystem = self.myGalaxy.systems[self.fromSystem]\n if mySystem.myEmpireID == self.empireID or globals.diplomacy[self.myGalaxy.empires[mySystem.myEmpireID].diplomacy[self.empireID].diplomacyID]['alliance'] == 1:\n self.availSystems = mySystem.getAllConnections()\n else:\n for otherSystemID in mySystem.connectedSystems:\n otherSystem = self.myGalaxy.systems[otherSystemID]\n if otherSystem.myEmpireID == self.empireID or globals.diplomacy[self.myGalaxy.empires[otherSystem.myEmpireID].diplomacy[self.empireID].diplomacyID]['move'] == 1:\n self.availSystems.append(otherSystemID)\n self.oldAvailSystems = copy.copy(self.availSystems)", "def canonical_enantiomer(gra):\n ste_atm_keys = atom_stereo_keys(gra)\n if not ste_atm_keys:\n can_enant_gra = canonical(gra)\n is_reflected = None\n else:\n # Calculate canonical keys for the unreflected graph\n ugra = gra\n uloc_gra, ucan_key_dct = _to_local_stereo_with_class_indices(\n ugra, break_ties=True)\n\n # Reflect the 
graph in the local stereo representation\n rloc_gra = reflect_local_stereo(uloc_gra)\n\n # Determine canonical keys for the reflected graph\n rgra, rcan_key_dct = _from_local_stereo_with_class_indices(\n rloc_gra, break_ties=True)\n\n # Convert both to canonical graphs\n ucan_gra = relabel(ugra, ucan_key_dct)\n rcan_gra = relabel(rgra, rcan_key_dct)\n\n # Read and compare their parities\n ste_atm_keys = sorted(atom_stereo_keys(ucan_gra))\n assert ste_atm_keys == sorted(atom_stereo_keys(rcan_gra)), (\n \"Sanity check. This should always be true.\")\n uatm_par_dct = atom_stereo_parities(ucan_gra)\n ratm_par_dct = atom_stereo_parities(rcan_gra)\n\n uatm_pars = dict_.values_by_key(uatm_par_dct, ste_atm_keys)\n ratm_pars = dict_.values_by_key(ratm_par_dct, ste_atm_keys)\n\n # If the parities are the same, this is not an enantiomer\n if uatm_pars == ratm_pars:\n can_enant_gra = ucan_gra\n is_reflected = None\n # If the unreflected parities have lower sort order, don't reflect\n elif uatm_pars < ratm_pars:\n can_enant_gra = ucan_gra\n is_reflected = False\n # If the reflected parities have lower sort order, reflect\n else:\n can_enant_gra = rcan_gra\n is_reflected = True\n\n return can_enant_gra, is_reflected", "def stereomers(gra):\n bool_vals = (False, True)\n\n def _expand_atom_stereo(sgr):\n atm_ste_keys = stereogenic_atom_keys(sgr)\n nste_atms = len(atm_ste_keys)\n sgrs = [set_atom_stereo_parities(sgr, dict(zip(atm_ste_keys,\n atm_ste_par_vals)))\n for atm_ste_par_vals\n in itertools.product(bool_vals, repeat=nste_atms)]\n return sgrs\n\n def _expand_bond_stereo(sgr):\n bnd_ste_keys = stereogenic_bond_keys(sgr)\n nste_bnds = len(bnd_ste_keys)\n sgrs = [set_bond_stereo_parities(sgr, dict(zip(bnd_ste_keys,\n bnd_ste_par_vals)))\n for bnd_ste_par_vals\n in itertools.product(bool_vals, repeat=nste_bnds)]\n return sgrs\n\n last_sgrs = []\n sgrs = [without_stereo_parities(gra)]\n\n while sgrs != last_sgrs:\n last_sgrs = sgrs\n sgrs = list(itertools.chain(*map(_expand_atom_stereo, sgrs)))\n sgrs = list(itertools.chain(*map(_expand_bond_stereo, sgrs)))\n\n return tuple(sorted(sgrs, key=frozen))", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def remove_stems(graph = None):\n\tfor x,y in basepairs(graph = graph):\n\t\tgraph.remove_node(x)\n\t\tgraph.remove_node(y)", "def find_allocation_with_min_shering(self):\n for consumption_graph in self.graph_generator.generate_all_consumption_graph():\n self.find_allocation_for_graph(consumption_graph)\n return self.min_sharing_allocation", "def __getstate__(self):\n state = Object.__getstate__(self)\n state['_strain'] = set()\n return state", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def _estimate_assignments(self, graph: GraphRepresentation) -> None:\n embed_graph = augment_diagonal(graph)\n latent = AdjacencySpectralEmbed(\n n_components=self.n_components, **self.embed_kws\n ).fit_transform(embed_graph)\n if isinstance(latent, tuple):\n latent = np.concatenate(latent, axis=1)\n gc = GaussianCluster(\n min_components=self.min_comm,\n max_components=self.max_comm,\n **self.cluster_kws\n )\n vertex_assignments = gc.fit_predict(latent) # type: ignore\n self.vertex_assignments_ = vertex_assignments", "def 
all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))", "def sources(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if self.rank == self.midpoint - 1 and partner == self.right:\n partners = set()\n elif self.rank == self.midpoint - 1 and partner == self.right - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n else:\n partner = self.left + (self.rank - self.midpoint)\n if self.rank == self.right - 1 and partner == self.midpoint:\n partners = set()\n elif self.rank == self.right - 1 and partner == self.midpoint - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n\n return partners", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def on_serialize(self):\n for sw in self.sig_words:\n sw[1] = set(map(lambda s: s.name(), sw[1]))\n self.most_common_synsets = list(map(lambda s: s.name(), self.most_common_synsets))", "def alleles(self) -> set[str]:\n return {self.ancestral_state} | {m.derived_state for m in self.mutations}", "def known_safes(self):\n if self.count==0:\n return self.cells\n return set()\n #raise NotImplementedError", "def assign(self, starts):\n # Initialize the set of open and closed nodes, and the connection map\n open_set, closed_set = starts, set()\n \n # Initialize a map of assignments and associated profits\n profits = {s:0 for s in starts}\n \n while open_set:\n\n # Explore the most promising node\n current = max(open_set, key=lambda n: profits[n])\n \n # Move the current node from the open set to the closed set\n open_set.remove(current)\n closed_set.add(current)\n \n # Track if assignment is complete\n assignment_finished = True\n \n # Determine all possible next assignment steps\n for agent in self.agents:\n # Determine possible tasks the agent may be assigned to\n poss_tasks = self.assign_agent(agent, current)\n \n # If assignments are possible, the assignment is not complete\n if poss_tasks: assignment_finished = False\n \n for task in poss_tasks:\n # Determine next assignment step\n next_dict = dict(current)\n next_dict[agent] = next_dict[agent] | {task}\n next_assignment = frozendict(next_dict)\n \n # If we have already explored this assignment, continue\n if next_assignment in closed_set:\n continue\n # Else add the assignment to the open set\n else:\n open_set.add(next_assignment)\n profits[next_assignment] = self.calc_profit(next_assignment)\n \n # If assignment is finished, add it to finished assignments\n if assignment_finished:\n \n # Check if assignment is also complete\n if self.complete and not self.is_complete(current):\n continue\n \n self.finished_assignments[current] = profits[current]\n \n # Update current fair / max profit and print if applicable\n # Procedure for fair profit (max profit tiebreaker)\n if self.fair:\n cur_fair_profit = self.calc_fair_profit(current)\n if ((cur_fair_profit > self.fair_profit) or \n (cur_fair_profit == self.fair_profit and\n profits[current] > self.max_profit)):\n self.fair_profit = cur_fair_profit\n self.max_profit = profits[current]\n self.print_assignment(current, profits[current])\n elif 
(self.verbose and profits[current] >= self.max_profit\n and cur_fair_profit >= self.fair_profit):\n self.print_assignment(current, profits[current])\n # Procedure for maximum profit\n else:\n if profits[current] > self.max_profit:\n self.max_profit = profits[current]\n self.print_assignment(current, profits[current])\n elif self.verbose and profits[current] >= self.max_profit:\n self.print_assignment(current, profits[current])", "def infer_reuse_pattern(fgraph, outputs_to_disown):\r\n rval = set()\r\n for o in outputs_to_disown:\r\n view_tree_set(alias_root(o), rval)\r\n # remove from rval all of the inputs, constants, values.\r\n rval = set(r for r in rval if r.owner is not None)\r\n\r\n return rval", "def isValidCompatible(cls,root):\n valid = True\n # the order of node types in chains is restricted\n # (this would be easier if the data was in a Corpus-instance)\n allowed = NX.XDiGraph(selfloops=True)\n\n # continue from here!\n allowed.add_edge('Physical','Physical')\n allowed.add_edge('Property','Physical')\n allowed.add_edge('Process','Physical')\n allowed.add_edge('Regulation','Physical')\n\n allowed.add_edge('Property','Property')\n allowed.add_edge('Process','Property')\n allowed.add_edge('Regulation','Property')\n\n allowed.add_edge('Property','Process')\n# allowed.add_edge('Process','Process')\n allowed.add_edge('Regulation','Process')\n\n allowed.add_edge('Property','Regulation')\n# allowed.add_edge('Process','Regulation')\n allowed.add_edge('Regulation','Regulation')\n\n mapping = {}\n for a in root.find(\"ontologies\").findall(\"ontology\"):\n if a.attrib['id']=='interaction':\n for x in a.getiterator(\"ontnode\"):\n if x.attrib.has_key('effect') and x.attrib['effect'].endswith('regulation'):\n t = 'Regulation'\n else:\n t = x.attrib['onttype']\n mapping[x.attrib['id']] = t\n \n for a in root.getiterator(\"relannotation\"):\n t2type = dict( [(x.attrib['id'],x.attrib['type'])\n for x in a.findall(\"reltoken\")] )\n n2t = dict( [(x.attrib['id'],x.attrib['token'])\n for x in a.findall(\"relnode\")] )\n for x in a.findall(\"reledge\"):\n bt = t2type[n2t[x.attrib['bgn']]]\n et = t2type[n2t[x.attrib['end']]]\n bgn = mapping[bt]\n end = mapping[et]\n if not allowed.has_edge(bgn,end):\n printError(cls,inspect.stack()[1][3],\n \"%s -- %s (%s) -> %s (%s) is not a valid edge\"%\n (x.attrib['id'].split('.')[1],bgn,bt,end,et))\n valid = False\n \n return(valid)", "def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]", "def accessible(g, s):\n\tacc = set()\n\tacc.add(s)\n\tlist = [s]\n\twhile len(list) > 0:\n\t\tx = list[0]\n\t\tlist = list[1 : ]\n\t\tfor y in g.parseNout(x):\n\t\t\tif y not in acc:\n\t\t\t\tacc.add(y)\n\t\t\t\tlist.append(y)\n\treturn acc", "def duplicity(self, serder, sigers):\n pass" ]
[ "0.54077244", "0.53233546", "0.529461", "0.5199094", "0.51901215", "0.5178197", "0.5165792", "0.5160593", "0.5153728", "0.5084569", "0.505208", "0.50347584", "0.501747", "0.50132996", "0.49907267", "0.49830773", "0.49814856", "0.49584734", "0.4938241", "0.49313956", "0.49264503", "0.49250823", "0.49154085", "0.49143997", "0.49108982", "0.4910014", "0.49065053", "0.48904037", "0.48866665", "0.48623732" ]
0.5362716
1
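The substereomers document above keeps only those expanded stereomers whose parities extend the already-assigned ones; a self-contained stand-in for that compatibility test using plain dicts (toy values, not the library's real graph objects):

known = {1: True}                          # parity already fixed on atom 1
candidates = [{1: True, 3: False},
              {1: False, 3: False},        # contradicts the known assignment
              {1: True, 3: True}]

compatible = [cand for cand in candidates
              if set(known.items()) <= set(cand.items())]
print(compatible)  # the two candidates that keep atom 1 == True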
get the neighbor keys of an atom sorted by stereo priority
def stereo_sorted_atom_neighbor_keys(gra, atm_key, atm_ngb_keys):
    atm_ngb_keys = list(atm_ngb_keys)

    # explicitly create an object array because otherwise the argsort
    # interprets [()] as []
    atm_pri_vecs = numpy.empty(len(atm_ngb_keys), dtype=numpy.object_)
    atm_pri_vecs[:] = [stereo_priority_vector(gra, atm_key, atm_ngb_key)
                       for atm_ngb_key in atm_ngb_keys]

    sort_idxs = numpy.argsort(atm_pri_vecs)
    sorted_atm_ngb_keys = tuple(map(atm_ngb_keys.__getitem__, sort_idxs))
    return sorted_atm_ngb_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atom_stereo_keys(sgr):\n atm_ste_keys = dict_.keys_by_value(atom_stereo_parities(sgr),\n lambda x: x in [True, False])\n return atm_ste_keys", "def _keys_in_sorted(move):\n return (move.picking_id.id, move.product_id.responsible_id.id)", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def sortKey( self, mode, matrix ):\n # distance calculation...\n distance = polygonsort.distances(\n LOCAL_ORIGIN,\n modelView = matrix,\n projection = mode.getProjection(),\n viewport = mode.getViewport(),\n )[0]\n if self.appearance:\n key = self.appearance.sortKey( mode, matrix )\n else:\n key = (False,[],None)\n if key[0]:\n distance = -distance\n return key[0:2]+ (distance,) + key[1:]", "def stereogenic_atom_keys(gra):\n gra = without_bond_orders(gra)\n gra = explicit(gra) # for simplicity, add the explicit hydrogens back in\n atm_keys = dict_.keys_by_value(atom_bond_valences(gra), lambda x: x == 4)\n atm_keys -= atom_stereo_keys(gra)\n\n atm_ngb_keys_dct = atom_neighbor_keys(gra)\n\n def _is_stereogenic(atm_key):\n atm_ngb_keys = list(atm_ngb_keys_dct[atm_key])\n pri_vecs = [stereo_priority_vector(gra, atm_key, atm_ngb_key)\n for atm_ngb_key in atm_ngb_keys]\n return not any(pv1 == pv2\n for pv1, pv2 in itertools.combinations(pri_vecs, r=2))\n\n ste_gen_atm_keys = frozenset(filter(_is_stereogenic, atm_keys))\n return ste_gen_atm_keys", "def keys():", "def _common_keypoints(self, *others):\n matches = self._match_table.dropna(0)\n keypoints = []\n for other in others:\n indices = matches[other.position.id].astype(int).values\n # the coordinates have to be flipped for later processing, hence the ::-1\n keypoints.append(other.keypoints[indices, ::-1])\n return np.stack(keypoints, axis=1)", "def stereo_priority_vector(gra, atm_key, atm_ngb_key):\n bbn_keys = backbone_keys(gra)\n exp_hyd_keys = explicit_hydrogen_keys(gra)\n\n if atm_ngb_key not in bbn_keys:\n assert atm_ngb_key in exp_hyd_keys\n assert frozenset({atm_key, atm_ngb_key}) in bonds(gra)\n pri_vec = ()\n else:\n gra = implicit(gra)\n atm_dct = atoms(gra)\n bnd_dct = bonds(gra)\n assert atm_key in bbn_keys\n assert frozenset({atm_key, atm_ngb_key}) in bnd_dct\n\n # here, switch to an implicit graph\n atm_ngb_keys_dct = atom_neighbor_keys(gra)\n\n def _priority_vector(atm1_key, atm2_key, seen_keys):\n # we keep a list of seen keys to cut off cycles, avoiding infinite\n # loops\n\n bnd_val = bnd_dct[frozenset({atm1_key, atm2_key})]\n atm_val = atm_dct[atm2_key]\n\n bnd_val = _replace_nones_with_negative_infinity(bnd_val)\n atm_val = _replace_nones_with_negative_infinity(atm_val)\n\n if atm2_key in seen_keys:\n ret = (bnd_val,)\n else:\n seen_keys.update({atm1_key, atm2_key})\n atm3_keys = atm_ngb_keys_dct[atm2_key] - {atm1_key}\n if atm3_keys:\n next_vals, seen_keys = zip(*[\n _priority_vector(atm2_key, atm3_key, seen_keys)\n for atm3_key in atm3_keys])\n ret = (bnd_val, atm_val) + next_vals\n else:\n ret = (bnd_val, atm_val)\n\n return ret, seen_keys\n\n pri_vec, _ = _priority_vector(atm_key, atm_ngb_key, set())\n\n return pri_vec", "def get_neighbors(self):\n \n return self.adjacent.keys()", "def get_neighbours(self):\n return self.points_to.keys()", "def sources(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if self.rank == self.midpoint - 1 and partner == self.right:\n partners = set()\n elif self.rank == self.midpoint - 1 and partner == 
self.right - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n else:\n partner = self.left + (self.rank - self.midpoint)\n if self.rank == self.right - 1 and partner == self.midpoint:\n partners = set()\n elif self.rank == self.right - 1 and partner == self.midpoint - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n\n return partners", "def sortedKeys(self):\n sortedItems = self.items()\n compare = lambda x, y: sign(y[1] - x[1])\n sortedItems.sort(cmp=compare)\n return [x[0] for x in sortedItems]", "def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1", "def sortedKeys(self):\n sortedItems = list(self.items())\n\n def compare(x, y): return sign(y[1] - x[1])\n sortedItems.sort(cmp=compare)\n return [x[0] for x in sortedItems]", "def theKNearNeibor(self,user_rating,movie_user,user,k = 10):\n neighbors = set()\n user_list = user_rating[user] #{movie1:rate,movie2:rate,...}\n list = []\n\n for movie in user_list.keys():\n for neighbor in movie_user[movie]:\n if neighbor != user and neighbor not in neighbors:\n neighbors.add(neighbor)\n for nei in neighbors:\n similarity = self.calSimilarity(user_rating,user,nei)\n list.append([similarity,nei])\n list.sort(key=lambda x:x[0],reverse=True)\n return list[:k]", "def vertices(self):\r\n return self.adjacent.keys()", "def keys(self):\n klst = list(self._maps.keys())\n klst.sort()\n return klst", "def required_by(self, node):\n return sorted(self.__reverse_map[node], key=node_key)", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def bfs_keys(current_pt, grid, found_keys):\n keys = {}\n pt_distances = {current_pt: 0}\n bfs = deque([current_pt])\n\n while bfs:\n new_pt = bfs.popleft()\n\n for ngbr in get_neighbours(new_pt, grid):\n if ngbr in pt_distances: # already visited this point\n continue\n\n pt_distances[ngbr] = pt_distances[new_pt] + 1\n\n point_type = grid[ngbr[0]][ngbr[1]]\n\n if point_type.isupper() and point_type.lower() not in found_keys: # door and do not have key\n continue\n elif point_type.islower() and point_type not in found_keys: # found key\n keys[point_type] = ngbr, pt_distances[ngbr]\n else:\n bfs.append(ngbr)\n return keys", "def Keys(self) -> _n_1_t_4:", "def secondary_keys(self):", "def get_closest_node(self, point, n=1):\n n = min(n,len(self.nodes))#prevent index error\n if n > 1:\n tmp = zip(*self.nkdtree.query(point,n))\n return [(d, self.nkdtree_keys[i]) for d,i in tmp]\n else:\n dist, id = self.nkdtree.query(point,n)\n return [(dist, self.nkdtree_keys[id])]", "def keys_sorted_by_frequency(self, cutoff=100):\n return [key for key, _ in self.counter.most_common()][:cutoff]", "def keysAll():", "def getKerningPairsRef(self, font):\n kerningRef = font.kerning.keys()[:]\n\n for k in kerningRef:\n left, right = k\n\n if left in font.groups:\n groupGlyphs = font.groups[left]\n groupGlyphs.sort()\n # get first glyphname in the group\n leftRef = groupGlyphs[0]\n else:\n leftRef = left\n\n if right in font.groups:\n groupGlyphs = font.groups[right]\n groupGlyphs.sort()\n # get first glyphname in the group\n rightRef = groupGlyphs[0]\n else:\n rightRef = right\n\n i = kerningRef.index(k)\n kerningRef[i] = (leftRef, rightRef), (left, right)\n\n kerningRef.sort()\n 
return kerningRef", "def get_closest_relationship(self, point, n=1):\n n = min(n,len(self.rtype_vectors))#prevent index error\n if n > 1:\n tmp = zip(*self.rkdtree.query(point,n))\n return [(d, self.rkdtree_keys[i]) for d,i in tmp]\n else:\n dist, id = self.rkdtree.query(point,n)\n return [(dist, self.rkdtree_keys[id])]", "def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}", "def get_adjacent_keys(self, key: str) -> List[str]:\n return [k for k in self.get_adjacent(key)]" ]
[ "0.64455867", "0.5702487", "0.5683594", "0.5632385", "0.5532785", "0.5505051", "0.5495498", "0.54925495", "0.5450763", "0.5449452", "0.5423081", "0.53951913", "0.5394365", "0.53459764", "0.53347504", "0.5302554", "0.5298473", "0.5288237", "0.5263176", "0.52053976", "0.5203688", "0.5191572", "0.51885897", "0.5182508", "0.51810735", "0.5178576", "0.51759636", "0.5174639", "0.51723206", "0.51699513" ]
0.7218674
0
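The sorting trick in stereo_sorted_atom_neighbor_keys relies on a numpy object array so that variable-length priority tuples compare lexicographically under argsort; a stand-alone illustration with made-up neighbor keys and priority vectors:

import numpy

ngb_keys = [4, 7, 9]                       # hypothetical neighbor keys
pri_vecs = numpy.empty(len(ngb_keys), dtype=numpy.object_)
pri_vecs[:] = [(2, 1), (1,), (2, 0)]       # assumed priority vectors

order = numpy.argsort(pri_vecs)
print(tuple(ngb_keys[i] for i in order))   # (7, 9, 4), sorted by priority vector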
Make a cover song analogy; given audio for (A, A'), and B, make B'
def makeAnalogy(X, Fs, beatsA, filename_b, hopSize, winSize, ws, TempoBias, MFCCWeight = 1.0, HPCPWeight = 1.0):
    #Step 1: Load in new example from artist 1 (B song)
    print("Loading new example...")
    XA = X[:, 0]
    XAp = X[:, 1]
    XB, Fs2 = librosa.load(filename_b)
    XB = librosa.core.to_mono(XB)
    #Step 2: Use rubberband library to change tempo of B so that
    #it's in line with tempo of song A
    tempoB, beatsB = librosa.beat.beat_track(XB, Fs2, start_bpm = TempoBias, hop_length = hopSize)
    tempoA = 60.0/(np.mean(beatsA[1::] - beatsA[0:-1])/float(Fs))
    print("tempoA = %g, tempoB = %g"%(tempoA, tempoB))
    ratio = float(tempoA)/tempoB
    print("Shifting by ratio: %g"%ratio)
    XB = pyrb.time_stretch(XB, Fs2, ratio)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,\n clip_duration, sample_volume, ramp_in, ramp_out):\n ramp_out_index = clip_duration - ramp_out\n track_end = min(track_offset + clip_duration, track_data.shape[0])\n track_end = min(track_end,\n track_offset + (sample_data.shape[0] - sample_offset))\n sample_range = track_end - track_offset\n for i in range(sample_range):\n if i < ramp_in:\n envelope_scale = i / ramp_in\n elif i > ramp_out_index:\n envelope_scale = (clip_duration - i) / ramp_out\n else:\n envelope_scale = 1\n sample_input = sample_data[sample_offset + i]\n track_data[track_offset\n + i] += sample_input * envelope_scale * sample_volume", "def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile", "def musicGrid():\n \n a = os.path.join('samples','0c.wav') #the paths for mixer to locate audio files\n b = os.path.join('samples','1b.wav')\n c = os.path.join('samples','2a.wav')\n d = os.path.join('samples','3g.wav')\n e = os.path.join('samples','4f.wav')\n f = os.path.join('samples','5e.wav')\n g = os.path.join('samples','6d.wav')\n h = os.path.join('samples','7c.wav')\n i = os.path.join('samples','8b1.wav')\n j = os.path.join('samples','9a2.wav')\n k = os.path.join('samples','10g2.wav')\n l = os.path.join('samples','11f2.wav')\n m = os.path.join('samples','12e2.wav')\n n = os.path.join('samples','13d2.wav')\n o = os.path.join('samples','14c2.wav')\n p = os.path.join('samples','15b3.wav')\n \n aa = pygame.mixer.Sound(a) #commands to play each sound\n bb = pygame.mixer.Sound(b)\n cc = pygame.mixer.Sound(c)\n dd = 
pygame.mixer.Sound(d)\n ee = pygame.mixer.Sound(e)\n ff = pygame.mixer.Sound(f)\n gg = pygame.mixer.Sound(g)\n hh = pygame.mixer.Sound(h)\n ii = pygame.mixer.Sound(i)\n jj = pygame.mixer.Sound(j)\n kk = pygame.mixer.Sound(k)\n ll = pygame.mixer.Sound(l)\n mm = pygame.mixer.Sound(m)\n nn = pygame.mixer.Sound(n)\n oo = pygame.mixer.Sound(o)\n pp = pygame.mixer.Sound(p)\n \n aaa = pygame.mixer.Channel(0) #assigning a mixer channel to each sound command\n bbb = pygame.mixer.Channel(1)\n ccc = pygame.mixer.Channel(2)\n ddd = pygame.mixer.Channel(3)\n eee = pygame.mixer.Channel(4)\n fff = pygame.mixer.Channel(5)\n ggg = pygame.mixer.Channel(6)\n hhh = pygame.mixer.Channel(7)\n iii = pygame.mixer.Channel(0)\n jjj = pygame.mixer.Channel(1)\n kkk = pygame.mixer.Channel(2)\n lll = pygame.mixer.Channel(3)\n mmm = pygame.mixer.Channel(4)\n nnn = pygame.mixer.Channel(5)\n ooo = pygame.mixer.Channel(6)\n ppp = pygame.mixer.Channel(7)\n\n#---------------------------------------------------------------------\n\n pygame.init()\n size = 320\n game = pygame.display.set_mode((size,size))\n \n \n board = [] #sets up what will be the x-y arrangement of selected squares\n for i in range(0,16):\n board.append([])\n for j in range(0,16):\n board[i].append(False) #sets note squares as initially unselected\n \n addingNotes = True\n while addingNotes:\n drawGrid(board,game,size) #create grid\n for event in pygame.event.get():\n if event.type==pygame.QUIT: #OK to use exit button in top left\n pygame.display.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n \n mousePosition = pygame.mouse.get_pos()\n x = mousePosition[0] #separate mousePosition value into x&y\n y = mousePosition[1]\n x = int(x/20)\n y = int(y/20) #gets index by dividing by square width(pixels)\n board[x][y] = not board[x][y]\n elif event.type == pygame.KEYDOWN:\n print \"Now playing arrangement. Press any key to clear the Grid: \"\n addingNotes = False\n play = False\n makingNoise= True\n while makingNoise: #playbackloop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n sys.exit(i)\n for i in range(0,16):\n for j in range(0,16):\n pygame.time.delay(10) #delays (10ms) iteration through each column to give space between notes during playback\n if board[i][j]:\n hue = 40\n game.fill((100,100,240),pygame.Rect((i*20),(j*20),20,20)) \n \n pygame.display.flip()\n if j==0: aaa.play(aa)\n if j==2: bbb.play(bb)\n if j==3: ccc.play(cc)\n if j==4: ddd.play(dd)\n if j==5: eee.play(ee)\n if j==6: fff.play(ff)\n if j==7: ggg.play(gg)\n if j==8: hhh.play(hh)\n if j==9: iii.play(ii)\n if j==10: jjj.play(jj)\n if j==11: kkk.play(kk)\n if j==12: lll.play(ll)\n if j==13: mmm.play(mm)\n if j==14: nnn.play(nn)\n if j==15: ooo.play(oo)\n if j==16: ppp.play(pp)\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n print \"Grid refreshed.\"\n makingNoise = False\n play = True", "def test_two_tracks_same_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def additive_mixing(s, n):\n mixed_audio = s + n\n \n alpha = 1. 
/ np.max(np.abs(mixed_audio))\n mixed_audio *= alpha\n s *= alpha\n n *= alpha\n return mixed_audio, s, n, alpha", "def additive_mixing(s, n):\n mixed_audio = s + n\n \n alpha = 1. / np.max(np.abs(mixed_audio))\n mixed_audio *= alpha\n s *= alpha\n n *= alpha\n return mixed_audio, s, n, alpha", "def convert_one_song(audiofile,output,mbconnect=None,verbose=0,DESTROYAUDIO=False):\n # inputs + sanity checks\n if not os.path.exists(audiofile):\n print 'ERROR: song file does not exist:',songfile\n return 0\n if os.path.exists(output):\n print 'ERROR: hdf5 output file already exist:',output,', delete or choose new path'\n return 0\n # get EN track / song / artist for that song\n if verbose>0: print 'get analysis for file:',audiofile\n track = trackEN.track_from_filename(audiofile)\n song_id = track.song_id\n song = songEN.Song(song_id)\n if verbose>0: print 'found song:',song.title,'(',song_id,')'\n artist_id = song.artist_id\n artist = artistEN.Artist(artist_id)\n if verbose>0: print 'found artist:',artist.name,'(',artist_id,')'\n # hack to fill missing values\n try:\n track.foreign_id\n except AttributeError:\n track.__setattr__('foreign_id','')\n if verbose>0: print 'no track foreign_id found'\n try:\n track.foreign_release_id\n except AttributeError:\n track.__setattr__('foreign_release_id','')\n if verbose>0: print 'no track foreign_release_id found'\n # create HDF5 file\n if verbose>0: print 'create HDF5 file:',output\n HDF5.create_song_file(output,force=False)\n # fill hdf5 file from track\n if verbose>0:\n if mbconnect is None:\n print 'fill HDF5 file with info from track/song/artist'\n else:\n print 'fill HDF5 file with info from track/song/artist/musicbrainz'\n h5 = HDF5.open_h5_file_append(output)\n HDF5.fill_hdf5_from_artist(h5,artist)\n HDF5.fill_hdf5_from_song(h5,song)\n HDF5.fill_hdf5_from_track(h5,track)\n if not mbconnect is None:\n HDF5.fill_hdf5_from_musicbrainz(h5,mbconnect)\n h5.close()\n # done\n if DESTROYAUDIO:\n if verbose>0: print 'We remove audio file:',audiofile\n os.remove(audiofile)\n return 1", "def test_transform_track_album_based_on_album_title_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def _reproducir(self,mark):\n sp = pysounds.SoundPlayer(self.cant_tracks())\n duracion = mark.obtener_duracion()\n sonidos_a_reproducir = []\n for track_numero in mark.obtener_habilitados():\n track = self.tracks[track_numero]\n tipo = track.obtener_tipo()\n freq = track.obtener_frecuencia()\n vol = track.obtener_volumen()\n if tipo == \"sine\":\n sonidos_a_reproducir.append(pysounds.SoundFactory.get_sine_sound(freq,vol))\n if tipo == \"triangular\":\n sonidos_a_reproducir.append(pysounds.SoundFactory.get_triangular_sound(freq,vol))\n if tipo == \"square\":\n sonidos_a_reproducir.append(pysounds.SoundFactory.get_square_sound(freq,vol))\n sp.play_sounds(sonidos_a_reproducir, duracion)", "def test_transform_track_change_album(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1, cond_album=True, change_album=True,\n 
pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.transformed, True)", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), 
dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def has_album_cover(audio) -> bool:\r\n if type(audio) == str: audio: File = File(audio)\r\n try:\r\n fix_cover(audio)\r\n if 'APIC:' in audio:\r\n apic: mutagen.id3.APIC = audio['APIC:']\r\n if apic.encoding != Encoding.LATIN1:\r\n apic.encoding = Encoding.LATIN1\r\n audio['APIC:'] = apic\r\n audio.save()\r\n return True\r\n except KeyError: audio.add_tags()\r\n return False", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def test_transform_track_album_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_album_based_on_artist_album_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def make_audio_track(language_pair, items, part_number):\n global sequence_builder\n try:\n sequence_builder.make_audio_track(language_pair, items, part_number)\n except Exception as e:\n print(str(e))\n print_exc()", "def song_album(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n return album", "def test_transform_track_album_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, 
seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)", "def _augment_gain(audio, low=0.5, high=1.5):\n g = low + np.random.random_sample(1) * (high - low)\n return audio * g", "def mix(num):\n g.content = g.content or generate_songlist_display()\n if g.browse_mode != \"normal\":\n g.message = F('mix only videos')\n else:\n item = (g.model.songs[int(num) - 1])\n if item is None:\n g.message = F('invalid item')\n return\n item = get_pafy(item)\n # Mix playlists are made up of 'RD' + video_id\n try:\n plist(\"RD\" + item.videoid)\n except OSError:\n g.message = F('no mix')", "def main(beatmap_sounds, effect_volume, music, music_volume, skin, input, output):\n output_format = os.path.splitext(output)[1][1:]\n\n bm_audios = load_sounds(beatmap_sounds) if beatmap_sounds else {}\n skin_audios = load_sounds(skin) if skin else {}\n\n beatmap = Beatmap.from_path(input)\n track = Track.from_beatmap(beatmap, bm_audios, skin_audios)\n beatmap_audio = track.compile()\n beatmap_audio = audioseg_adjust_volume(beatmap_audio, effect_volume)\n\n result = beatmap_audio\n\n if music:\n music_audio = AudioSegment.from_file(music)\n music_audio = audioseg_adjust_volume(music_audio, music_volume)\n\n result = music_audio.overlay(AudioSegment.silent(24) + result)\n\n result.export(output, output_format)\n\n return 0", "def test_transform_album_album_based_on_artist_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, True)", "def test_single_track_with_transform(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.app.load_data()\n\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist 2', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)\n self.assertEqual(album.last_transform, tf_pk)", "def test_transform_album_change_album(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1, cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n 
self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, True)", "def test_transform_track_album_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 2',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, False)", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)", "def cut_audio(old_path, new_path, start, end):\r\n fs, data = wavfile.read(old_path)\r\n indx_start = int(start*fs)\r\n indx_end = int(end*fs)+1\r\n wavfile.write(new_path,fs,data[indx_start:indx_end])\r\n\r\n return True" ]
[ "0.6006216", "0.58060926", "0.5637287", "0.5634883", "0.5594459", "0.5594459", "0.55667746", "0.55425465", "0.55012476", "0.5492657", "0.54915565", "0.548919", "0.54564226", "0.54428786", "0.5437798", "0.5433615", "0.5421229", "0.5392255", "0.5341979", "0.5328841", "0.53193", "0.53153294", "0.5302522", "0.52981836", "0.5295047", "0.52947474", "0.5281799", "0.5256188", "0.52445066", "0.5230556" ]
0.59700465
1
Set names for each block
def set_blockname(self, names: Iterable):
    if len(names) != self.n_blocks_:
        raise TypeError(f'length mismatch [self.n_blocks_: {self.n_blocks_}, names(given): {len(names)}]')
    self.block_names_ = names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _genBlocksByName(self):\n self.blocksByName = {\n block.getName(): block for block in self.getBlocks(includeAll=True)\n }", "def list_blocks(self, _):\n print(self.data.name)", "def genBlocksByLocName(self):\n self.blocksByLocName = {\n block.getLocation(): block for block in self.getBlocks(includeAll=True)\n }", "def store_names(self):\n # clear the text buffer, or create new buffer\n try:\n namestxt = bpy.data.texts[\"FullNames\"]\n namestxt.clear()\n except KeyError:\n namestxt = bpy.data.texts.new(\"FullNames\")\n \n # write the names to the text buffer\n for block, shortname in self.nif_import.dict_names.items():\n block_name = block.name.decode()\n if block_name and shortname != block_name:\n namestxt.write('%s;%s\\n' % (shortname, block_name))", "def set_name(self, item_name):\r\n self.name = item_name", "def rename_cluster(self, name):\n\n def walk(block):\n if not block.is_leaf and not block.is_open:\n block.name = name\n\n for child in block.blocks.values():\n walk(child)\n\n walk(self)", "def start_name(self, attributes):\n self.name = True", "def name(self):\n\t\tname = self.__class__.__name__.replace('Block', '')\n\t\tname = INITIAL_CAPS.sub(r'\\1 \\2', name)\n\t\treturn CAMEL_CASE.sub(r'\\1 \\2', name)", "def name(self):\r\n return self.block_id", "def name(self, name):\n\n self.container['name'] = name", "def name(self, name):\n\n self.container['name'] = name", "def names():\n pass", "def setIndexNames(self):\n self.theta = self.i1\n self.radial = self.i2", "def getName(self):\n return self.block.__name__", "def create_labels(self):\n for name in self.names:\n new_label = Label(text=name)\n self.root.ids.names_box.add_widget(new_label)", "def set_name(self, name):\n\t\tself.name_ = name", "def names(self, names):\n\n self._names = names", "def name(self, name):\n pass", "def addSerialNumbersToNames(calibBlocks: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]:\n calibBlocks = sorted(calibBlocks, key=lambda block: block[\"name\"])\n for key, group in itertools.groupby(calibBlocks, key=lambda block: block[\"name\"]):\n group = list(group)\n for i, elem in enumerate(group, start=1):\n elem[\"name\"] += f\"_{i}\"\n\n return calibBlocks", "def display_name_labels(self):\n for name in self.names:\n # create a label for each name\n self.root.add_widget(Label(text=name))", "def rename_all_blocks(hpo_data, cyto_bed, block_prefix):\n\n for hpo, hdat in hpo_data.items():\n\n block_dict = {bid : bd['credset_bt'] for bid, bd in hdat['blocks'].items()}\n\n rename_dict = rename_blocks(block_dict, cyto_bed, '_'.join([hpo, block_prefix]))\n\n for old_id, new_id in rename_dict.items():\n hpo_data[hpo]['blocks'][new_id] = hpo_data[hpo]['blocks'][old_id]\n hpo_data[hpo]['blocks'].pop(old_id)\n \n return hpo_data", "def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0", "def __set_name__(self, cls, name):\n pass", "def customize_html(course_data, block_data):\n if is_id(block_data['name']) or not block_data['name']:\n block_data['name'] = 'HTML Page'", "def __str__(self):\n return 'MLBlock - {}'.format(self.name)", "def _name_changed(self):\n self._named = True", "def set_name(self,name):\r\n self._name = __name", "def set(self, block, name, value):\n self._kvs.set(self._key(block, name), value)", "def doName(self):\n _short = self.p_nameShort\n _str_func = '[{0}] doName'.format(_short)\n log.debug(cgmGEN.logString_start(_str_func))\n\n _d = 
NAMETOOLS.returnObjectGeneratedNameDict(_short)\n\n _direction = self.getEnumValueString('side')\n if _direction != 'none':\n log.debug(\"|{0}| >> direction: {1}\".format(_str_func,_direction))\n _d['cgmDirection'] = _direction \n self.doStore('cgmDirection',_direction)\n else:\n if _d.get('cgmDirection'):_d.pop('cgmDirection')\n self.doStore('cgmDirection','')\n log.debug(\"|{0}| >> cgmDirection: {1}\".format(_str_func,self.cgmDirection))\n \n\n _position = self.getMayaAttr('position')#self.getEnumValueString('position')\n if _position and _position != '':\n _d['cgmPosition'] = _position \n self.doStore('cgmPosition',_position)\n else:self.cgmPosition = ''\n\n #Get Raw name\n\n for a in 'cgmName','baseName','puppetName',:\n if self.hasAttr(a):\n _d['cgmName'] = ATTR.get(_short,a)\n continue\n \"\"\"\n if self.hasAttr('blockProfile'):\n _blockProfile = self.getMayaAttr('blockProfile') or ''\n if _d.get('cgmName','') not in _blockProfile:\n #if _d.get('cgmName','') in _blockProfile:\n _blockProfile = _blockProfile.replace(_d['cgmName'],'')\n if len(_blockProfile):\n _d['cgmNameModifier'] = STR.camelCase(_blockProfile)\n \"\"\"\n _blockType = ATTR.get(_short,'blockType')\n _d['cgmType'] = _blockType + 'Block'\n \n pprint.pprint(_d)\n \"\"\"\n if self.getMayaAttr('position'):\n _d['cgmPosition'] = self.getEnumValueString('position')\n if self.getMayaAttr('side'):\n _value = self.getEnumValueString('side')\n _d['cgmDirection'] = _value\n self.doStore('cgmDirection',_value)\"\"\"\n #pprint.pprint(vars())\n #Check for special attributes to replace data, name\n _d_new = {}\n for k,v in _d.iteritems():\n if v in ['none','None','NONE',None]:\n continue\n _d_new[k] = v\n log.debug(\"|{0}| >> dict: {1}\".format(_str_func,_d_new))\n self.rename(NAMETOOLS.returnCombinedNameFromDict(_d_new))\n \n \n if self.getMessage('moduleTarget'):\n log.debug(\"|{0}| >> Module target naming...\".format(_str_func)) \n self.moduleTarget.doName()\n \n \n ml_objs = get_blockDagNodes(self)\n for mObj in ml_objs:\n if mObj != self:\n mObj.doName()\n \n for plug in ['formNull','noTransFormNull',\n 'prerigNull','noTransPrerigNull',\n 'defineNull','noTransDefineNull',\n 'moduleTarget']:\n mPlug = self.getMessageAsMeta(plug)\n if mPlug:\n mPlug.doName()", "def populate_names(self,plan_id):\n if not plan_id in self.names:\n try:\n a=self._get_objects_in_plan_generator(plan_id)\n self.names[plan_id]=[]\n for plan in a:\n self.names[plan_id].append(plan[\"name\"])\n except:\n logging.warning(f\"could not get existing buckets from planId: {plan_id}\")\n self.names[plan_id]=[]" ]
[ "0.7619002", "0.6661282", "0.64977086", "0.592721", "0.59218395", "0.5869824", "0.5778553", "0.5733629", "0.5602355", "0.5550933", "0.5550933", "0.55498064", "0.5536557", "0.55123836", "0.54910976", "0.5476435", "0.54742354", "0.54710126", "0.5457501", "0.5434298", "0.5430053", "0.542977", "0.54236907", "0.54037297", "0.54028845", "0.5402718", "0.5391483", "0.53652966", "0.5355531", "0.53483874" ]
0.7410156
1
Assign reduction rule to a gate PPCA
def set_gate_reducer(self, reducer: ReductionRule):
    self.gate_reducer_ = reducer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def convert_reduce(g, op, block):\n\n op_map = {\n \"reduce_all\": \"all\",\n \"reduce_any\": \"any\",\n \"reduce_max\": \"max\",\n \"reduce_min\": \"min\",\n \"reduce_prod\": \"prod\",\n \"reduce_sum\": \"sum\",\n \"reduce_mean\": \"mean\",\n }\n op_name = op_map[op.type]\n input_x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"dim\")\n if op.attr(\"reduce_all\"):\n axis = None\n keepdims = op.attr(\"keep_dim\")\n out = get_relay_op(op_name)(input_x, axis=axis, keepdims=keepdims)\n if not axis and not keepdims:\n # use `expand_dims` to solve the following situation\n # for TVM, the shape of `out` will be (, )\n # for Paddle, the shape of `out` will be [1]\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def __init__(self, input, output) :\n Gate.__init__(self, [input], output)", "def rule_mutation(state, name, value, p):\r\n\r\n\r\n global mutations\r\n\r\n if name in mutations['list']:\r\n\r\n given_function = mutations['list'][name]['function']\r\n\r\n intensity = mutations[name]['intensity']\r\n\r\n if given_function == 'UNKNOWN':\r\n given_function = mutations['default_function']\r\n\r\n if given_function == 'LOF':\r\n if value == True:\r\n if random.random() < intensity:\r\n value = False\r\n\r\n elif given_function == 'GOF':\r\n if value == False:\r\n if random.random() < intensity:\r\n value = True\r\n\r\n# setattr( state, name, value )\r\n# setattr should be used only once and only in set_value().\r\n return value", "def reduction_implication_network(self, rounding_parameter):\n current_N = self.A.shape[1]\n A_csc = self.A.tocsc()\n B = 10 ** rounding_parameter * A_csc.transpose() * A_csc\n int_y = (10 ** rounding_parameter * self.y + 0.1 * np.ones_like(self.y)).astype(int)\n diagonal = B.diagonal()\n B -= csr_matrix(np.diag(diagonal))\n A_y = self.A.transpose() @ int_y\n outeredges = csr_matrix(2 * A_y - diagonal)\n Adjacency_matrix = 2 * sparse.bmat([[None, 2 * B, None, None],\n [None, None, None, outeredges.transpose()],\n [outeredges, None, None, None],\n [None, None, 0, None]], format='csr')\n max_flow_output = csgraph.maximum_flow(Adjacency_matrix, 2*current_N, 2*current_N + 1)\n\n flow = max_flow_output.residual\n symmetric_central_flow = flow[:current_N, current_N: 2*current_N] + flow[:current_N, current_N: 2*current_N].transpose()\n symmetric_central_flow.data //= 2\n flow[:current_N, current_N: 2 * current_N] = symmetric_central_flow\n flow[current_N: 2 * current_N, :current_N] = -symmetric_central_flow\n symmetric_outer_flow = \\\n flow[2*current_N, :current_N] + flow[current_N: 2 * current_N, 2*current_N + 1].transpose()\n symmetric_outer_flow.data //= 2\n flow[2 * current_N, :current_N] = symmetric_outer_flow\n flow[current_N: 2 * current_N, 2 * current_N + 1] = symmetric_outer_flow.transpose()\n\n residual = Adjacency_matrix - flow\n residual.eliminate_zeros()\n n_components, labels = csgraph.connected_components(residual, connection='strong')\n\n component_type = np.zeros(n_components, dtype=int)\n # Type 14: u and 1 - u are both contained in the component\n # Type 15: u is contained in the component, 1 - u is not, no path from u to u - 1\n # Type 16: u is contained in the component, 1 - u is not, there exists a path from u to u - 1\n indices = []\n vals = []\n for i in range(current_N):\n component = labels[i]\n if component_type[component] == 0:\n if component == labels[i + current_N]:\n 
component_type[component] = 14\n else:\n reachable = csgraph.breadth_first_order(residual, i, return_predecessors=False)\n if i + current_N in reachable:\n component_type[component] = 16\n else:\n component_type[component] = 15\n if component_type[component] == 15:\n indices.append(i)\n vals.append(1)\n elif component_type[component] == 16:\n indices.append(i)\n vals.append(0)\n\n no_reductions = len(indices)\n order = np.array(indices).argsort()\n for j in range(no_reductions):\n i = order[no_reductions - j - 1]\n self.problem_reduction_single(indices[i], vals[i])\n return 0", "def _apply_one_mode_gate(G, T, i):\n\n T[i] *= G\n return T", "def convert_assign_value(g, op, block):\n\n keys = [\"bool_values\", \"fp32_values\", \"int32_values\", \"int64_values\"]\n dtypes = [\"bool\", \"float32\", \"int32\", \"int64\"]\n for i, key in enumerate(keys):\n dtype = dtypes[i]\n value = np.array(op.attr(key)).astype(dtype)\n if value is not None and value.size >= 1:\n break\n shape = op.attr(\"shape\")\n value = value.reshape(shape)\n out = _op.const(value, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def _transfer_rule(self, ratio, r, ref_r, prod_name, m, t):\n prod = getattr(m, prod_name)\n return prod[r, t] == prod[ref_r, t] * ratio # TODO tolerance??", "def __init__(self, input0, input1, output) :\n Gate.__init__(self, [input0,input1], output)", "def rule154_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1],\n [1, 0, 1, 0, 0],\n [1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0],\n [0, 0, 0, 1, 1],\n [1, 1, 0, 1, 0],\n [1, 1, 0, 1, 1],\n [0, 0, 1, 0, 1],\n [0, 1, 1, 0, 0],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1],\n [0, 0, 1, 0, 0],\n [1, 1, 1, 0, 1],\n [1, 1, 1, 0, 0],\n [1, 0, 0, 1, 0],\n [0, 1, 0, 1, 1],\n [0, 0, 1, 1, 0],\n [1, 0, 1, 1, 1],\n [1, 1, 0, 0, 0],\n [0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0],\n [1, 1, 0, 0, 1],\n [1, 0, 1, 1, 0],\n [0, 1, 1, 1, 1],\n [0, 0, 0, 1, 0],\n [1, 0, 0, 1, 1],\n [1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1],\n [0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1],\n ])\n cm = np.array([\n [1, 1, 0, 0, 1],\n [1, 1, 1, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1],\n [1, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def test_careduce():\r\n for scalar_op, careduce_op in [\r\n (theano.scalar.mul, tensor.elemwise.CAReduceDtype),\r\n (theano.scalar.add, tensor.elemwise.CAReduceDtype),\r\n (theano.scalar.maximum, tensor.CAReduce),\r\n (theano.scalar.minimum, tensor.CAReduce)\r\n #The following 2 cases could work if the scalar_op.c_code work with float* dtype.\r\n #Currently we have this error:\r\n #error: invalid operands of types 'npy_float32' and 'npy_float32' to binary 'operator&'\r\n #(theano.scalar.and_, tensor.elemwise.CAReduce),\r\n #(theano.scalar.or_, tensor.elemwise.CAReduce),\r\n ]:\r\n for shape, pattern in [((1,1),(1,)),\r\n ((1,0),(1,)),\r\n ((0,1),(1,)),\r\n ((0,0),(1,)),\r\n ((0,0,0),(1,2)),\r\n ((0,0,0,0),(1,2,3)),\r\n ((2,1),(1,)),\r\n ((1,2),(1,)),\r\n ((100,3,1300),[1]),\r\n ((0,),[0]),((5,),[0]),\r\n ((0,0),[0,1]),((1,0),[0,1]),((5,4),[0,1]),((33,31),[0,1]),((5,4),[1]),((5,4),[0]),#need something bigger then 32 for some opt test.\r\n ((5,4,3),[0]),((5,4,3),[1]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[1,2]),((5,4,3),[0,1,2]),\r\n ((0,0,0,0),[0,1,2,3]),\r\n ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),\r\n ((5,4,3,10,11),[1,2]),\r\n ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),\r\n\r\n #test shape bigger then 4096 
on each dimension to make sure that we work correctly when we don't have enough thread/block in each dimensions\r\n ((4100,3),[0]),((3,4101),[0]),#10\r\n ((1024,33),[0]),((33,1024),[0]),#10\r\n ((1025,33),[0]),((33,1025),[0]),#10\r\n\r\n ((4100,3),[1]),((3,4101),[1]),#01\r\n ((1024,33),[1]),((33,1024),[1]),#01\r\n ((1025,33),[1]),((33,1025),[1]),#01\r\n\r\n ((4100,3),[0,1]),((3,4101),[0,1]),#11\r\n ((1024,33),[0,1]),((33,1024),[0,1]),#01\r\n ((1025,33),[0,1]),((33,1025),[0,1]),#01\r\n\r\n ((4100,4,3),[0]),((5,4100,3),[0]),((5,4,4100),[0]), ((3,65536,1), [0]),#100\r\n ((4100,4,3),[1]),((5,4100,3),[1]),((5,4,4100),[1]),#010\r\n ((4100,4,3),[2]),((5,4100,3),[2]),((5,4,4100),[2]),#001\r\n ((4100,4,3),[0,1]),((5,4100,3),[0,1]),((5,4,4100),[0,1]),#110\r\n ((4100,4,3),[1,2]),((5,4100,3),[1,2]),((5,4,4100),[1,2]),#011\r\n #((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented\r\n ((4100,4,3),[0,1,2]),((5,4100,3),[0,1,2]),((5,4,4100),[0,1,2]),#111\r\n ((65,4,3),[0,1,2]),((5,65,3),[0,1,2]),((5,4,65),[0,1,2]),#111\r\n\r\n ((4100,4,3,2),[2,3]),((4,4100,3,2),[2,3]),((4,3,4100,2),[2,3]),((4,3,2,4100),[2,3]),#0011\r\n ((4100,4,3,2),[1,3]),((4,4100,3,2),[1,3]),((4,3,4100,2),[1,3]),((4,3,2,4100),[1,3]),#0101\r\n ((4100,4,3,2),[0,2,3]),((4,4100,3,2),[0,2,3]),((4,3,4100,2),[0,2,3]),#((4,3,2,4100),[0,2,3]),#1011\r\n ((4100,4,3,2),[1,2,3]),((4,4100,3,2),[1,2,3]),((4,3,4100,2),[1,2,3]),((4,3,2,4100),[1,2,3]),#0111\r\n ((65,4,3,2),[1,2,3]),((4,65,3,2),[1,2,3]),((4,3,65,2),[1,2,3]),((4,3,2,65),[1,2,3]),#0111\r\n ((4100,2,3,4),[0,1,2,3]),((2,4100,3,4),[0,1,2,3]),((2,3,4100,4),[0,1,2,3]),((2,3,4,4100),[0,1,2,3]),((128,1,3,3), [0,1,2,3]),#1111\r\n\r\n\r\n #test pattern implemented by reshape\r\n ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000\r\n ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100\r\n ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010\r\n ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001\r\n ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111\r\n\r\n ]:\r\n\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n b = op(a)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n f = theano.function([a], b, mode=mode_with_gpu)\r\n f2 = theano.function([a], b, mode=mode_without_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n f_caused_value_error = False\r\n try:\r\n f_out = f(val)\r\n except ValueError, e:\r\n exc = e\r\n f_caused_value_error = True\r\n except NotImplementedError:\r\n if (numpy.prod(shape) == 0 and\r\n getattr(scalar_op, 'identity', None) != 0):\r\n continue\r\n raise\r\n\r\n f2_caused_value_error = False\r\n try:\r\n f2_out = f2(val)\r\n except ValueError, e:\r\n exc2 = e\r\n f2_caused_value_error = True\r\n\r\n if f_caused_value_error != f2_caused_value_error:\r\n if f_caused_value_error:\r\n print 'f caused this value error:'\r\n print exc\r\n else:\r\n print 'f did not raise a 
value error, but should have'\r\n if f2_caused_value_error:\r\n print 'f2 caused this value error:'\r\n print exc2\r\n else:\r\n print 'f should not have raised a value error'\r\n print 'shape was: ', shape\r\n print 'pattern was: ', pattern\r\n assert False\r\n\r\n try:\r\n #We raise the error threashold as we sum big matrix\r\n #and this cause small rounding difference with some seed\r\n #example in debug mode with unittests.rseed=9275\r\n orig_rtol = theano.tensor.basic.float32_rtol\r\n theano.tensor.basic.float32_rtol = 2e-5\r\n assert _allclose(f_out, f2_out), ('shape', shape,\r\n 'pattern', pattern,\r\n scalar_op,\r\n sum([shape[i] for i in pattern]),\r\n f2(val), f(val), val)\r\n finally:\r\n theano.tensor.basic.float32_rtol = orig_rtol\r\n\r\n\r\n #test with dimshuffle\r\n #we shuffle the 2 outer dims.\r\n for shape, pattern in [#((5,),[0]),\r\n ((5,4),[0,1]),((5,4),[0]),\r\n ((5,4,3),[0]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[0,1,2]),\r\n ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),\r\n ((128,1,3,3),[0,1,2,3]),\r\n ]:\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n dim_pattern = range(len(shape))\r\n dim_pattern[0] = 1\r\n dim_pattern[1] = 0\r\n a = a.dimshuffle(dim_pattern)\r\n b = op(a)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n f = theano.function([a], b, mode=mode_with_gpu)\r\n f2 = theano.function([a], b, mode=mode_without_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert _allclose(f2(val), f(val)), ('shape', shape,\r\n 'pattern', pattern,\r\n scalar_op,\r\n sum([shape[i] for i in pattern]))\r\n\r\n #test with broadcast\r\n for shape, pattern in [((5,),[0]),\r\n ((5,4),[0,1]),((5,4),[0]),\r\n ((5,4,3),[0]),((5,4,3),[0,1]),\r\n ((5,4,3),[2]),((5,4,3),[0,1,2]),\r\n ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),\r\n ((128,1,3,3),[0,1,2,3]),\r\n ]:\r\n op = careduce_op(scalar_op, axis=pattern)\r\n pat = tensor_pattern_to_gpu_pattern(shape, pattern)\r\n\r\n shape = numpy.asarray(shape) * 2\r\n a = tensor.TensorType('float32', (False,) * len(shape))()\r\n a2 = tcn.CudaNdarrayType((False,) * len(shape))()\r\n b = op(a)\r\n b2 = op(a2)\r\n val = numpy.random.rand(numpy.prod(shape)).reshape(shape)\r\n # val = numpy.ones(shape)\r\n # val = numpy.arange(numpy.prod(shape)).reshape(shape)\r\n val = theano._asarray(val, dtype='float32')\r\n val2 = cuda.CudaNdarray(val)\r\n if len(shape) == 1:\r\n val = val[::2]\r\n val2 = val2[::2]\r\n elif len(shape) == 2:\r\n val = val[::2, ::2]\r\n val2 = val2[::2, ::2]\r\n elif len(shape) == 3:\r\n val = val[::2, ::2, ::2]\r\n val2 = val2[::2, ::2, ::2]\r\n elif len(shape) == 4:\r\n val = val[::2, ::2, ::2, ::2]\r\n val2 = val2[::2, ::2, ::2, ::2]\r\n f = theano.function([a], b, mode=mode_without_gpu)\r\n f2 = theano.function([a2], b2, mode=mode_with_gpu)\r\n assert tcn.GpuCAReduce in [x.op.__class__\r\n for x in f2.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert op.__class__ in [x.op.__class__\r\n for x in f.maker.fgraph.toposort()], (\r\n scalar_op, shape, pattern)\r\n assert _allclose(f2(val2), f(val)), ('shape', shape,\r\n 'pattern', pattern,\r\n 
sum([shape[i] for i in pattern]))", "def defaultAccumulator(gm=None,operator=None):\n if gm is not None:\n operator=gm.operator\n elif operator is None and gm is None:\n raise NameError(\"at least a gm or an operator must be given\")\n if operator=='adder':\n return 'minimizer'\n elif operator=='multiplier':\n return 'maximizer'\n else:\n raise RuntimeError(\"unknown operator: \"+ operator)", "def NACAcompute(self):\n if self.p == 0:\n self.NACA4digitsSym()\n else:\n self.NACA4digitsCam()", "def _create_objective(self, meta, m):\n ## cashflow eval\n rule = partial(self._cashflow_rule, meta)\n m.obj = pyo.Objective(rule=rule, sense=pyo.maximize)", "def associativity(ob):\n return 0", "def proximal(self):\n if self.A is None:\n def prox(x,y):\n return operator_P(self.proj, x, y, None)\n else:\n def prox(x,y,u):\n return operator_P(self.proj, x, y, u)\n \n return lambda x,eps: set_z(self,prox)(x)", "def gen_apply(self, g, ng, node):\n with About(node.debug, self.relation):\n if node is g.output:\n new_node = ng.add_parameter()\n else:\n new_node = ng.apply()\n # NOTE: First parameter to remap_node is (g, node) instead of just\n # node. This lets us dispatch to a different node depending on whether\n # it belongs to the graph that uses it, or is a free variable.\n self.remap_node((g, node), g, node, ng, new_node)", "def make_accumulator():\n pass # replace with your solution", "def _auto_multiplier(self, dgmod):\n\n assert self._new_constraints and all(\n con.type() == \"rdConstraint\" for con in self._new_constraints\n )\n\n root, _ = self._tree_root\n\n # Use existing multiplier, if any, to support branching\n mult = root.shape(\"rdConstraintMultiplier\")\n\n if not mult:\n con = root.shape(\"rdConstraint\")\n\n if con is not None:\n mult = con[\"multiplierNode\"].connection(\n type=\"rdConstraintMultiplier\")\n\n if mult:\n for constraint in self._new_constraints:\n dgmod.connect(mult[\"ragdollId\"], constraint[\"multiplierNode\"])\n\n else:\n # There isn't any, let's make one\n mult = commands.multiply_constraints(self._new_constraints,\n parent=root)\n mult.rename(i__.unique_name(\"rGuideMultiplier\"))\n\n # Forward some convenience attributes\n multiplier_attrs = i__.UserAttributes(mult, root)\n multiplier_attrs.add(\"driveStrength\",\n long_name=\"strengthMultiplier\",\n nice_name=\"Strength Multiplier\")\n\n self._new_multipliers.append(mult)\n self._new_userattrs += [multiplier_attrs]\n\n return mult", "def makeRule(self, datapoint):\n\t\t\n\t\tant = []\n\t\tcons = []\n\t\tmembershipsFactors = []\n\n\t\top = \"and\"\n\n\t\t# define antecedent\n\t\tfor i, inp in enumerate(self.inputs):\n\t\t\tmemb = inp.calculate_memberships(datapoint[:-len(self.outputs)][i])\n\t\t\tmaxInMemb = (-1, \"\")\n\t\t\tfor key in memb:\n\t\t\t\tif (memb[key] > maxInMemb[0]):\n\t\t\t\t\tmaxInMemb = (memb[key], key)\n\t\t\tant.append(maxInMemb[1])\n\t\t\tmembershipsFactors.append(maxInMemb[0])\n\n\t\t# define consequent\n\t\tfor i, outp in enumerate(self.outputs):\n\t\t\tmemb = outp.calculate_memberships(datapoint[-len(self.outputs):][i])\n\t\t\tmaxInMemb = (-1, \"\")\n\t\t\tfor key in memb:\n\t\t\t\tif (memb[key] > maxInMemb[0]):\n\t\t\t\t\tmaxInMemb = (memb[key], key)\n\t\t\tcons.append(maxInMemb[1])\n\t\t\tmembershipsFactors.append(maxInMemb[0])\n\n\t\t# increase counter to keep track of amount of rules\n\t\tself.counter += 1\n\n\t\t# if (np.product(membershipsFactors) > 1.0):\n\t\t# \tprint membershipsFactors, np.product(membershipsFactors) (debug)\n\n\t\t# return the new rule and it's degree\n\t\treturn 
basic.Rule(self.counter, ant, op, cons[0], self.andMeth, self.orMeth), np.product(membershipsFactors)", "def _rewrite_default(self, node: saldag.OpNode):\n\n node.is_mpc = node.requires_mpc()", "def __init__(\n self,\n formula,\n aliases,\n reduction_op=\"Sum\",\n axis=0,\n dtype=None,\n opt_arg=None,\n formula2=None,\n cuda_type=None,\n dtype_acc=\"auto\",\n use_double_acc=False,\n sum_scheme=\"auto\",\n enable_chunks=True,\n rec_multVar_highdim=False,\n ):\n\n if dtype:\n pyKeOps_Warning(\n \"keyword argument dtype in Genred is deprecated ; argument is ignored.\"\n )\n if cuda_type:\n pyKeOps_Warning(\n \"keyword argument cuda_type in Genred is deprecated ; argument is ignored.\"\n )\n\n self.reduction_op = reduction_op\n reduction_op_internal, formula2 = preprocess(reduction_op, formula2)\n\n self.optional_flags = get_optional_flags(\n reduction_op_internal,\n dtype_acc,\n use_double_acc,\n sum_scheme,\n enable_chunks,\n )\n\n str_opt_arg = \",\" + str(opt_arg) if opt_arg else \"\"\n str_formula2 = \",\" + formula2 if formula2 else \"\"\n\n self.formula = (\n reduction_op_internal\n + \"_Reduction(\"\n + formula\n + str_opt_arg\n + \",\"\n + str(axis2cat(axis))\n + str_formula2\n + \")\"\n )\n self.aliases = complete_aliases(\n self.formula, list(aliases)\n ) # just in case the user provided a tuple\n self.axis = axis\n self.opt_arg = opt_arg\n\n self.rec_multVar_highdim = rec_multVar_highdim", "def RHSnetFomp(y,t,a,b0,b1,g,k,w):\n dy = fn.rhs_omp(P,y,t,a,b0,b1,g,k,w,2)\n return dy", "def _eval_prior_casadi(self, state, action):\n\n return mtimes(self.a, state.T) + mtimes(self.b, action.T)", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def prelu(input, weight):\n return FunctionLib.apply('PRelu', input.device, [input, weight])", "def _arithmetize1(self, operand: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n # Data length might be changed after evaluation\n # operand = recycle_value(operand, self.data.shape[0])\n return op_func(operand)", "def _setup_proximal_operator(\n self,\n weight_list,\n learning_rate,\n regularization_lambda,\n reciprocal_stable_factor=0.0001,\n weight_reshape_to_norm=lambda x: x,\n weight_reshape_from_norm=lambda x: x\n ):\n eta = learning_rate * regularization_lambda\n epsilon = eta * reciprocal_stable_factor\n weight_update_ops = []\n weight_shapes = []\n weight_reshaped_list = []\n weight_reshaped_shapes = []\n for weight_origin in weight_list:\n weight = weight_reshape_to_norm(weight_origin)\n weight_shape = list(map(\n lambda x: x.value,\n weight.shape\n ))\n weight_shapes.append(weight_shape)\n weight_reshaped = tf.reshape(\n weight,\n shape=(weight_shape[0], weight_shape[1], -1)\n )\n weight_reshaped_list.append(weight_reshaped)\n weight_reshaped_shapes.append(\n list(map(lambda x: x.value, weight_reshaped.shape))\n )\n weight_reshaped_combined = tf.concat(\n values=weight_reshaped_list,\n axis=-1\n )\n # proximal update #\n weight_new_reshaped_combined = self.proximal_operator(\n weight=weight_reshaped_combined,\n eta=eta,\n epsilon=epsilon\n )\n\n weight_new_reshaped_list = tf.split(\n value=weight_new_reshaped_combined,\n num_or_size_splits=list(map(lambda x: x[-1], weight_reshaped_shapes)),\n axis=-1\n )\n for i in range(len(weight_new_reshaped_list)):\n weight_new_reshaped = weight_new_reshaped_list[i]\n weight_shape = weight_shapes[i]\n weight_origin = weight_list[i]\n weight_new = tf.reshape(\n weight_new_reshaped,\n shape=weight_shape,\n )\n weight_origin_new = weight_reshape_from_norm(weight_new)\n weight_update_op 
= weight_origin.assign(weight_origin_new)\n weight_update_ops.append(weight_update_op)\n return tf.group(*weight_update_ops)", "def update_op(self, loss, learning_rate,var):\n #train_op = None\n ####### Implementation Here ######\n #pass\n train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss = loss,var_list = var )\n return train_op", "def _apply_individual_nbody1_accumulate_python(\n coeff: 'Nparray', ocoeff: 'Nparray', icoeff: 'Nparray',\n amap: 'Nparray', btarget: 'Nparray', bsource: 'Nparray',\n bparity: 'Nparray') -> None:\n for sourcea, targeta, paritya in amap:\n ocoeff[targeta, btarget] += coeff * paritya * numpy.multiply(\n icoeff[sourcea, bsource], bparity)" ]
[ "0.5356251", "0.5183916", "0.5143067", "0.5094079", "0.50892544", "0.5080843", "0.50643283", "0.50482136", "0.5025356", "0.5016265", "0.50033015", "0.50028324", "0.49897406", "0.49796098", "0.49785227", "0.49708816", "0.49432823", "0.49322098", "0.4927745", "0.49176073", "0.4899228", "0.48970172", "0.48848477", "0.4877299", "0.48677754", "0.4864539", "0.48616868", "0.48609212", "0.4852008", "0.48517922" ]
0.5607902
0
Reconstruct the input from the intermediate reduced representation; that is, the flow is X > intermediate reduction > intermediate reconstruction.
def reconstruct_intermediate(self, X: numpy.ndarray) -> numpy.ndarray:
    reduced_repr_intr = self.intermediate_transform(X)
    return self.intermediate_inverse_transform(reduced_repr_intr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reconstruct(self, X: numpy.ndarray) -> numpy.ndarray:\n\n reduced_repr = self.transform(X)\n return self.inverse_transform(reduced_repr)", "def reconstruct(self, X):", "def reconstruct(self, X):", "def reconstruct(self, x):\n return self.inverse_transform(self.transform(x))", "def reconstructX(self, inputs):\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n return act_dec(self.decodeX(inputs))", "def reconstruct(self,\n input_data,\n sample=False):\n return self.backward_propagate(self.forward_propagate(input_data, sample), sample)", "def reconstruct(self, X):\n return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})", "def reconstruct_input(self, ix):", "def reconstruct(self, X):\n return self.sess.run(self.x_reconstr_mean,\n feed_dict={self.x_raw: X})", "def reconstruct(self, data):\n self.recons = self.trf.reconstruct(data)", "def reconstruct_input_ext(self, model_in):", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(self.drop(self.node(input.view(*self.G.d_in))))\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the last dim\n self.result = self.act(self.drop(self.node(torch.cat(in_result, in_result[0].dim() - 1))))\n\n return self.result.view(*self.G.d_out)", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.drop(self.node(input))\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the last dim\n self.result = self.drop(self.node(torch.cat(in_result, in_result[0].dim()-1).type(_tensor(\"LongTensor\"))))\n\n return self.result.view(*self.G.d_out)", "def get_reconstructed_input(self):\n\n\t\treturn self.activation(\n\t\t\ttheano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +\n\t\t\tself.reverse_bias)", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def transform(self, original_input):\n raise NotImplementedError()", "def reshape(input):\n\n input = input / 255\n input = trans.resize(input, (args.size, args.size))\n input = np.reshape(input, input.shape + (1,))\n input = np.reshape(input, (1,) + input.shape)\n return input", "def _ReshapeToInput(op: ops.Operation, grad):\n return array_ops.reshape(\n _IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0]))", "def _reconstruct(self, num_samples=None):", "def _flatten(prev_layer):\n\n with tf.name_scope('flatten'):\n shape = int(np.prod(prev_layer.get_shape()[1:]))\n return tf.reshape(prev_layer, [-1, shape])", "def forward(self, input):\n return input.view(input.size(0), -1)", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.node(input)\n\n elif self.result is None:\n raise ValueError(\"There must be some input or a previously calculated result\")\n\n return self.result.view(*self.G.d_out)", "def _flatten(self):\n n = self.B\n idx = self.nodect - 1\n self.seq = []\n while n is not None:\n n['idx'] = idx\n self.seq.insert(0, n)\n idx -= 1\n n = n['pred']", "def get_final_reconstruction(self):", "def reconstruct_signal(_X):\n width = _X.shape[1]\n N = _X.shape[0]\n n = N // 2\n\n head = _X[:n, 0]\n tail = _X[n:, width - 1]\n body = np.array([_X[n:, i] + _X[:n, i + 1] for i in range(width - 
1)]).reshape(n * (width - 1))\n\n return np.append(head, np.append(body, tail))", "def reconstruct(self, X, y):\n return self.sess.run(self.x_reconstr_mean,\n feed_dict={self.x: X, self.y: y.reshape([-1, 1])})", "def reduce_recurrents(h_prev):\n return a_reduce_recurrents(conv2d(h_prev, self.W_red_rec))", "def forward(self, input=None):\n self._repackage()\n if (input is not None) and (self.result is None):\n if self.G.ntype == \"lstm\":\n self.result, both = self.node(input, (self.hidden, self.state))\n self.hidden, self.state = both\n else:\n self.result, self.hidden = self.node(input, self.hidden)\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the dim input_size\n if self.G.ntype == \"lstm\":\n self.result, both = self.node(torch.cat(in_result, 2), (self.hidden, self.state))\n self.hidden, self.state = both\n else:\n self.result, self.hidden = self.node(torch.cat(in_result, 2), self.hidden)\n\n return self.result.view(*self.G.d_out)", "def forward(self, x):\n if self.training:\n x = self.input_pert(x)\n x = self.encoder(x)\n x = self.decoder(x)\n return x", "def intermediate_transform(self, X: numpy.ndarray, vectify=True) -> Union[numpy.ndarray, List[numpy.ndarray]]:\n\n decomposed_X = self.__decompose_blocks(X)\n buffer = []\n\n for block_X, block_pca, block_reduct in zip(decomposed_X, self.intermediates_, self.intermediate_reducers_):\n effective_dim = block_reduct.effective_dim(block_pca)\n block_feature = block_pca.transform(block_X)[:, :effective_dim]\n buffer.append(block_feature)\n\n if vectify:\n return numpy.hstack(buffer)\n else:\n return buffer" ]
[ "0.6733002", "0.669029", "0.669029", "0.6568794", "0.6450592", "0.64080805", "0.63954407", "0.61722416", "0.61493784", "0.60019034", "0.59541947", "0.58344096", "0.5821323", "0.57803905", "0.5773008", "0.57342213", "0.57054216", "0.5696215", "0.5630937", "0.5615154", "0.560185", "0.55616134", "0.5539546", "0.55262667", "0.5485034", "0.5481173", "0.5475349", "0.54264057", "0.54070085", "0.53983134" ]
0.7525129
0
Installs the standard syntax directives.
def install_syntax_functions(self):
    self.syntax_functions[':head'] = head_prediction_generator
    self.syntax_functions[':optional'] = optional_prediction_generator
    self.syntax_functions[':sequence'] = sequence_prediction_generator
    self.syntax_functions[':any'] = any_prediction_generator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install() -> None:\n # Get locals from parent frame\n frames = inspect.getouterframes(inspect.currentframe())\n if len(frames) > 1:\n parent_frame = frames[1]\n parent_locals = parent_frame.frame.f_locals\n locals().update(parent_locals)\n\n # For tab completion and arrow key support\n readline.parse_and_bind(\"tab: complete\")\n\n command = ''\n continued_command = False\n while True:\n try:\n if continued_command:\n command += '\\n'\n else:\n command = ''\n\n prompt = '... ' if continued_command else '>>> '\n new_input = input(prompt)\n\n if new_input != '':\n command += new_input\n else:\n continued_command = False\n\n except KeyboardInterrupt:\n print()\n continue\n\n except EOFError:\n print()\n sys.exit(0)\n\n if continued_command:\n continue\n\n try:\n ast_obj = ast.parse(command, '<input>', 'single')\n except SyntaxError:\n try:\n code_obj = code.compile_command(command)\n if code_obj is None:\n continued_command = True\n continue\n\n except BaseException:\n traceback.print_exc()\n continue\n\n assert isinstance(ast_obj, ast.Interactive)\n patch_shell_commands(ast_obj)\n\n try:\n code_obj = compile(ast_obj, '<input>', 'single')\n assert code_obj is not None\n exec(code_obj)\n\n except SystemExit as e:\n sys.exit(e.code)\n\n except BaseException:\n traceback.print_exc()", "def _swift_generic_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('generic')\n self._swift_install()\n self._set_onhold('generic')\n self._final_install_touches()", "def syntax():\n\tversion()\n print \"Goal: Install NodeJS and NPM on a Debian Stable system\"\n print \"Syntax: \"\n print \" -h: Display the help message and exit\"\n print \" -v: Display the version and exit\"\n print \" -d: Run the script in debug mode (log in the \"+_LOG_FILE+\" file)\"\n print \" -o PATH: Set the installation PATH (default is \"+_DEFAULT_PATH+\")\"", "def install_pygments():\n print(\"Checking pygments...\")\n try:\n import pygments\n print(\"pygments already installed!\")\n except ImportError:\n print(\"Could not find pygments, installing it...\")\n subprocess.run(\"python3 -m pip install pygments\".split())\n print(\"Installed pygments\")", "def lint_setup_py(session):\n session.install(\"docutils\", \"pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")", "def _swift_install(self, sys_type='generic'):\n sudo('''\n export DEBIAN_FRONTEND=noninteractive;\n apt-get update -qq -o Acquire::http::No-Cache=True;\n ''')\n self._setup_swiftuser()\n sudo('apt-get install %s %s' % (self.apt_opts, self.swift_generic))\n\n if sys_type == 'proxy':\n sudo('''\n apt-get install %s %s %s\n ''' % (self.apt_opts, self.swift_proxy, self.swift_others))\n elif sys_type == 'storage':\n sudo('''\n apt-get install %s %s %s\n ''' % (self.apt_opts, self.swift_storage, self.swift_others))\n elif sys_type == 'saio':\n sudo('''\n apt-get install %s %s %s %s\n ''' % (self.apt_opts, self.swift_proxy,\n self.swift_storage, self.swift_others))", "def setup_vim():\n pass", "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def install(self, repo):\n\n for subsystem in repo.options.get('subsystems', []):\n name = subsystem.get('name')\n args = subsystem.get('args', {})\n\n if name is None:\n raise InvalidSettingError('subsystem name', 'missing in settings file.')\n\n if name != 'SphinxDocumentation':\n raise InvalidSettingError('subsystem name', \"name '{}' is unknown\".format(name))\n\n 
repo.add_subsystem(SphinxDocumentation(repo, **args))\n\n repo.add_subsystem(BasicPythonSupport(repo))", "def install_deps():\n dist = check_distribution()\n if dist == Distribution.TEXLIVE:\n texlive_install_deps()\n elif dist == Distribution.MIKTEX:\n miktex_install_deps()\n\n install_pygments()", "def main(prefix=\"\", net_install=False):\n if prefix == \"\":\n prefix = os.path.realpath(prefix)\n prefix = os.path.join(prefix, \"mast\")\n else:\n prefix = os.path.realpath(prefix)\n install_anaconda(prefix)\n install_packages(prefix, net_install)\n add_scripts(prefix)\n generate_docs(prefix)", "def install_python_ta():\n if not python_ta_installed():\n print(\"Installing / Updating the style checker\", end='')\n\n i = 0\n while not python_ta_installed() and i != -1:\n print(\".\", end='')\n i = attempt_python_ta_installation(i)\n\n print(\"\")", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def setup(app):\r\n\r\n # This is only a lexer, so adding it below to pygments appears sufficient.\r\n # But if somebody knows that the right API usage should be to do that via\r\n # sphinx, by all means fix it here. At least having this setup.py\r\n # suppresses the sphinx warning we'd get without it.\r\n pass", "def add_syntax_definition(pdcmd: PandocCmd, syntax_definition_dir: str) -> None:\n\n if not os.path.exists(syntax_definition_dir):\n print(f'syntax definition directory {syntax_definition_dir} does not exist')\n return\n\n filepattern = os.path.join(syntax_definition_dir, '*.xml')\n for xmlfile in glob.glob(filepattern):\n pdcmd.append('--syntax-definition')\n pdcmd.append(xmlfile)", "def install_system_packages():\n print(\"Installiere notwendige Pakete...\")\n _run('sudo apt update')\n _run(\n \"sudo apt install \"\n \"apache2 apache2-dev python3-dev python3-venv python3-pip postgresql-contrib libpq-dev\"\n )\n print(\"Fertig!\", end=\"\\n\\n\")", "def add_myst(original: str) -> str:\n # add myst_parser extension and its own extensions configuration\n content = original.splitlines()\n myst = '\\n# Enable markdown\\nextensions.append(\"myst_parser\")\\n'\n myst_extensions = template(\"myst_extensions\").template # raw string\n j = next(i for i, line in enumerate(content) if line.startswith(\"source_suffix =\"))\n content[j] = 'source_suffix = [\".rst\", \".md\"]'\n content.insert(j - 1, myst)\n content.insert(j, myst_extensions)\n return \"\\n\".join(content)", "def setup_latex_preamble():\n from sage.misc.latex import latex\n latex.add_package_to_preamble_if_available('tikz')\n latex.add_to_mathjax_avoid_list(\"tikz\")\n if latex.has_file(\"tikz.sty\"):\n latex.add_to_preamble(r'\\usetikzlibrary{automata}')", "def docs(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)", "def set_js_linters():\n if is_installed('nodejs'):\n cmd = 'sudo -H npm -g install jscs jshint'\n run(cmd)\n else:\n print(red('npm not installed. 
Re-run this task after installing npm'))\n\n # patterns\n before = '^let g:syntastic_javascript_checkers.*$'\n after = \"let g:syntastic_javascript_checkers = ['jscs', 'jshint']\"\n\n print(green('Setting jscs and jshint as default linters on vim.'))\n sed('.vim/vimrc', before, after)\n\n print(green('Uploading configuration files'))\n config_path = 'conventions/.jscsrc'\n put(config_path)\n config_path = 'conventions/.jshintrc'\n put(config_path)", "def demo_legacy_grammar():\n from nltk.grammar import parse_fcfg\n\n g = parse_fcfg(\"\"\"\n % start S\n S[sem=<hello>] -> 'hello'\n \"\"\")\n print \"Reading grammar: %s\" % g\n print \"*\" * 20\n for reading in batch_interpret(['hello'], g, semkey='sem'):\n syn, sem = reading[0]\n print\n print \"output: \", sem", "def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)", "def _install(self):\n\n pass", "def __instructions(self):\n\n self += comment('Arm Allinea Studio version {}'.format(self.__version))\n\n if self.__ospackages:\n self += packages(ospackages=self.__ospackages)\n\n if self.__tarball:\n self += copy(src=self.__tarball, dest=self.__wd)\n\n self += shell(commands=self.__commands)\n self += environment(variables=self.environment_step())", "def add_standard_imports(doctest_namespace):\n import py3dep\n\n doctest_namespace[\"py3dep\"] = py3dep", "def dev():\n\n # Python build headers.\n packages = [\n 'python3-setuptools',\n 'python3-dev',\n 'python3-tk',\n 'python-setuptools',\n 'python-dev',\n 'python-tk',\n ]\n\n sudo('apt-get -y install {}'.format(' '.join(packages)))", "def standard(self) -> global___Snippet.Standard:", "def commands_lint():\n lint()", "def 
setup_zxpy_repl() -> None:\n print(\"zxpy shell\")\n print(\"Python\", sys.version)\n print()\n\n install()", "def prepareDocument(self):\n self.checkSyntaxDocument()" ]
[ "0.56438464", "0.5318523", "0.5294665", "0.5194252", "0.5179585", "0.51310945", "0.5099968", "0.50895035", "0.5047722", "0.50111884", "0.498702", "0.4986996", "0.49659818", "0.49514064", "0.4923191", "0.48950475", "0.48576126", "0.4807199", "0.48069134", "0.47965986", "0.47840992", "0.4760702", "0.46992522", "0.46985945", "0.46738753", "0.46597734", "0.46565324", "0.4653961", "0.46047318", "0.46028623" ]
0.6467446
0
Checks to see if any preparsers would like to handle the token. If not, None is returned, otherwise the result of preparsing the token is returned.
def check_preparsers(self, token):
    for [matcher, function] in self.preparsers:
        match = matcher.match(token)
        if match is None or match.end() != len(token):
            pass
        else:
            return function(token)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preparse(self, token):\n result = self.check_preparsers(token)\n if result is None:\n return token\n else:\n return result", "def _handle_token(self, token: str) -> Optional[str]:\n return token or self._token_handler.token", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def parse(self) -> T.Optional[SyntaxNode]:\n if self.tokens:\n return self.parse_expr()\n return None", "def _try_parse(self, *parse_funcs: ParseFunc) -> Optional[node.NodeType]:\n for parse_func in parse_funcs:\n try:\n with self.tokens:\n return parse_func()\n except ParserException:\n pass\n return None", "def peek_for_token(self, ch, check_tok, yes_tok, no_tok):\n if self.peek_char() == check_tok:\n first = ch\n self.read_char()\n literal = first + self.char\n return Token(yes_tok, first + self.char)\n else:\n return Token(no_tok, ch)", "def sanitize_token(self, token):\n token_type = token['type']\n if token_type in ['StartTag', 'EndTag', 'EmptyTag']:\n if token['name'] in self.allowed_elements:\n return self.allow_token(token)\n\n elif self.strip_disallowed_elements:\n return None\n\n else:\n if 'data' in token:\n # Alphabetize the attributes before calling .disallowed_token()\n # so that the resulting string is stable\n token['data'] = alphabetize_attributes(token['data'])\n return self.disallowed_token(token)\n\n elif token_type == 'Comment':\n if not self.strip_html_comments:\n return token\n else:\n return None\n\n elif token_type == 'Characters':\n return self.sanitize_characters(token)\n\n else:\n return token", "def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token", "def pre_processing(self) -> Optional[Callable]:\n if (\n \"transforms\" not in self._spec\n or \"pre\" not in self._spec[\"transforms\"]\n ):\n # Passthrough\n return lambda x: x\n f = find_class(self._spec[\"transforms\"][\"pre\"])\n return f(self.options)", "def fetch(self, scanner):\n\n token = None\n if self._token:\n token = self._token\n self._token = None\n else:\n token = self._tokenize(scanner)\n\n return token", "def first_token(self):\n if self.tokens:\n return self.tokens[0]\n return \"None\"", "def start_tag_or_none(self, token):\n if self.patterns['start_tag'].match(token):\n return token[2:-6].upper()", "def parse(token):\n\n pass", "def get_parser_best(self):\n if len(self.parses):\n return min(self, key=lambda parse: parse.parser_rank)\n else:\n return None", "def _get_token(self):\n self._skip()\n\n token = None\n # Checks single-quoted string.\n if self.current_char == \"'\":\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == \"'\"):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks double-quoted string.\n elif self.current_char == '\"':\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == '\"'):\n self._next_char()\n if self.EOF:\n raise LexerError(\n 
start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a digit.\n elif self.current_char.isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() == \".\":\n self._next_char()\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a dot.\n elif self.current_char == \".\":\n if self._peek().isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks word begins with an alphabetic letter or an underscore.\n elif self.current_char.isalpha() or self.current_char == \"_\":\n start_position = self.current_position\n while True:\n if (self._peek() in [\" \", \"\\t\", \"\\r\", \"\\n\", \"\\0\"]\n or self._peek() in _token_names.SEPARATORS\n or self._peek() in _token_names.OPERATORS):\n break\n self._next_char()\n word = self.stream[start_position:self.current_position + 1]\n # Checks if word is a keyword.\n if word in _token_names.Keywords.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.Keywords(word).name, word)\n elif word in _token_names.KeywordsType.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsType(word).name, word)\n elif word in _token_names.KeywordsAttribute.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsAttribute(word).name, word)\n # Otherwise put it as identifier.\n else:\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.IDENTIFIER, word)\n\n # Checks if is a separator.\n elif self.current_char in _token_names.Separators.values():\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks if is an operator.\n elif self.current_char in _token_names.Operators.values():\n last_position = self.current_position\n if self.current_char not in [\"&\", \"|\"] and self._peek() == \"=\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"+\" and self._peek() == \"+\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, 
self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"-\" and self._peek() == \"-\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"&\" and self._peek() == \"&\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"|\" and self._peek() == \"|\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Operators(self.current_char).name, self.current_char)\n\n # Checks if is EOF\n elif self.current_char == \"\\0\":\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.EOF, self.current_char)\n\n # Raise error if is an unknown token.\n else:\n raise LexerError(self.current_position)\n\n self._next_char()\n return token", "def _handle_token(self, token: str) -> Optional[str]:\n raise RuntimeError('Cannot use _handle_token of this abstract class.')", "def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")", "def _parse(tokens: Deque[Token]) -> object:\n token = tokens.popleft()\n\n if token.type == 'left_bracket':\n return parse_array(tokens)\n\n if token.type == 'left_brace':\n return parse_object(tokens)\n\n if token.type == 'string':\n return parse_string(token)\n\n if token.type == 'number':\n return parse_number(token)\n\n special_tokens = {\n 'true': True,\n 'false': False,\n 'null': None,\n }\n if token.type in ('boolean', 'null'):\n return special_tokens[token.value]\n\n raise ParseError(\n f\"Unexpected token: {token.value} \"\n f\"(line {token.line} column {token.column})\")", "def tokenfunc_():\r\n if len( lexer.pendingtokens ):\r\n return lexer.pendingtokens.pop(0)\r\n\r\n tok = lexer.token()\r\n\r\n if len( lexer.pendingtokens ) and ( tok and tok.type != 'EOL'):\r\n pending = lexer.pendingtokens.pop(0)\r\n lexer.pendingtokens.append(tok)\r\n return pending\r\n\r\n return tok", "def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))", "def is_preprocessing(self):\r\n return conf.lib.clang_isPreprocessing(self)", "def _parse_word(self, token, ctxinfo) :\n ignore = False\n if token.startswith(\"|\") and token.endswith(\"|\") : # regular token\n token = token[1:-1]\n token_parts = token.rsplit( \"_\", 1 )\n if len(token_parts) == 2 :\n lemma_and_index, pos = 
token_parts\n lemma_parts = lemma_and_index.rsplit( \":\", 1 )\n if len(lemma_parts) == 2 : \n lemma, index = lemma_parts\n if lemma.endswith(\"\\\\\") :\n lemma = lemma[:-1] # separator was \\: \n else :\n ignore = True\n else :\n ignore = True\n if ignore :\n ctxinfo.warn(\"Ignoring bad token `{token}`\", token=token)\n return None\n else : \n return (lemma, index, pos)", "def _parse_token(token: str):\r\n if token in OPERATOR_TOKENS:\r\n return Operator(token)\r\n if token.isdigit():\r\n return Number(int(token))\r\n if \".\" in token:\r\n if token.count(\".\") > 1 or token[-1] == '.':\r\n raise BadNumber(token)\r\n return Number(float(token))\r\n if token == \"i\":\r\n return ComplexNumber(0, 1)\r\n if token.isalpha():\r\n return Variable(token)\r\n raise UnknownToken(token)", "def returnToken(self, token):\n if self.hide_token:\n return None\n else:\n return token", "def preparse(self, raw):\n return raw", "def next(self):\n if not self.tokens:\n return None\n else:\n return self.tokens[0]", "def pre(self, emulator=None):\n\n # Are we using an emulator?\n if emulator is not None:\n return emulator.emulatePre(self.step)\n\n logging.info(\"Steps.Executors.%s.pre called\", self.__class__.__name__)\n return None", "def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n self.__handle_atx_heading(context, atx_token)\n elif token.is_setext_heading:\n setext_token = cast(SetextHeadingMarkdownToken, token)\n self.__handle_setext_heading(setext_token)\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n self.__handle_text(text_token)\n elif token.is_setext_heading_end:\n end_token = cast(EndMarkdownToken, token)\n self.__handle_setext_heading_end(context, end_token)", "def get_token(self, symbol):\r\n for token in self:\r\n if token[\"symbol\"].lower() == symbol.lower():\r\n return token\r\n return None", "def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n if not atx_token.remove_trailing_count:\n self.__atx_heading_token = token\n elif token.is_paragraph_end:\n self.__atx_heading_token = None\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n resolved_extracted_whitespace = ParserHelper.remove_all_from_text(\n text_token.extracted_whitespace\n )\n if self.__atx_heading_token and len(resolved_extracted_whitespace) > 1:\n self.report_next_token_error(context, self.__atx_heading_token)" ]
[ "0.86988527", "0.6444068", "0.6082054", "0.58015805", "0.57239443", "0.5629078", "0.5507728", "0.5504912", "0.55046606", "0.54920423", "0.5478602", "0.54365534", "0.54039836", "0.5380746", "0.5339962", "0.5317274", "0.52795863", "0.5271098", "0.5268484", "0.5233755", "0.5146655", "0.51395607", "0.5121068", "0.5080159", "0.50561094", "0.4993005", "0.4987736", "0.49759728", "0.49662817", "0.48993418" ]
0.7874867
1
Runs the token through any relevant preparser.
def preparse(self, token): result = self.check_preparsers(token) if result is None: return token else: return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def process(self, tokens):\n return await self.parser.process(tokens)", "def parse(token):\n\n pass", "def handle_input(self, token):\n self.pipeline.handle_input(token)", "def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)", "def next_token(self, context, token):", "def check_preparsers(self, token):\n for [matcher, function] in self.preparsers:\n match = matcher.match(token)\n if match is None or match.end() != len(token):\n pass\n else:\n return function(token)\n return None", "def parse(self, tokenizer):\n pass", "def run(self, parsed):", "def tokens():\n pass", "def pre_processor(self):", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def run(self, root):\n blocks = root.iter(\"pre\")\n for block in blocks:\n if len(block) == 1 and block[0].tag == \"code\":\n html = highlight(block[0].text, self.config, self.markdown.tab_length)\n placeholder = self.markdown.htmlStash.store(html)\n # Clear codeblock in etree instance\n block.clear()\n # Change to p element which will later\n # be removed when inserting raw html\n block.tag = \"p\"\n block.text = placeholder", "def tokenize(src):\n\n pass", "def run_parser(self, code_text):\n stream = io.TextIOWrapper(io.BytesIO(code_text), encoding=\"utf8\")\n self.scanner = MyScanner(stream, self.language)\n self.scanner.libraries = []\n\n while 1:\n logging.info(\"in parser, starting while\")\n token = self.scanner.read()\n logging.info(\"in run parser, token {}\".format(token))\n logging.info(\"in run parser, scanner position {}\".format(self.scanner.position()))\n if token[0] == KEYWORD:\n self.keywords.append(token[1])\n elif token[0] == OPERATOR:\n self.operations.append(token[1])\n elif token[0] == LITERAL:\n self.literals.append(token[1])\n\n if token[0] is None:\n break\n elif token[0] == \"unrecognized\":\n pass\n # raise errors.UnrecognizedInput(self.scanner, '')\n elif token[0] == COMMENT or token[0] == STRING:\n parsed = (token[0], token[1], self.scanner.position())\n self.list_of_tuples.append(parsed)\n else:\n self.full_list[token[1]] = token[0]\n parsed = (token[0], token[1], self.scanner.position())\n self.list_of_tuples.append(parsed)\n return self.full_list, self.list_of_tuples", "def _iter_tokens(self):\n reobj, actions, nextstates = self._rules[self.states[-1]]\n mobj = reobj.match(self.string, self.pos)\n while mobj is not None:\n text = mobj.group(0)\n idx = mobj.lastindex - 1\n nextstate = nextstates[idx]\n\n # Take action\n actions[idx](self, text)\n while self.tokens:\n yield self.pop_token()\n if nextstate and nextstate != self.states[-1]:\n self.states[-1] = nextstate\n\n # Update position variables\n self.pos = mobj.end()\n lines = text.split(\"\\n\")\n nlines = len(lines) - 1\n if nlines == 0:\n self.offset = self.offset + len(lines[0])\n else:\n self.lineno = self.lineno + nlines\n self.offset = 1 + len(lines[-1])\n\n reobj, actions, nextstates = self._rules[self.states[-1]]\n mobj = reobj.match(self.string, self.pos)\n\n if self.pos != len(self.string):\n msg = \"unexpected character\"\n text = self.string[self.pos]\n raise RunError(msg, self.lineno, self.offset, 
text)\n\n yield EndToken(\"\", self.lineno, self.offset)", "def interpret(self):\n tree = self.parser.parse()\n if tree is None:\n return ''\n self.visit(tree)", "def _pre_argument_parsing(self):\n pass", "def __call__(self, tokenized_text):\n raise NotImplementedError()", "def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except ParseError:\n raise\n\n except Exception as exc:\n raise ParseError(u'Unexpected error: {0}'.format(unicode(exc)))", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return", "def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return", "def consume():\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return", "def parse(self, lexer):\n self._parse_tag(lexer)\n self._parsed_args=self.signature.resolve_arguments(self.args, self.kwargs)\n if self.isBlock:\n self._parse_block(lexer)", "def _candidates(self, token):", "def tokenize(self, start_pos=0, text=None):\n pass", "def parse_tokens(self, tokens, debug=0):\n self.reset()\n self.debug = debug\n for position, token in enumerate(tokens):\n if self.stem:\n token = self.stemmer.stem(token)\n if not isinstance(token, basestring):\n raise TypeError(\n 'Only string tokens are allowed; %s is not a string.' % (token,))\n self.reference(token, self.position, self.position, 0.0)\n preparse = self.check_preparsers(token)\n if preparse:\n self.reference(preparse, self.position, self.position, 0.0)\n self.position = position + 1\n return self.complete_parses(len(tokens))" ]
[ "0.637824", "0.62770957", "0.60263747", "0.5982428", "0.5950361", "0.59161896", "0.5890809", "0.5738108", "0.5696121", "0.567717", "0.5516384", "0.55131054", "0.5486995", "0.54364085", "0.5401569", "0.5397503", "0.5392797", "0.5386328", "0.5379222", "0.5370733", "0.5370733", "0.5370733", "0.5370733", "0.53640944", "0.53640944", "0.53640944", "0.5335578", "0.5286012", "0.52613217", "0.52592003" ]
0.68738174
0
Adds a phrasal pattern to a class. The phrasal_pattern argument is a string using the phrasal pattern syntax,
def add_phrasal_pattern(self, base, phrasal_pattern): if not base in self.phrasal_patterns: self.phrasal_patterns[base] = [phrasal_pattern] else: self.phrasal_patterns[base].append(phrasal_pattern) pattern_parser = PhrasalPatternParser(stem=self.stem) pp_obj = pattern_parser.parse(phrasal_pattern) self.add_phrasal_pattern_object(base, pp_obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def add_pattern(self, pattern, callback):\n self.patterns.append((pattern, callback))", "def __init__(self, pattern):\r\n self.pattern = pattern", "def parse(self, pattern):\n phrasal_pattern = self.convert_parse_tree_to_phrasal_pattern(\n self.parse_tree(pattern))\n return phrasal_pattern", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def add_pattern(self, name, pattern=None):\n self._pattern_reg.add_pattern(name, pattern)", "def __init__(self, pattern):\n self._pattern = pattern.lower()", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def add_pattern(self, command, pattern_string):\n parts = pattern_string.split(\":\")\n if len(parts) != 2:\n raise AssertionError(\"Cannot parse '{value}' to pattern\".format(value = value))\n pattern_id = parts[0]\n pattern_values = parts[1].split(',')\n pattern = Pattern(pattern_id, pattern_values)\n self._config.add_pattern(command, pattern)", "def __init__(self, pattern, flags=0):\n if flags:\n str_flags = hre.decodeflags(flags)\n pattern = r\"(?%s:%s)\"%(str_flags, pattern)\n super(Regex, self).__init__(pattern)", "def _maybe_add_pattern(attr, patterns):\n handler_type = getattr(attr, '_gen_handler', False)\n\n if not handler_type:\n return\n if handler_type not in ['call', 'cast', 'info']:\n raise AttributeError(\"unknown handler type {}\".format(handler_type))\n\n o = attr._gen_order\n p = attr._gen_pattern\n LOG.debug(\"adding {} {} with pattern {}\".format(handler_type,\n attr,\n p))\n patterns[handler_type].append((o, p))", "def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )", "def addRegexClass(self, label, regex, userdist=3):\n self._call_java('addRegexClass', label, regex, userdist)\n return self", "def register( self, pattern, callback ):\n self.patterns.append((pattern, callback))", "def add_patterns(self, patterns: Iterable[AttributeRulerPatternType]) -> None:\n for p in patterns:\n self.add(**p) # type: ignore[arg-type]", "def literal(cls, log_pattern_string: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"literal\", [log_pattern_string])", "def __init__(self,pattern):\n\t\tself.__type__ = 'pol'\n\t\tif type(pattern)!=list and type(pattern)!=tuple :\n\t\t\traise InvalidArgumentException(\"No puedo construir un polinomio con este argumento\")", "def _add_patterns(\n self, fuzzy_patterns: List[Dict[str, Any]], regex_patterns: List[Dict[str, Any]]\n ) -> None:\n for entry in fuzzy_patterns + regex_patterns:\n label = entry[\"label\"]\n if \"id\" in entry:\n ent_label = label\n label = self._create_label(label, entry[\"id\"])\n self._ent_ids[label] = (ent_label, entry[\"id\"])\n pattern = entry[\"pattern\"]\n kwargs = entry[\"kwargs\"]\n if isinstance(pattern, Doc):\n self.fuzzy_patterns[label][\"patterns\"].append(pattern)\n 
self.fuzzy_patterns[label][\"kwargs\"].append(kwargs)\n elif isinstance(pattern, str):\n self.regex_patterns[label][\"patterns\"].append(pattern)\n self.regex_patterns[label][\"kwargs\"].append(kwargs)\n else:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n for label, pattern in self.fuzzy_patterns.items():\n self.fuzzy_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])\n for label, pattern in self.regex_patterns.items():\n self.regex_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])", "def add_includepattern(self, patterntuple):\n if isinstance(patterntuple, tuple) and isinstance(patterntuple[0], str):\n self._includepatterns.append(patterntuple)\n else:\n warn(\"Include patterns must be tuples of a field name and a compiled regex.\")", "def add_re(self,rexp):\n crexp=re.compile(rexp)\n self.rexps.append(crexp)", "def add_command_regex(self, pattern):\n # Make regex match consistent with wildcards, i.e. full string match\n if not pattern.endswith('$'):\n pattern += '$'\n self._command_regexs.append(re.compile(pattern))", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def __init__(self, pattern, use_regex=False, pid=None, cpu=None):\n parts = bytes(pattern).split(b':')\n if len(parts) == 1:\n parts = [b\"p\", b\"\", parts[0]]\n elif len(parts) == 2:\n parts = [b\"p\", parts[0], parts[1]]\n elif len(parts) == 3:\n if parts[0] == b\"t\":\n parts = [b\"t\", b\"\", b\"%s:%s\" % tuple(parts[1:])]\n if parts[0] not in [b\"p\", b\"t\", b\"u\"]:\n raise Exception(\"Type must be 'p', 't', or 'u', but got %s\" %\n parts[0])\n else:\n raise Exception(\"Too many ':'-separated components in pattern %s\" %\n pattern)\n\n (self.type, self.library, self.pattern) = parts\n if not use_regex:\n self.pattern = self.pattern.replace(b'*', b'.*')\n self.pattern = b'^' + self.pattern + b'$'\n\n if (self.type == b\"p\" and self.library) or self.type == b\"u\":\n libpath = BPF.find_library(self.library)\n if libpath is None:\n # This might be an executable (e.g. 'bash')\n libpath = BPF.find_exe(str(self.library))\n if libpath is None or len(libpath) == 0:\n raise Exception(\"unable to find library %s\" % self.library)\n self.library = libpath\n\n self.pid = pid\n self.cpu = cpu\n self.matched = 0\n self.trace_functions = {} # map location number to function name", "def from_regex(pattern:str) -> str:\n raise NotImplementedError()", "def __init__(self, pattern1, pattern2):\n self.pattern1 = pattern1\n self.pattern2 = pattern2", "def create_pattern_function(self):\n\n type_regex = \"(?:\\w+(?:\\:\\:)?)+\"\n regex = \"^(?P<indent>\\s*)(?P<virtual>virtual )?(?P<function_return>(?:const )?\" + type_regex + \"(?P<subtype><?\" + type_regex + \">?)?) 
(?P<function_name>.*)\\((?P<args>.*)\\)(?P<const_qualifier> const)?(?: = 0)?;\\n$\"\n return regex", "def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex", "def __new__(cls, name, build_pattern: str = None, parse_pattern: re.Pattern = None):\n obj = super().__new__(cls, name)\n\n if parse_pattern is not None:\n obj.parse_pattern = parse_pattern\n\n if build_pattern is not None:\n obj.build_pattern = build_pattern\n\n return obj" ]
[ "0.65718514", "0.62494636", "0.61294127", "0.61095756", "0.6079538", "0.5993424", "0.5964391", "0.59567755", "0.5663467", "0.5644707", "0.56338227", "0.56133646", "0.5608848", "0.55708647", "0.54960144", "0.54752636", "0.53281647", "0.5323162", "0.5294432", "0.5294158", "0.529167", "0.5284166", "0.5262308", "0.5262308", "0.52425456", "0.5214207", "0.51902974", "0.5182043", "0.51781267", "0.5094503" ]
0.7822084
0
Adds a prediction to the set of anytime predictions.
def index_anytime_prediction(self, prediction): if self.debug > 1: print 'Indexing anytime prediction %s' % (prediction,) self.index_prediction(self.anytime_predictions, prediction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_prediction(self, truth_label, prediction, doc_id, doc_price=0):\n assert (truth_label == '1' or truth_label == '-1')\n \n if truth_label == prediction:\n if truth_label == '1':\n self.tp += 1\n else:\n self.tn += 1\n else:\n if truth_label == '1':\n self.fn += 1\n else:\n self.fp += 1\n\n if self.is_query_level and prediction != '-1':\n self.query_prediction.add_doc_predicted_relevant(doc_id, truth_label, doc_price)", "def predictions(self, predictions):\n\n self._predictions = predictions", "def update_predictions(data):\n # TODO: Priority 1 - update predictions with inference results\n # TODO: Understand from a research team exactly what the data is going to look like\n trackID = data[0]\n prediction = data[1]\n confidence = data[2]\n to_Insert_Array = [trackID, prediction, confidence]\n OutPut_Data[trackID] = to_Insert_Array", "def add_prediction_op(self):\n raise NotImplementedError(\"Each Model must re-implement this method.\")", "def setPrediction(self,pred):\n self.prediction = pred", "def add_element(self, prediction):\n\n if self.in_concept_change:\n self.reset()\n\n self.in_concept_change = False\n\n self.m_n += 1\n\n if prediction == 1.0:\n self.in_warning_zone = False\n self.delay = 0\n self.m_num_errors += 1\n self.m_lastd = self.m_d\n self.m_d = self.m_n - 1\n distance = self.m_d - self.m_lastd\n old_mean = self.m_mean\n self.m_mean = self.m_mean + (float(distance) - self.m_mean) / self.m_num_errors\n self.estimation = self.m_mean\n self.m_std_temp = self.m_std_temp + (distance - self.m_mean) * (distance - old_mean)\n std = np.sqrt(self.m_std_temp/self.m_num_errors)\n m2s = self.m_mean + 2 * std\n\n if self.m_n < self.FDDM_MIN_NUM_INSTANCES:\n return\n\n if m2s > self.m_m2s_max:\n self.m_m2s_max = m2s\n else:\n p = m2s / self.m_m2s_max\n if (self.m_num_errors > self.m_min_num_errors) and (p < self.FDDM_OUTCONTROL):\n self.in_concept_change = True\n\n elif (self.m_num_errors > self.m_min_num_errors) and (p < self.FDDM_WARNING):\n self.in_warning_zone = True\n\n else:\n self.in_warning_zone = False", "def register(self, predictor):\n assert self.round == 0, \"Simulation is already running: \" + \\\n \"Predictors can not be added any more!\"\n#würde \"register\" abbrechen, wenn self.round > 0 ist,\n# dient nur der Programmsicherheit\n\n if isinstance(predictor, MetaInductivist):\n self.miList.append(predictor)\n else:\n self.non_miList.append(predictor)\n self.favN[predictor.name] = 0\n self.absucc[predictor.name] = 0\n self.nonultdeceivers.append(predictor)\n self.deceiveCount[predictor.name] = 0\n self.deceiveState[predictor.name] = False\n \n predictor.registeredBy(self)\n #print str(predictor)", "def append_predictions(ds, predictions):\n def _append_predictions(x, p):\n return dict(x, prediction=p)\n predictions_ds = tf.data.Dataset.from_tensor_slices(predictions)\n return (tf.data.Dataset\n .zip((ds, predictions_ds))\n .map(_append_predictions, num_parallel_calls=TF_AUTOTUNE))", "def add_dnn_prediction_instance(self, game: Game, prediction_prob: List[float]) -> Tuple[bool, bool]:\n assert len(prediction_prob) == 2, f\"There should only be 2 percentages in prediction_prob, found \" \\\n f\"{len(prediction_prob)}: {prediction_prob}\"\n self.instance_predictions.setdefault(game, list())\n actual_winner = game.home_team if game.home_team.scores.get(GamePeriod.TOTAL) > game.away_team.scores.get(\n GamePeriod.TOTAL) else game.away_team\n actual_loser = game.away_team if actual_winner is game.home_team else game.home_team\n predicted_winner = game.home_team if 
prediction_prob[0] > prediction_prob[1] else game.away_team\n predicted_loser = game.away_team if predicted_winner is game.home_team else game.home_team\n self.logger.info(f\"PREDICTED: {predicted_winner.name} beats {predicted_loser.name} for game {game.code}\")\n self.logger.info(f\"ACTUAL RESULT: {actual_winner.name} beats {actual_loser.name} for game {game.code}\\n\")\n self.instance_predictions[game].append(predicted_winner)\n actual_home_team_won = True if actual_winner == game.home_team else False\n predicted_home_team_won = True if predicted_winner == game.home_team else False\n return actual_home_team_won, predicted_home_team_won", "def update_future_match_predictions(predictions: List[CleanPredictionData]) -> None:\n future_match_count = Match.objects.filter(\n start_date_time__gt=timezone.now()\n ).count()\n\n assert future_match_count > 0, (\n \"No future matches exist in the DB. Try updating fixture data, \"\n \"then updating predictions again.\"\n )\n\n for pred in predictions:\n Prediction.update_or_create_from_raw_data(pred, future_only=True)", "def total_predictions(self, total_predictions):\n\n self._total_predictions = total_predictions", "def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += y\n self.o_eval += o\n self.write_predictions(o)", "def update_prediction_run(session: Session, prediction_run: PredictionModelRunTimestamp):\n session.add(prediction_run)\n session.commit()", "def insert_predicted_admission(university, historic, predictions):\n historic_data = str(historic[0]) + \":\" + str(historic[1])\n if predictions:\n predictions_data = str(predictions[0]) + \":\" + str(predictions[1])\n else:\n predictions_data = None\n params = (university, historic_data, predictions_data)\n query = \"INSERT INTO predicted_admissions_table VALUES (%s, %s, %s)\"\n general_db_func.insert_to_db(query, params)", "def store_prediction(self, doa_list):\n\n true_doas = [utility_methods.cylindrical(self.current_position + doa_list[0]),\n utility_methods.cylindrical(self.current_position + doa_list[1])]\n\n self.predictions.append(true_doas)", "def add_pred_to_list(temp: [], res: []):\n # Calculate the average probability of all predictions in temp\n avg_prob = sum(x[PRED_PROBABILITY_INDEX] for x in temp) / len(temp)\n\n if avg_prob >= PRED_PROB_THRESHOLD and len(temp) >= PRED_AMOUNT_THRESHOLD:\n res.append({\n 'begin': temp[0][PRED_TIME_INDEX],\n 'end': temp[len(temp) - 1][PRED_TIME_INDEX],\n 'label': temp[0][PRED_LABEL_INDEX],\n 'avg_probability': avg_prob\n })", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def add(self, predicted, target):\n # Parameter check\n if predicted.size() != target.size():\n raise ValueError(\n \"size mismatch, {} != {}\".format(predicted.size(), target.size())\n )\n elif tuple(predicted.unique(sorted=True)) not in [(0, 1), (0,), (1,)]:\n raise ValueError(\"predicted values are not binary\")\n elif tuple(target.unique(sorted=True)) not in [(0, 1), (0,), (1,)]:\n raise ValueError(\"target values are not binary\")\n\n # Flatten the tensor and convert to numpy\n predicted = predicted.squeeze(1).cpu().numpy()\n target = target.squeeze(1).cpu().numpy()\n\n for p, t in zip(predicted, target):\n # Try to split the segmentation mask in into one mask per ship\n predicted_ships = split_ships(p, max_ships=self.max_ships)\n target_ships = split_ships(t, max_ships=self.max_ships)\n score = f_score(\n predicted_ships,\n target_ships,\n beta=self.beta,\n 
thresholds=self.thresholds,\n )\n self.fscore_history.append(score)", "def postprocess(self, prediction_dict, **params):\r\n pass", "def addpredicate(self, pred):\n self._preds.append(pred)", "def append(appliance):\n\n # Handle optional params\n if request.json.get('observation') != '':\n observation = request.json.get('observation')\n\n # append value\n lstm_models[appliance].append(float(observation))\n\n # logging\n logging.info(\"Response for \" + str(request.remote_addr) +\n \": observation: '\" + observation +\n \"' appended to series.\")\n\n value = lstm_models[appliance].predict()\n\n # logging\n logging.info(\"Response for \" + str(request.remote_addr) +\n \": Forecast: \" + str(value))\n\n return jsonify({'prediction': str(value)}), 200", "def add_present_prob(self, probability):\n self.present_probs.append(probability)", "def write_predictions(self):\n unique_id_to_result = {}\n for result in self._all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\",\n \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for example in self._eval_examples:\n example_id = example.qas_id if (\"squad\" in self._name or \"cmrc2018\" in self._name or \"drcd\" in self._name) else example.qid\n features = self._task.featurize(example, False, for_eval=True)\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature[self._name + \"_eid\"]]\n if self._config.joint_prediction:\n start_indexes = result.start_top_index\n end_indexes = result.end_top_index\n else:\n start_indexes = _get_best_indexes(result.start_logits,\n self._config.n_best_size)\n end_indexes = _get_best_indexes(result.end_logits,\n self._config.n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if self._v2:\n if self._config.answerable_classifier:\n feature_null_score = result.answerable_logit\n else:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n for i, start_index in enumerate(start_indexes):\n for j, end_index in enumerate(\n end_indexes[i] if self._config.joint_prediction else end_indexes):\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature[self._name + \"_tokens\"]):\n continue\n if end_index >= len(feature[self._name + \"_tokens\"]):\n continue\n if start_index == 0:\n continue\n if start_index not in feature[self._name + \"_token_to_orig_map\"]:\n continue\n if end_index not in feature[self._name + \"_token_to_orig_map\"]:\n continue\n if not feature[self._name + \"_token_is_max_context\"].get(\n start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > self._config.max_answer_length:\n continue\n start_logit = (result.start_top_log_probs[i] if\n self._config.joint_prediction else\n result.start_logits[start_index])\n end_logit = (result.end_top_log_probs[i, j] if\n self._config.joint_prediction else\n result.end_logits[end_index])\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=start_logit,\n end_logit=end_logit))\n\n if self._v2:\n if len(prelim_predictions) == 0 and self._config.debug:\n tokid = sorted(feature[self._name + \"_token_to_orig_map\"].keys())[0]\n prelim_predictions.append(_PrelimPrediction(\n feature_index=0,\n start_index=tokid,\n end_index=tokid + 1,\n start_logit=1.0,\n end_logit=1.0))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= self._config.n_best_size:\n break\n feature = features[pred.feature_index]\n tok_tokens = feature[self._name + \"_tokens\"][\n pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature[\n self._name + \"_token_to_orig_map\"][pred.start_index]\n orig_doc_end = feature[\n self._name + \"_token_to_orig_map\"][pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(self._config, tok_text, orig_text)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(dict(output))\n\n assert len(nbest_json) >= 1\n\n if not self._v2:\n all_predictions[example_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n if self._config.answerable_classifier:\n score_diff = score_null\n else:\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example_id] = score_diff\n all_predictions[example_id] = best_non_null_entry.text\n\n all_nbest_json[example_id] = nbest_json\n\n utils.write_json(dict(all_predictions),\n self._config.qa_preds_file(self._name+\"_\"+self._split))\n if self._v2:\n utils.write_json({\n k: float(v) for k, v in six.iteritems(scores_diff_json)},\n self._config.qa_na_file(self._name+\"_\"+self._split))", "def submit_forecast(self,question_id,method_name,predictions):\n \n url = self.external_prediction_sets_url\n \n params={'external_prediction_set':{'question_id':question_id,\n 'external_predictor_attributes':\n {'method_name':method_name},\n 'external_predictions_attributes':predictions}\n }\n \n return self._post(url,params)", "def send_to_prediction(satz_ids, data):\n predictions = []\n global df_hisotorical\n df_hisotorical = get_historical_data(data[\"UserID\"])\n\n for x in satz_ids:\n full_data = accumulate_satz_id(x, data)\n p = predict(full_data)\n predictions.append([x, p])\n\n return predictions", "def add_signal_align_predictions(self):\n # TODO call signalalign if not called\n sa_events = self.get_signalalign_events()\n # cut out duplicates\n sa_events = np.unique(sa_events)\n events = self.get_resegment_basecall()\n predictions = match_events_with_signalalign(sa_events=sa_events, event_detections=events)\n # rna reference positions are on 5' edge aka right side of kmer\n if self.rna:\n predictions[\"reference_index\"] -= self.kmer_index\n else:\n predictions[\"reference_index\"] += self.kmer_index\n self.aligned_signal.add_label(predictions, name=\"full_signalalign\", label_type='prediction')\n return True", "def store_predictions(self, preds, df, feature):\n\n prev_values = list(df[feature].iloc[:len(df) - len(self.players)])\n prev_values.extend(preds)\n\n df[feature] = prev_values\n\n return df", "def add_prediction_endpoint(self, endpoint_id, saved_model_id):\n self.settings[\"endpoints\"].append({\n \"id\" : endpoint_id,\n \"type\" : \"STD_PREDICTION\",\n \"modelRef\": saved_model_id\n })", "def prediction_a_all(self):\n return self._prediction_a_all", "def update(self, y_true, y_pred):\n self.y_true.append(y_true)\n self.y_pred.append(y_pred)" ]
[ "0.65096253", "0.6502434", "0.62210304", "0.61110085", "0.605963", "0.5962398", "0.5924518", "0.58317405", "0.5813396", "0.5808941", "0.5747288", "0.5687968", "0.56817526", "0.567845", "0.56692386", "0.55811673", "0.5569443", "0.5561001", "0.55457646", "0.5533378", "0.54548186", "0.5388634", "0.5339418", "0.53100294", "0.529265", "0.5267277", "0.52566725", "0.52511495", "0.52338666", "0.52237594" ]
0.6842723
0
Adds a prediction to the set of dynamic predictions.
def index_dynamic_prediction(self, prediction): if self.debug > 1: print 'Indexing dynamic prediction %s' % (prediction,) self.index_prediction(self.dynamic_predictions, prediction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predictions(self, predictions):\n\n self._predictions = predictions", "def add_prediction(self, truth_label, prediction, doc_id, doc_price=0):\n assert (truth_label == '1' or truth_label == '-1')\n \n if truth_label == prediction:\n if truth_label == '1':\n self.tp += 1\n else:\n self.tn += 1\n else:\n if truth_label == '1':\n self.fn += 1\n else:\n self.fp += 1\n\n if self.is_query_level and prediction != '-1':\n self.query_prediction.add_doc_predicted_relevant(doc_id, truth_label, doc_price)", "def setPrediction(self,pred):\n self.prediction = pred", "def add_prediction_op(self):\n raise NotImplementedError(\"Each Model must re-implement this method.\")", "def postprocess(self, prediction_dict, **params):\r\n pass", "def append_predictions(ds, predictions):\n def _append_predictions(x, p):\n return dict(x, prediction=p)\n predictions_ds = tf.data.Dataset.from_tensor_slices(predictions)\n return (tf.data.Dataset\n .zip((ds, predictions_ds))\n .map(_append_predictions, num_parallel_calls=TF_AUTOTUNE))", "def add_element(self, prediction):\n\n if self.in_concept_change:\n self.reset()\n\n self.in_concept_change = False\n\n self.m_n += 1\n\n if prediction == 1.0:\n self.in_warning_zone = False\n self.delay = 0\n self.m_num_errors += 1\n self.m_lastd = self.m_d\n self.m_d = self.m_n - 1\n distance = self.m_d - self.m_lastd\n old_mean = self.m_mean\n self.m_mean = self.m_mean + (float(distance) - self.m_mean) / self.m_num_errors\n self.estimation = self.m_mean\n self.m_std_temp = self.m_std_temp + (distance - self.m_mean) * (distance - old_mean)\n std = np.sqrt(self.m_std_temp/self.m_num_errors)\n m2s = self.m_mean + 2 * std\n\n if self.m_n < self.FDDM_MIN_NUM_INSTANCES:\n return\n\n if m2s > self.m_m2s_max:\n self.m_m2s_max = m2s\n else:\n p = m2s / self.m_m2s_max\n if (self.m_num_errors > self.m_min_num_errors) and (p < self.FDDM_OUTCONTROL):\n self.in_concept_change = True\n\n elif (self.m_num_errors > self.m_min_num_errors) and (p < self.FDDM_WARNING):\n self.in_warning_zone = True\n\n else:\n self.in_warning_zone = False", "def update_predictions(data):\n # TODO: Priority 1 - update predictions with inference results\n # TODO: Understand from a research team exactly what the data is going to look like\n trackID = data[0]\n prediction = data[1]\n confidence = data[2]\n to_Insert_Array = [trackID, prediction, confidence]\n OutPut_Data[trackID] = to_Insert_Array", "def register(self, predictor):\n assert self.round == 0, \"Simulation is already running: \" + \\\n \"Predictors can not be added any more!\"\n#würde \"register\" abbrechen, wenn self.round > 0 ist,\n# dient nur der Programmsicherheit\n\n if isinstance(predictor, MetaInductivist):\n self.miList.append(predictor)\n else:\n self.non_miList.append(predictor)\n self.favN[predictor.name] = 0\n self.absucc[predictor.name] = 0\n self.nonultdeceivers.append(predictor)\n self.deceiveCount[predictor.name] = 0\n self.deceiveState[predictor.name] = False\n \n predictor.registeredBy(self)\n #print str(predictor)", "def add(self, predicted, target):\n # Parameter check\n if predicted.size() != target.size():\n raise ValueError(\n \"size mismatch, {} != {}\".format(predicted.size(), target.size())\n )\n elif tuple(predicted.unique(sorted=True)) not in [(0, 1), (0,), (1,)]:\n raise ValueError(\"predicted values are not binary\")\n elif tuple(target.unique(sorted=True)) not in [(0, 1), (0,), (1,)]:\n raise ValueError(\"target values are not binary\")\n\n # Flatten the tensor and convert to numpy\n predicted = 
predicted.squeeze(1).cpu().numpy()\n target = target.squeeze(1).cpu().numpy()\n\n for p, t in zip(predicted, target):\n # Try to split the segmentation mask in into one mask per ship\n predicted_ships = split_ships(p, max_ships=self.max_ships)\n target_ships = split_ships(t, max_ships=self.max_ships)\n score = f_score(\n predicted_ships,\n target_ships,\n beta=self.beta,\n thresholds=self.thresholds,\n )\n self.fscore_history.append(score)", "def update(self, y_true, y_pred):\n self.y_true.append(y_true)\n self.y_pred.append(y_pred)", "def addpredicate(self, pred):\n self._preds.append(pred)", "def add_dnn_prediction_instance(self, game: Game, prediction_prob: List[float]) -> Tuple[bool, bool]:\n assert len(prediction_prob) == 2, f\"There should only be 2 percentages in prediction_prob, found \" \\\n f\"{len(prediction_prob)}: {prediction_prob}\"\n self.instance_predictions.setdefault(game, list())\n actual_winner = game.home_team if game.home_team.scores.get(GamePeriod.TOTAL) > game.away_team.scores.get(\n GamePeriod.TOTAL) else game.away_team\n actual_loser = game.away_team if actual_winner is game.home_team else game.home_team\n predicted_winner = game.home_team if prediction_prob[0] > prediction_prob[1] else game.away_team\n predicted_loser = game.away_team if predicted_winner is game.home_team else game.home_team\n self.logger.info(f\"PREDICTED: {predicted_winner.name} beats {predicted_loser.name} for game {game.code}\")\n self.logger.info(f\"ACTUAL RESULT: {actual_winner.name} beats {actual_loser.name} for game {game.code}\\n\")\n self.instance_predictions[game].append(predicted_winner)\n actual_home_team_won = True if actual_winner == game.home_team else False\n predicted_home_team_won = True if predicted_winner == game.home_team else False\n return actual_home_team_won, predicted_home_team_won", "def store_prediction(self, doa_list):\n\n true_doas = [utility_methods.cylindrical(self.current_position + doa_list[0]),\n utility_methods.cylindrical(self.current_position + doa_list[1])]\n\n self.predictions.append(true_doas)", "def predict(self, predPoints=None):", "def add(self, targets, predictions, values=None):\n if len(targets) != len(predictions):\n raise ValueError, \\\n \"Targets[%d] and predictions[%d]\" % (len(targets),\n len(predictions)) + \\\n \" have different number of samples\"\n\n if values is not None and len(targets) != len(values):\n raise ValueError, \\\n \"Targets[%d] and values[%d]\" % (len(targets),\n len(values)) + \\\n \" have different number of samples\"\n\n # enforce labels in predictions to be of the same datatype as in\n # targets, since otherwise we are getting doubles for unknown at a\n # given moment labels\n nonetype = type(None)\n for i in xrange(len(targets)):\n t1, t2 = type(targets[i]), type(predictions[i])\n # if there were no prediction made - leave None, otherwise\n # convert to appropriate type\n if t1 != t2 and t2 != nonetype:\n #warning(\"Obtained target %s and prediction %s are of \" %\n # (t1, t2) + \"different datatypes.\")\n if isinstance(predictions, tuple):\n predictions = list(predictions)\n predictions[i] = t1(predictions[i])\n\n if values is not None:\n # assure that we have a copy, or otherwise further in-place\n # modifications might screw things up (some classifiers share\n # values and spit out results)\n values = copy.deepcopy(values)\n\n self.__sets.append( (targets, predictions, values) )\n self._computed = False", "def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += 
y\n self.o_eval += o\n self.write_predictions(o)", "def fit_predict(self):\n raise AttributeError", "def build_predictions(self, predict_ids, params):\n raise NotImplementedError()", "def add_prediction_op(self):\n pred = tf.get_variable(\n name='pred',\n shape=(self.batch_size, self.config.n_classes),\n initializer=tf.zeros_initializer()\n )\n\n return pred", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(self, **kwargs):\n raise NotImplementedError", "def predict(self):\n raise NotImplementedError", "def add_prediction_endpoint(self, endpoint_id, saved_model_id):\n self.settings[\"endpoints\"].append({\n \"id\" : endpoint_id,\n \"type\" : \"STD_PREDICTION\",\n \"modelRef\": saved_model_id\n })", "def predict(self, predictions):\n assert self.predictions.shape == predictions.shape, \\\n 'Predictions shape is not the same as the training predictions\\'.'\n\n new = self.alphas[:, None] * predictions[self.classifiers, :]\n return np.sign(np.sum(new, axis=0))", "def prediction_processing_no_reload(lines, predictions):\n\n final_predictions = []\n for idx, line in enumerate(lines):\n if len(line['predicted_pages']) == 0:\n line['predicted_evidence'] = []\n else:\n line['predicted_evidence'] = [[prediction[0], int(prediction[1])] for prediction in predictions[idx]]\n line['predicted_label'] = \"REFUTES\"\n final_predictions.append(line)\n\n return final_predictions", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict(self, session, *args, predict_data_iterator=None, **kwargs):\n raise NotImplementedError(\"Implement predict() method\")", "def post_process_predictions(self, labels, scene):\n pass", "def predict(\n self,\n targetSeries,\n exogenousSeries=None\n ):\n pass" ]
[ "0.7123987", "0.6687101", "0.6608172", "0.6588574", "0.6438798", "0.63834846", "0.63128114", "0.6136088", "0.6132608", "0.61325115", "0.59819067", "0.59658396", "0.58679914", "0.5849889", "0.58309", "0.5822418", "0.5814575", "0.5764383", "0.57549876", "0.573824", "0.5735496", "0.57278144", "0.5698848", "0.5691779", "0.5686346", "0.5674337", "0.5673037", "0.56657386", "0.563941", "0.5622357" ]
0.70860356
1
Checks whether a term in a phrasal pattern is a syntax
def is_syntax_directive(self, term): if isinstance(term, list): if term[0] in self.syntax_functions: return True raise Error('%s is not a valid syntax function.' % (term[0],)) else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_logic_syntax(string):\n return logExp.matches(string)", "def _CheckExceptionTerm(self, term, rules):\n flag = False\n for keyword in rules:\n if rules[keyword] == 'starts':\n flag = flag or term.startswith(keyword)\n if rules[keyword] == 'ends':\n flag = flag or term.endswith(keyword)\n if rules[keyword] == 'contains':\n flag = flag or (keyword in term)\n return flag", "def valid_syntax(command):\n\n for ev, value in bash_iter(command, syntax_check=True):\n if ev == \"err\":\n if value.endswith(\"syntax error: unexpected end of file\"):\n return False\n if \"unexpected EOF while looking for matching\" in value:\n return False\n if \"here-document at line\" in value:\n return False\n return value == 0", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def is_raw_hmeta_key(term: str) -> bool:\n\n # The minimal viable term is '([ZZZ])', We do some quick checks\n if term is None or len(term) < 7:\n return False\n\n if not term.startswith('(') or not term.endswith(')') or \\\n term.index('[') == -1 or term.index(']') == -1:\n return False\n\n # TODO: the has_hsilo_syntax is doing a very basic check. Some\n # responsability will be passed for check later, like actually\n # compare to an know language. But we could at least check\n # here for wrong number of open and closed () [].\n\n return True", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def check_syntax(self, isInitialSet=False):\n expression = self.raw_expression\n if SymEq.check_expression_syntax(expression, isInitialSet):\n return True\n else:\n self.raw_expressions = None\n raise Exception(\"[RectangleSet ERROR]: Syntax NOT valid.\")", "def _contains_synset(cq: str) -> bool:\n return re.search(r'\\[.*\\]', cq) is not None", "def is_simple (self, phrase):\r\n\r\n return not self.contains(phrase,'()&|>#')", "def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))", "def is_sphinx_markup(docstring):\n # this could be made much more clever\n return (\"`\" in docstring or \"::\" in docstring)", "def w_is_typed(tokens):\n return (\n 'type' in tokens or\n 'answerblock' in tokens or\n 'drawbox' in tokens or\n 'answerfigure' in tokens\n )", "def contains_tokens(pattern):\n return type(pattern) is list and len(pattern) > 0", "def syntaxValid(s):\n\tif not s or type(s) != str or s[0] != '[':\n\t\treturn False\n\n\t#using stack to check for matching brackets\n\t#push '[' and pop for ']'\n\tbracketStack = []\n\tfor i in range(len(s)):\n\t\tcurrentChar = s[i]\n\t\tif currentChar == '[':\n\t\t\t#check for there is a delimiter before open bracket or trying to double nest\n\t\t\tif i != 0 and s[i-1] == '[' or i > 1 and s[i-2] != ',':\n\t\t\t\treturn False\n\t\t\tbracketStack.append('[')\n\t\telif currentChar == ']':\n\t\t\t#check if stack is prematurely empty or no delimiter after closed bracket\n\t\t\tif not bracketStack or (i != len(s)-1 and s[i+1] != ']' and s[i+1] != ','):\n\t\t\t\treturn False \n\t\t\telse: \n\t\t\t\tbracketStack.pop()\n\t\telif currentChar == ',':\n\t\t\t#check if delimiter is invalid\n\t\t\tif i == 0 or i == len(s)-1 or s[i-1] == '[' or s[i+1] != ' ':\n\t\t\t\treturn False\n\t#if the stack is empty, we have matched brackets, hence s is valid\n\treturn len(bracketStack) == 0", "def isValid(text):\r\n 
return bool(re.search(r'\\bcommute\\b', text, re.IGNORECASE))", "def isRegexPossible(self):\n if self._lastToken is None:\n # No token has been produced yet: at the start of the input,\n # no division is possible, so a regex literal _is_ possible.\n return True\n\n if self._lastToken.type == ECMAScriptLexer.Identifier or \\\n self._lastToken.type == ECMAScriptLexer.NullLiteral or \\\n self._lastToken.type == ECMAScriptLexer.BooleanLiteral or \\\n self._lastToken.type == ECMAScriptLexer.This or \\\n self._lastToken.type == ECMAScriptLexer.CloseBracket or \\\n self._lastToken.type == ECMAScriptLexer.CloseParen or \\\n self._lastToken.type == ECMAScriptLexer.OctalIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.DecimalLiteral or \\\n self._lastToken.type == ECMAScriptLexer.HexIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.StringLiteral or \\\n self._lastToken.type == ECMAScriptLexer.PlusPlus or \\\n self._lastToken.type == ECMAScriptLexer.MinusMinus:\n # After any of the tokens above, no regex literal can follow.\n return False\n else:\n # In all other cases, a regex literal _is_ possible.\n return True", "def _has_brackets(content_type: str) -> bool:\n for compositional_type in SPECIFICATION_COMPOSITIONAL_TYPES:\n if content_type.startswith(compositional_type):\n content_type = content_type[len(compositional_type) :]\n if len(content_type) < 2:\n return False\n return content_type[0] == \"[\" and content_type[len(content_type) - 1] == \"]\"\n raise SyntaxError(\"Content type must be a compositional type!\")", "def syntax_text():", "def test_no_spelling_mistakes_for_technical_terms_in_context(self, style):\n content = \"{s}{e}\\n\\\"\\\"\\\"technicalterm\\\"\\\"\\\")\\n\\n technicalterm\\n\"\n result = self._spellcheck_lint(content, style)\n\n self.assertTrue(result)", "def is_variable(pattern):\n return (type(pattern) is str\n and pattern[0] == '?'\n and len(pattern) > 1\n and pattern[1] != '*'\n and pattern[1] in string.ascii_letters\n and ' ' not in pattern)", "def test_spires_syntax_detected_naked_a(self):\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\"a ellis\")\n self.assertEqual(spi_search, True)", "def validate_syntax(self):\n resolves_present = False\n uses_present = False\n if not self.wf.get('workflow', None):\n pu.fail('A workflow block must be present\\n')\n else:\n for _, wf_block in dict(self.wf['workflow']).items():\n if wf_block.get('resolves', None):\n resolves_present = True\n if not resolves_present:\n pu.fail('[resolves] attribute must be present\\n')\n if not self.wf.get('action', None):\n pu.fail('Atleast one action block must be present\\n')\n else:\n for _, a_block in self.wf['action'].items():\n if a_block.get('uses', None):\n uses_present = True\n if not uses_present:\n pu.fail('[uses] attribute must be present\\n')", "def no_or_clauses (self,phrase):\r\n \r\n for x in phrase:\r\n if isinstance(x,list) and x[0] == '@':\r\n return False\r\n return True", "def is_math_line(line):\n if '=' in line:\n # Check it isn't some other command\n for cmd in CMD_LIST:\n if re.findall(f\"^{cmd} \", line):\n return False\n\n str_txt, non_str = gen_parse.get_str_between_delims(line, '\"')\n if any(j in non_str for j in '<>-+/*^'):\n return True\n return False", "def should_lex(cls, char):\n return char == '{' or char == '}'", "def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')", "def is_lval(t):\n if not t:\n return False\n i = 
iter(t)\n if i.next() not in IDENTIFIER_START:\n return False\n return all(e in IDENTIFIER_PART for e in i)", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def _pattern_is_simple(pattern):\n return bool(re.match('[\\\\w_]+$', tostring(pattern)))", "def all_simple (phrase):\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True" ]
[ "0.6473199", "0.6298758", "0.62886566", "0.6095183", "0.6074437", "0.6054373", "0.6012495", "0.59231627", "0.59226996", "0.5920593", "0.5886903", "0.5808357", "0.57052237", "0.56754154", "0.5669591", "0.5662537", "0.56541157", "0.56399435", "0.56335765", "0.56320757", "0.56301147", "0.55804867", "0.5536514", "0.55334735", "0.5527889", "0.55099446", "0.5495642", "0.54941386", "0.5490371", "0.54773664" ]
0.72898144
0
Creates a description with the specified base class and slots.
def find_frame(self, base, slots): return logic.Description(base, slots)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name=None, slots=None):\n default_attr = dict(name=str(),\n slots=dict())\n self.name = name\n self.slots = slots\n self._set_default_attr(default_attr)", "def generate_slot(slot_name, slot_description, slot_raw_filename):\n slot = {\n 'enumerationValues': [],\n \"name\": slot_name,\n \"description\": slot_description\n }\n slot_raw_vals = read_raw_vals(slot_raw_filename)\n for slot_val in slot_raw_vals:\n slot['enumerationValues'].append({'value': slot_val})\n\n return slot", "def __init__(self, describers: list, **kwargs):\n steps = [(i.__class__.__name__, i) for i in describers]\n super().__init__(steps, **kwargs)", "def generate_launch_description():\n composed = ComposableNodeContainer(\n name='ComponentManager',\n namespace='',\n package='rclcpp_components',\n executable='component_container',\n composable_node_descriptions=[\n ComposableNode(\n package='composition',\n plugin='composition::Talker',\n name='comp_talker'),\n ComposableNode(\n package='composition',\n plugin='composition::Listener',\n name='comp_listener')\n ],\n output='both',\n )\n\n jsp = Node(\n package='joint_state_publisher',\n executable='joint_state_publisher',\n output='both',\n )\n\n lc_talker = LifecycleNode(\n name='lc_talker',\n namespace='',\n package='lifecycle',\n executable='lifecycle_talker',\n output='both',\n )\n\n listener = Node(\n package='demo_nodes_cpp',\n executable='listener',\n output='both',\n )\n\n talker = Node(\n package='demo_nodes_cpp',\n executable='listener',\n output='both',\n )\n\n turtle = Node(\n package='turtlesim',\n executable='turtlesim_node',\n output='both',\n )\n\n return launch.LaunchDescription([\n composed,\n jsp,\n lc_talker,\n listener,\n talker,\n turtle,\n ])", "def get_description():\n desc = {\"description\": __doc__}\n sts = utc() - timedelta(hours=26)\n ets = utc() - timedelta(hours=2)\n desc[\"arguments\"] = [\n {\n \"type\": \"datetime\",\n \"name\": \"sts\",\n \"default\": sts.strftime(\"%Y/%m/%d %H00\"),\n \"label\": \"Start Timestamp (UTC):\",\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"datetime\",\n \"name\": \"ets\",\n \"default\": ets.strftime(\"%Y/%m/%d %H00\"),\n \"label\": (\n \"End Timestamp [inclusive] (UTC), \"\n \"interval must be less than 4 days\"\n ),\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT,\n \"default\": \"min\",\n \"name\": \"w\",\n \"label\": \"Which statistic to compute\",\n },\n {\n \"type\": \"csector\",\n \"name\": \"csector\",\n \"default\": \"IA\",\n \"label\": \"Select state/sector\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT2,\n \"default\": \"user\",\n \"label\": \"Plotting mode (user defined color-ramp or freezing)\",\n \"name\": \"mode\",\n },\n {\n \"type\": \"cmap\",\n \"name\": \"cmap\",\n \"default\": \"gnuplot2\",\n \"label\": \"Color Ramp:\",\n },\n ]\n return desc", "def __init__(self, title=None, description=None):\n super().__init__(self.COMPONENT_NAME)\n self.__title = title\n self.__description = description", "def create_descr(self, attr_name):", "def __new__(cls, name, bases, dct):\n _cls = super().__new__(cls, name, bases, dct)\n PeaType._dct.update({name: {'cls': cls,\n 'name': name,\n 'bases': bases,\n 'dct': dct}})\n return _cls", "def __new__(cls, name, bases, dct):\n _cls = super().__new__(cls, name, bases, dct)\n PeaType._dct.update({name: {'cls': cls,\n 'name': name,\n 'bases': bases,\n 'dct': dct}})\n return _cls", "def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"cache\": 600}\n today = 
datetime.date.today()\n desc[\"arguments\"] = [\n dict(\n type=\"csector\",\n name=\"csector\",\n default=\"IA\",\n label=\"Select state/sector to plot\",\n ),\n dict(\n type=\"date\",\n name=\"sdate\",\n default=f\"{today.year}/01/01\",\n label=\"Start Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"date\",\n name=\"edate\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"End Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"select\",\n name=\"d\",\n default=\"0\",\n options=PDICT,\n label=\"Select Drought Classification (at and above counted):\",\n ),\n dict(\n type=\"select\",\n name=\"w\",\n default=\"percent\",\n options=PDICT2,\n label=\"How to express time for plot:\",\n ),\n dict(type=\"cmap\", name=\"cmap\", default=\"plasma\", label=\"Color Ramp:\"),\n ]\n return desc", "def __new__(\n mcs,\n name: str,\n bases: typing.Tuple[type, ...],\n namespace: typing.Dict[str, typing.Any],\n ):\n namespace['__repr__'] = lambda self: repr(type(self))\n return super().__new__(mcs, name, bases, namespace)", "def BoostDesc_create(desc=None, use_scale_orientation=None, scale_factor=None): # real signature unknown; restored from __doc__\n pass", "def create_plaquette(self, baseStr):\n raise NotImplementedError(\"Derived class must implement this.\")", "def __init__(self, name: str, description: str):\n self.name = name\n self.description = description", "def __new__(mcs, name, bases, attrs):\n assert \"__slots__\" not in attrs, \\\n \"Class '%s' defines __slots__ when it should not\" % name\n\n attrs[\"__slots__\"] = mcs._GetSlots(attrs)\n\n return type.__new__(mcs, name, bases, attrs)", "def with_metaclass(meta, base=object):\r\n return meta(\"NewBase\", (base,), {})", "def __new__(cls, name, bases, attrs):\n super_new = super(ClientMeta, cls).__new__\n\n # Create the class.\n module = attrs.pop('__module__')\n base_attrs = {'__module__': module,\n '_exchanges':[],\n '_queues':[],\n '_consumers':[],\n '_tasks':[],\n }\n new_class = super_new(cls, name, bases, base_attrs)\n\n for obj_name, obj in attrs.items():\n new_class.add_to_class(obj_name, obj)\n\n return new_class", "def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"", "def create_definition(self, block_type, slug=None):\n raise NotImplementedError()", "def convert(self, slot, desc, *args, **kwargs):\n desc = desc[0] if isinstance(desc, list) else desc\n return build_module(desc, slot, *args, **kwargs)", "def with_metaclass(meta, *bases):\r\n return meta(\"NewBase\", bases, {})", "def with_metaclass(meta, *bases):\r\n return meta(\"NewBase\", bases, {})", "def with_metaclass(meta, base=object):\n return meta(\"NewBase\", (base,), {})", "def with_metaclass(meta, base=object):\n return meta(\"NewBase\", (base,), {})", "def table_description(classname, nclassname, shape=()):\n classdict = {}\n colpos = append_columns(classdict, shape)\n\n ndescr = nested_description(nclassname, colpos, shape=shape)\n classdict['c_nested'] = ndescr\n colpos += 1\n\n extracol = tb.IntCol(shape=shape, pos=colpos)\n classdict['c_extra'] = extracol\n colpos += 1\n\n idxextracol = tb.IntCol(shape=shape, pos=colpos)\n classdict['c_idxextra'] = idxextracol\n colpos += 1\n\n return type(classname, (tb.IsDescription,), classdict)", "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station\",\n network=\"IACLIMATE\",\n ),\n 
dict(\n type=\"select\",\n name=\"var\",\n default=\"precip\",\n label=\"Which Variable:\",\n options=PDICT,\n ),\n ]\n return desc", "def __init__(name, title=\"\", description=\"\"):", "def generate_class_string(typename, props, description, namespace):\n # TODO _prop_names, _type, _namespace, and available_properties\n # can be modified by a Dash JS developer via setattr\n # TODO - Tab out the repr for the repr of these components to make it\n # look more like a hierarchical tree\n # TODO - Include \"description\" \"defaultValue\" in the repr and docstring\n #\n # TODO - Handle \"required\"\n #\n # TODO - How to handle user-given `null` values? I want to include\n # an expanded docstring like Dropdown(value=None, id=None)\n # but by templating in those None values, I have no way of knowing\n # whether a property is None because the user explicitly wanted\n # it to be `null` or whether that was just the default value.\n # The solution might be to deal with default values better although\n # not all component authors will supply those.\n\n filtered_props = filter_props(props)\n prop_keys = list(filtered_props.keys())\n string_attributes = \"\"\n for p in prop_keys:\n # TODO support wildcard attributes\n if p[-1] != \"*\":\n string_attributes += \"has '{}' => (\\n is => 'rw'\\n);\\n\".format(p)\n perl_assets_package_name = _perl_assets_package_name_from_shortname(namespace)\n common = \"my $dash_namespace = '\" + namespace + \"';\\n\\nsub DashNamespace {\\n return $dash_namespace;\\n}\\nsub _js_dist {\\n return \" + perl_assets_package_name + \"::_js_dist;\\n}\\n\"\n return string_attributes + common", "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"var\",\n default=\"spi\",\n options=PDICT,\n label=\"Select which metric to plot:\",\n ),\n dict(\n type=\"select\",\n name=\"c\",\n default=\"ncei91\",\n options=PDICT2,\n label=\"Which climatology to use for averages:\",\n ),\n dict(\n type=\"int\",\n name=\"days\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n ]\n return desc", "def create_component(self, name: str, nx_class: str, description: str) -> Component:\n name = _convert_name_with_spaces(name)\n parent_group = self.nexus.instrument\n if nx_class in COMPONENTS_IN_ENTRY:\n parent_group = self.nexus.entry\n component_group = self.nexus.create_nx_group(name, nx_class, parent_group)\n component = create_component(self.nexus, component_group)\n component.description = description\n return component" ]
[ "0.58070856", "0.5718173", "0.5609317", "0.5423587", "0.5419044", "0.53676623", "0.5367074", "0.5291225", "0.5291225", "0.5285099", "0.5282149", "0.5216667", "0.51968807", "0.5177209", "0.51751906", "0.5170609", "0.5164997", "0.5153529", "0.51534", "0.51493376", "0.51250994", "0.51250994", "0.5084144", "0.5084144", "0.50833327", "0.5073547", "0.5071239", "0.5065956", "0.50562406", "0.50534475" ]
0.6458275
0
If the prediction is waiting for a slotfiller, and the item we saw can fill the slot, add the slot with its filler to the prediction's slots.
def extend_slots(self, prediction, item): spec = prediction.phrasal_pattern[0] slots = prediction.slots if is_role_specifier(spec): new_slots = copy(slots) new_slot = self.role_specifier(spec) if new_slot in new_slots: raise DuplicateSlotError('Slot %s already exists in %s.' % ( new_slot, prediction)) new_slots[new_slot] = item return new_slots else: return slots
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)", "def merge_slots(self, pred_slots, item_slots):\n for slot in pred_slots:\n if slot in item_slots:\n raise DuplicateSlotError('Slot %s already has the value %s.' % (\n slot, item_slots[slot]))\n slots = {}\n for slot in pred_slots:\n slots[slot] = pred_slots[slot]\n for slot in item_slots:\n slots[slot] = item_slots[slot]\n return slots", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)", "def fill_last_one(slots):\n tempslots = deepcopy(slots)\n for key, items in tempslots.items():\n if sum([1 for i in items if i]) == 4:\n last_element = list(set(globals()[key.upper()]).difference(set(items)))[0]\n slots[key] = [i if i else last_element for i in items]\n return slots", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def required_slots(self,tracker) -> List[Text]:", "def decide(self, slot_index):\n self._slot_decided[slot_index] = True", "def pickUpItem(self, app, newItem: Stack):\n\n if newItem.isEmpty(): return\n\n # Prioritize existing stacks of the item first\n for (i, slot) in enumerate(self.inventory):\n stack = slot.stack\n if stack.isInfinite() and stack.item == newItem.item:\n # It just stacks into an infinite slot, so no change\n return\n elif newItem.isInfinite() and stack.item == newItem.item:\n # ditto\n return \n elif stack.amount > 0 and stack.item == newItem.item:\n self.inventory[i].stack.amount += newItem.amount\n return\n\n # If that fails, then just add the item to the next open space\n for (i, slot) in enumerate(self.inventory):\n if slot.isEmpty():\n self.inventory[i].stack = newItem\n return\n \n # TODO: Full inventory??\n 1 / 0", "def vacate_slot(self, slot, reg_num):\n self.__occupied_slots__.pop(slot.slot_number)\n self.__vehicle_slot_mapping__.pop(reg_num)\n self.__available_slots__.add(slot)", "def extra_slots(self, extra_slots):\n\n self._extra_slots = extra_slots", "def promote_needed_optional_slots(self, context_to_update):\n prod_line_name = context_to_update.get_slot_value(\"production_line\")\n if prod_line_name is None:\n # Unreachable\n raise RuntimeError(\"Tried to check if optional slots needed a \"+\n \"promotion while all mandarory slots where not \"+\n \"filled ('production_line' missing).\")\n\n if Phi.findLine(prod_line_name) is None:\n return context_to_update.promote_slot(\"line_number\")\n return False", "def test_multiple_slots_released(self):\r\n JOB_ID = 20\r\n JOB_START= 50\r\n job = simulation_multi.Job(2, JOB_START)\r\n 
self.simulation.jobs[JOB_ID] = job\r\n\r\n worker = self.simulation.workers[1]\r\n self.assertEqual(worker.num_free_slots, 4)\r\n events = worker.add_probe(JOB_ID, JOB_START)\r\n self.assertEqual(worker.num_free_slots, 0)\r\n # The events shoudl include 2 task end events and 1 noop.\r\n self.assertEqual(len(events), 3)\r\n # Run the noop event.\r\n events[-1][1].run(events[-1][0])\r\n self.assertEqual(worker.num_free_slots, 2)", "def test_overFill(self):\r\n high = 15\r\n for _ in range(high):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))\r\n\r\n # check all are still present\r\n for _ in range(high-1, high - SIZE-1, -1):\r\n self.assertTrue(_ in self.nb)", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def perceive(self, slot):\n # right now, let's just observe the first element in the world\n if isinstance(slot, Slot):\n self.percept = slot.get_state()\n else:\n raise RuntimeError(\"Cannot observe other objects right now!\")", "def grow(self):\r\n # Double the physical size if no more room for items\r\n # and add the fillValue to the new cells in the underlying list\r\n for count in range(len(self)):\r\n self._items.append(self._fillValue)", "def add_last_ring(self, fill, rot=None):\n self.fills.append(fill)\n self.rot.append(rot)", "def fill_item(self, args, producing_job):\n pass", "def container_for_slot(self, slot):\n\n for l in self.metalist:\n if not len(l):\n continue\n if slot < len(l):\n return l, slot\n slot -= len(l)", "def can_fit_task_starting_at_slot(self, task, i):\n slots_required = ceil(float(task._length) / float(self._segment_length))\n if slots_required == 0:\n raise ValueError(\"slots required should end up being > 0 for proper tasks\")\n for delta in range(slots_required):\n if self._segmented_tasks[i + delta] is not None:\n return False\n return True", "def append(self, item):\n if self.full or self.pre_allocated:\n # overwrite\n self.data[self.cur] = item\n else:\n self.data.append(item)\n if not self.full:\n self.full = self.cur == self.max - 1\n self.cur = (self.cur + 1) % self.max", "def item_starred(self, item):\n self.update_item(item)", "def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])", "def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')", "def remove_slot(self, slot):\n if slot in self.slots:\n idx = self.slots.index(slot)\n # update location of rest slot\n for s in self.slots[idx:]:\n s.set_location(s.get_location()-1)\n self.slots.remove(slot)", "def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)" ]
[ "0.58766544", "0.57055455", "0.56512886", "0.55791277", "0.5512329", "0.54924816", "0.5338965", "0.5329639", "0.52402145", "0.5155657", "0.51417106", "0.5121062", "0.50382066", "0.5012749", "0.4931047", "0.49296585", "0.49216405", "0.48618618", "0.48344553", "0.4827673", "0.47947904", "0.4781251", "0.47748557", "0.47520798", "0.47402212", "0.47357515", "0.47357464", "0.472748", "0.4725559", "0.47244096" ]
0.70102435
0
Looks up the constraint on the specified slot for item.
def slot_constraint(self, item, role_spec): return self.kb.slot_value( logic.expr(item), CONSTRAINT_EXPR, logic.expr(role_spec))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None", "def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]", "def containing_slot(self, point):\n for i, bounds in enumerate(self._slot_bounds):\n if bounds.contains_point(point):\n return i + 1\n\n return None", "def constraint(self, item, handle, glue_item):\n start = MatrixProjection(self.start, glue_item.matrix_i2c)\n end = MatrixProjection(self.end, glue_item.matrix_i2c)\n point = MatrixProjection(handle.pos, item.matrix_i2c)\n\n cx = EqualsConstraint(point.x, start.x)\n cy = BetweenConstraint(point.y, start.y, end.y)\n\n return MultiConstraint(start, end, point, cx, cy)", "def find(self,item):\n sig = str(item)\n try:\n return self.index[sig]\n except:\n return None", "def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)", "def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:\n deck_def = self.get_deck_definition()\n\n for slot_def in deck_def[\"locations\"][\"orderedSlots\"]:\n if slot_def[\"id\"] == slot.id:\n return slot_def\n\n raise errors.SlotDoesNotExistError(\n f\"Slot ID {slot.id} does not exist in deck {deck_def['otId']}\"\n )", "def get_closest_slot(self):\n if not self.__available_slots__:\n return None\n return min(self.__available_slots__)", "def get_constraint(self, attribute_name):\n\n for constraint in self.constraints:\n if constraint.key == attribute_name:\n return constraint\n\n # If it can't be found, return None.\n return None", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')", "def findItem(self, item):\n found_location = self.__find(item)\n\n if found_location is not None and found_location.item == item:\n return found_location\n else:\n raise NotFoundError(\"The item '\" + str(item) + \"' was not found!\")", "def get_slot(aMap, key, default=None):\n\tbucket = get_bucket(aMap, key)\n\n\tfor i, kv in enumerate(bucket):\n\t\tk, v = kv\n\t\tif key == k:\n\t\t\treturn i, k, v\n\n\treturn -1, key, default", "def __getitem__(self, item):\n\n for _var in self.inputs + self.outputs:\n if _var.name == item:\n return _var\n\n raise KeyError('No input: {}'.format(item))", "def _get_slot_variable(self, layer_name, slot_name):\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )", "def get_slot(aMap, key, default=None):\n\t#now that we know which bucket the key could be in\n\t#we iterate through all the elements of that bucket until it finds the key\n\t\n\tbucket = get_bucket(aMap, key)\n\t\n\tfor i, kv in enumerate(bucket):\n\t#enumerate returns a tuple containing the count (starting at 0) \n\t#and values obtained from iterating over the sequence\n\t\tk, v = kv\n\t\t#unpacks the elements in the bucket into 'key' and 'value'\n\t\tif key == k:\n\t\t\treturn i, k, v \n\t#if the slot does not contain the key, then it returns \"none\"\n\treturn -1, key, default", "def get_item(self, key):\n search_slot = self.count_hash(key, len(self.slots))\n\n if self.slots[search_slot] == key:\n data = self.data[search_slot]\n elif isinstance(self.slots[search_slot], tuple):\n index_tuple = (self.slots[search_slot].index(key))\n data = (self.data[search_slot][index_tuple])\n else:\n data = None\n\n return data", "def get_by_slot(self, parent_object, slot):\n placeholder = self.parent(parent_object).get(slot=slot)\n placeholder.parent = parent_object # fill the reverse cache\n 
return placeholder", "def _spc(self, spcID):\r\n if spcID in self.add_constraints:\r\n return self.add_constraints[spcID]\r\n return self.constraints[spcID]", "def get_item_index(self, item):\n return self.__item_map[item]", "def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)", "def get_slot(aMap,key,default=None):\n\tbucket=get_bucket(aMap,key)\n\t\n\tfor i,kv in enumerate(bucket):\n\t\tk,v=kv\n\t\tif key==k:\n\t\t\treturn i,k,v\n\t\t\n\treturn -1,key,default", "def _get_bond_constraint(self, atom1, atom2, system):\n # TODO: This algorithm is incredibly inefficient.\n # Instead, generate a dictionary lookup of constrained distances.\n\n atom_indices = set([atom1.idx, atom2.idx])\n n_constraints = system.getNumConstraints()\n constraint = None\n for i in range(n_constraints):\n p1, p2, length = system.getConstraintParameters(i)\n constraint_atoms = set([p1, p2])\n if len(constraint_atoms.intersection(atom_indices))==2:\n constraint = length\n\n if constraint is not None:\n check_dimensionality(constraint, unit.nanometers)\n return constraint", "def _get_nearest_slot(self):\n available_slots = [pslot for pslot in self.slots.values() if pslot.available]\n if not available_slots:\n return None\n\n return sorted(available_slots, key=lambda x: x.slot_no)[0]", "def constraint(self) -> Constraint:\n return self._constraint", "def getConstraint(self, *args):\n return _libsbml.Model_getConstraint(self, *args)", "def equip(self, item, actor):\n if (item.slot not in self.EqSlots.keys()):\n # Not an equipment.\n return 1\n\n old_item = self.EqSlots.get(item.slot)\n\n # Ok, equip and remove from list.\n self.EqSlots[item.slot] = item\n self.inventory_remove(item)\n item.give_bonus(actor)\n\n if (old_item is not None):\n # Was not empty - remove (any) old equipment bonus and add to inventory\n old_item.remove_bonus(actor)\n self.inventory_add(old_item)\n return 0", "def container_for_slot(self, slot):\n\n for l in self.metalist:\n if not len(l):\n continue\n if slot < len(l):\n return l, slot\n slot -= len(l)", "def get_slot_position(self, slot: DeckSlotName) -> Point:\n slot_def = self.get_slot_definition(slot)\n position = slot_def[\"position\"]\n\n return Point(x=position[0], y=position[1], z=position[2])", "def __getitem__(self, item):\n return self.get_segment_by_name(item)", "def get_player_item_val(self, choice_of_item, user):\n\n for key in user.player_inventory:\n\n if choice_of_item == key:\n return user.player_inventory[key]\n\n return False" ]
[ "0.6716188", "0.6055658", "0.59715843", "0.5913708", "0.56738085", "0.56528217", "0.5513649", "0.5497099", "0.54896563", "0.54380274", "0.5436859", "0.5412239", "0.5348998", "0.5327917", "0.5307018", "0.52860284", "0.5240431", "0.5230452", "0.5210546", "0.51916254", "0.5186451", "0.51824474", "0.5177055", "0.51616406", "0.5148614", "0.50986004", "0.508573", "0.5069076", "0.5059091", "0.5031225" ]
0.7476286
0
Parses a string containing a phrasal pattern into a tree representation.
def parse(self, pattern): phrasal_pattern = self.convert_parse_tree_to_phrasal_pattern( self.parse_tree(pattern)) return phrasal_pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_tree(s):\n return parser.parse(lexer=lexer, input=s)", "def tree_string_parser(tree_string):\n tree_hash = create_hash(tree_string)\n tree_string = tree_string.replace(\"(\", \"{\")\n tree_string = tree_string.replace(\")\", \"}\")\n tree_string = \"{\" + tree_string + \"}\"\n tree_string = tree_string.replace(\"'\", \"\")\n\n data = re.sub(r'([\\w\\.\\-]+)', r'\"\\1\"', tree_string) # {\"val1\" {\"val2\" {\"d1\" \"d2\" \"d3\"}}}\n data = re.sub(r'\"\\s*{', r'\": {', data) # {\"val1\": {\"val2\": {\"d1\" \"d2\" \"d3\"}}}\n data = re.sub(r'\" \"', r'\", \"', data) # {\"val1\": {\"val2\": {\"d1\", \"d2\", \"d3\"}}}\n data = re.sub(r'{([^{}]*)}', r'[\\1]', data) # {\"val1\": {\"val2\": [\"d1\", \"d2\", \"d3\"]}}\n data = re.sub(r'\\[([\\\"\\:\\, \\w\\.\\-]+:+[\\\"\\:\\, \\w\\.\\-]+)\\]', r'\\1', data)\n data = re.sub(r'({[\\\"\\w\\.\\-]+),', r'\\1: [],', data)\n data = re.sub(r', (\\\"[\\\"\\w\\.\\-]+\\\")}', r', \\1: []}', data)\n data = re.sub(r', (\\\"[\\\"\\w\\.\\-]+\\\"),(?![^\\[]*\\])', r', \\1: [],', data)\n data = re.sub(r', (\\\"[\\\"\\w\\.\\-]+\\\"),(?![^\\[]*\\])', r', \\1: [],', data)\n data = re.sub(r', \\[(.*?)\\]', r', \"ARGS\": [[\\1]]', data)\n try:\n tree_dict = json.loads(data)\n except:\n print(\"Error from json.loads: \" + data)\n try:\n fix_up_tree(tree_dict)\n except:\n print(\"Error from fix_up_tree: \" + data)\n try:\n plot_tree(tree_dict, tree_hash)\n except:\n print(\"Error from plot tree: \" + data)", "def parse(self, s: str) -> Tree:\n tokens = lex(s, pattern=PENMAN_RE)\n return self._parse(tokens)", "def read_tree(cls, string):\n \n # first, remove the brackets ()\n string = string.strip()\n if len(string) == 0:\n raise ValueError(\"empty string cannot be a Synstactic Tree\")\n if string[0] == \"(\" and string[-1] == \")\":\n string = string[1:-1]\n \n # split the string with blank character\n # if the string has exactly or fewer than one element, it cannot be a \n # tree\n # if it has two elements, it must be a tree with a terminal node as the\n # root\n # if it has more than two elements, take the first element as the root\n # and other elements as the branches \n elements = string.split()\n if len(elements) <= 1 or elements[1] == \"\":\n raise ValueError(\"%s cannot be a tree or subtree\" %string)\n else:\n if len(elements) == 2:\n # TODO: if the label comes from CCG parser, turn [] into ()\n root = cls._read_terminal_node(elements)\n else:\n branch_string = \" \".join(elements[1:])\n root = SyntacticNode(elements[0])\n branches = cls._read_branches(branch_string)\n for branch in branches:\n root.add_child(branch._root)\n \n return SyntacticTree(root)", "def parseNewick(string):\n if string.find(';') != -1:\n string = string[:string.find(';')]\n return PhyloTree(parseNewickNode(string))", "def parse(self, text: str) -> Tree:\n return self.parser.parse(text)", "def parse(self, string, root=None):\n\n\t\tphrases = []\n\n\t\tmeta = self.meta.search(string)\n\n\t\twhile meta:\n\n\t\t\t# Save some function calls\n\t\t\tpos = meta.start()\n\n\t\t\tif meta.group() == \"<\":\n\t\t\t\tstring, child, meta = self.open_phrase(string, pos)\n\n\t\t\t\tif child and root:\n\t\t\t\t\troot.nested.append(child)\n\t\t\t\telif child:\n\t\t\t\t\tphrases.append(child)\n\n\t\t\t\t# else it was escaped (+ new meta)\n\t\t\t\tcontinue\n\n\t\t\telif root:\n\n\t\t\t\tif meta.group() == \"(\":\n\t\t\t\t\tmeta = self.meta.search(string, pos + 1)\n\t\t\t\t\tif meta.group() == \")\":\n\t\t\t\t\t\tstring, root, meta = self.handle_arguments(string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
root,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t meta.start())\n\t\t\t\t\t\tcontinue\n\n\t\t\t\telif meta.group() == \">\":\n\t\t\t\t\tstring, phrase, meta = self.close_phrase(string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t root,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos)\n\t\t\t\t\tif phrase:\n\t\t\t\t\t\treturn string, phrase\n\n\t\t\t\t\t# else was escaped (+ new meta)\n\t\t\t\t\tcontinue\n\n\t\t\tstring, meta = self.escape_meta(string, pos)\n\n\t\tif not root:\n\t\t\treturn string, phrases\n\n\t\t# If this is not the first stack-depth the function should\n\t\t# have returned upon finding a closing tag,\n\t\t# i.e. we should never have gotten here.\n\t\tword = re.search(r\"([\\w\\s]+)(?![\\d]*>[\\w\\s]+>)\", string)\n\n\t\twhat = \"No closing tag found for opening tag\"\n\n\t\tif word:\n\t\t\twhat += \" after expression '{0}'\".format(word.group())\n\n\t\traise errors.ParseError(what + \"!\")", "def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n return Text(string, token)", "def parse (self, phrase):\r\n\r\n if isinstance(phrase,str):\r\n #If the phrase is a string\r\n if self.is_simple(phrase):\r\n #EXITS the recursion\r\n if phrase[0:2] == '~~':\r\n return phrase[2:]\r\n #Eliminates negations that cancel each other\r\n return phrase\r\n elif self.bracketed(phrase):\r\n #Eliminate top-level parantheses\r\n return self.parse(phrase[1:-1])\r\n elif phrase[0] == '~':\r\n #If the phrase begins with a negating prefix...\r\n negations,phrase = self.heading_count(phrase)\r\n \r\n if self.bracketed(phrase):\r\n #If the negated phrase is bracketed\r\n if negations % 2 == 1:\r\n subphrase = self.split_into_phrases(phrase[1:-1])\r\n if subphrase[0] != '@': \r\n #De Morgan's Law \r\n return self.parse(['@']+['~'+x for x in subphrase])\r\n else:\r\n #De Morgan's Law\r\n return self.parse(['~'+x for x in subphrase[1:]])\r\n else:\r\n return self.parse(phrase[1:-1])\r\n return self.parse(self.split_into_phrases((negations%2)*'~'+phrase))\r\n \r\n else:\r\n return self.parse(self.split_into_phrases(phrase))\r\n # IF the phrase is a list\r\n if self.all_is_P(phrase,predicate_function=self.is_simple):\r\n #If every terms of the phrase list is simple...\r\n #This prepares for EXIT from recursion\r\n return [self.parse(x) for x in phrase]\r\n return self.parse([self.parse(x) for x in phrase])", "def parse(s: str) -> StateFormula:\n tree = PCTL_PARSER.parse(s.replace(\" \", \"\"))\n return PCTLTransformer.transform(tree)", "def parse_sexprs(ppddl_txt):\n tokens = _ppddl_tokenize(ppddl_txt)\n parse_root = parse_ptr = HList(None)\n # we parse begin -> end\n # just reverse so that pop() is efficient\n tokens_reverse = tokens[::-1]\n while tokens_reverse:\n token = tokens_reverse.pop()\n if token == '(':\n # push\n new_ptr = HList(parse_ptr)\n parse_ptr.append(new_ptr)\n parse_ptr = new_ptr\n elif token == ')':\n # pop\n parse_ptr = parse_ptr.parent\n else:\n # add\n parse_ptr.append(token)\n return parse_root", "def from_string(s):\n r_rule = re.compile(\"^(\\w+): (.*)$\")\n try:\n parent_tag, rules_string = s.split(\" -> \")\n rules = []\n for i in rules_string.split(\",\"):\n optional = i.strip().startswith(\"(\")\n match = r_rule.match(i.strip().strip(\"()\"))\n assert match\n tag, rule = match.groups()\n rules.append(\n {\"optional\": optional, \"tag\": tag, \"rule\": rule})\n return Grammar(parent_tag, rules)\n except (ValueError, AssertionError):\n raise Exception(\"Can not parse.\")", "def tokens_from_treestring(s):\n return re.sub(r\"\\([0-9] |\\)\", \"\", 
s).split()", "def parse_logic(logic):\n\n ###print \"parse_logic(logic): logic:\",logic\n\n tokens = logic.split()\n\n # begin recursive logic parse\n return grammar_0(tokens)", "def parseNewickNode(string):\n first = string.find('(')\n last = string[::-1].find(')') # look from the back\n if first == -1 and last == -1: # we are at leaf\n y = string.split(':')\n node = PhyloNode(label=y[0])\n if len(y) >= 2:\n node.dist = float(y[1])\n return node\n elif first >= 0 and last >= 0:\n # remove parentheses\n last = len(string) - last - 1 # correct index to refer from start instead of end of string\n embed = string[first + 1:last]\n tail = string[last + 1:]\n # find where corresp comma is\n commas = _findComma(embed)\n if len(commas) < 1:\n raise RuntimeError('Invalid format: invalid placement of \",\" in sub-string \"' + embed + '\"')\n prev_comma = 0\n child_tokens = []\n for comma in commas:\n child_tokens.append(embed[prev_comma:comma].strip())\n prev_comma = comma + 1\n child_tokens.append(embed[prev_comma:].strip())\n y = tail.split(':')\n node = PhyloNode(label=y[0]) # node is an instance of the PhyloNode() class\n if len(y) >= 2:\n node.dist = float(y[1])\n node.children = []\n for tok in child_tokens:\n child = parseNewickNode(tok)\n child.parent = node\n node.children.append(child)\n return node\n else:\n raise RuntimeError('Invalid format: unbalanced parentheses in sub-string \"' + string + '\"')", "def parse_from_regex(string,pattern,fields):\n\n string = string.replace('\\\\','/') # USE POSIX PLEASE\n num_groups = flat_paren_counter(pattern)\n if isinstance(fields,str):\n fields = [fields]\n num_fields = len(fields)\n if not num_fields == num_groups:\n return {}\n match = re.search(pattern,string)\n if not num_groups == len(match.groups()):\n return {}\n \n l = []\n \n for field,value in zip(fields,list(match.groups())):\n d = nested_notation_to_tree(field,value)\n l.append(d)\n return deep_merge_N(l)", "def tree_build(sv, piece):\r\n if piece==None: return None \r\n # process various string expressions (or triplets without args for conditions and values)\r\n piece=piece[0].strip(Space) if type(piece)==tuple else piece.strip(Space) # convert to string \r\n alphabetic=Alphakwords+sv.Object_list\r\n \r\n # empty expression\r\n if not piece: return None\r\n\r\n # a string between quotes\r\n if piece[0]==Quote and piece[-1]==Quote: return (piece, None, None) # return string as a leaf\r\n \r\n # a protected string: restore without further parsing \r\n key=piece.strip(Special) \r\n if key in sv.Strings: return (Quote+sv.Strings[key]+Quote, None, None) # return string as a leaf\r\n\r\n # a bracketed expression: parse from outer ones on, RECURSIVE\r\n if key in sv.Blocks: return (Obr, tree_build(sv, sv.Blocks[key]), None)\r\n\r\n piece=save_bracketed(sv, piece) # protect outer bracketed expressions from parsing\r\n piece=Space+piece+Space # add Spaces to help detect alphabetic keys \r\n \r\n # PARSE by operator priority and descending order of position \r\n for op_group in Priority_groups+[sv.Object_list]: # ops by priority groups\r\n op_list=find_op(sv, piece, op_group, alphabetic) # detect operators of this group\r\n\r\n for o, op in op_list: # found ops from this group in reverse order of occurrence\r\n\r\n # process comma operator \r\n if o==Comma and o in piece: return make_list(sv, piece) # list will be linear (not a tree). 
Build RECURSIVE \r\n\r\n # process unary functions and defined objects (all unary operators are alphabetic)\r\n if o in Unary or o in sv.Object: # unary operators (non space-delimited)\r\n if piece.startswith(op): # operator must be at the start (space-delimited)\r\n res=make_unary(sv, piece, o, op)\r\n if res and (not res[1] or o in [Begin, End]):\r\n return special_unary(sv, res) # process special case \r\n return res\r\n \r\n # process binary operators (always lower priority than unary). Build RECURSIVE\r\n elif op in piece:\r\n res=make_binary(sv, piece, o, op) # binary operators (space-delimited)\r\n if res and (not res[1] or o==Isnot):\r\n return special_binary(sv, res) # process special case \r\n return res\r\n\r\n # process other (args and doubly) subscripted objects. Build RECURSIVE\r\n piece=piece.strip(Space)\r\n if Special+Bloc in piece: return make_subscripted(sv, piece) # the object is subscripted / has args\r\n\r\n # when all operators have been processed, only leaves remain\r\n return make_leaf(sv, piece)", "def parse(s):\n return expr.parseString(s, parseAll=True)", "def deserialize(string):\r\n # For empty string return None, which makes the logic simpler\r\n # at node creation\r\n if len(string) == 0:\r\n return None \r\n\r\n # Simplest case - leaf, no children\r\n if string.count(\"(\") == 0:\r\n return Node(string)\r\n\r\n # Extract parent and children\r\n subs = string.split(\"(\", 1)\r\n root = subs[0]\r\n # Remove closing bracket\r\n rest = subs[1][:-1]\r\n\r\n # Simple case - left child has no children\r\n left, right = rest.split(\"|\", 1)\r\n if left.count(\"(\") == 0:\r\n return Node(root, deserialize(left), deserialize(right))\r\n\r\n # Keep moving right until we find the end of left's children\r\n next_index = rest.index(\"|\", len(left)+1)\r\n left = rest[:next_index]\r\n right = rest[next_index+1:]\r\n while left.count('(') != left.count(')'):\r\n next_index = rest.index(\"|\", next_index+1)\r\n left = rest[:next_index]\r\n right = rest[next_index+1:]\r\n\r\n return Node(root, deserialize(left), deserialize(right))", "def buildtree(text):\n tokens = text.strip().replace('//TT_ERR','').replace('\\n','').replace('(', ' ( ').replace(')', ' ) ').split()\n # print 'tokens = {}'.format(tokens)\n queue = processtext(tokens)\n # print 'queue = {}'.format(queue)\n stack = []\n while queue:\n token = queue.pop(0)\n if token == ')':\n # If ')', start processing\n content = [] # Content in the stack\n while stack:\n cont = stack.pop()\n if cont == '(':\n break\n else:\n content.append(cont)\n content.reverse() # Reverse to the original order\n # Parse according to the first content word\n if len(content) < 2:\n raise ValueError(\"content = {}\".format(content))\n label = content.pop(0)\n if label == 'Root':\n node = SpanNode(prop=label)\n node = createnode(node, content)\n stack.append(node)\n elif label == 'Nucleus':\n node = SpanNode(prop=label)\n node = createnode(node, content)\n stack.append(node)\n elif label == 'Satellite':\n node = SpanNode(prop=label)\n node = createnode(node, content)\n stack.append(node)\n elif label == 'span':\n # Merge\n beginindex = int(content.pop(0))\n endindex = int(content.pop(0))\n stack.append(('span', beginindex, endindex))\n elif label == 'leaf':\n # Merge \n eduindex = int(content.pop(0))\n checkcontent(label, content)\n stack.append(('leaf', eduindex, eduindex))\n elif label == 'rel2par':\n # Merge\n relation = content.pop(0)\n checkcontent(label, content)\n stack.append(('relation',relation))\n elif label == 'text':\n # Merge\n 
txt = createtext(content)\n stack.append(('text', txt))\n else:\n raise ValueError(\"Unrecognized parsing label: {} \\n\\twith content = {}\\n\\tstack={}\\n\\tqueue={}\".format(label, content, stack, queue))\n else:\n # else, keep push into the stack\n stack.append(token)\n return stack[-1]", "def parse_maths_string(string:str, parent_name:str, variables:dict):\n if string == \"\":\n return None\n maths_tree = MathTree.construct_tree(string, parent_name, variables)\n \n val = maths_tree.get_tree_value(parent_name)\n #print(\"String:\",string)\n #print(\"Maths Tree:\", maths_tree)\n #print(\"result:\",val)\n #print()\n \n return val", "def from_str(triple):\n def nodeIsBlank(element):\n return (element == '%p') or (element.startswith('?'))\n\n elements = triple.strip().split(' ')\n # check if current triple pattern is well formed\n if (len(elements) < 3) or (len(elements) > 3):\n raise SyntaxError('The pattern {} is not well formed : '\n 'it must contains exactly three nodes.'\n .format(triple.strip()))\n\n # seralize it\n subject = Node(elements[0], nodeIsBlank(elements[0]))\n predicate = Node(elements[1], nodeIsBlank(elements[1]))\n obj = Node(elements[2], nodeIsBlank(elements[2]))\n return TriplePattern(subject, predicate, obj)", "def parse_from_tree(self, parse):\n pass", "def __init__(self,treeString):\n\t\tself.treeString=treeString\n\t\tif self.checkString(self.treeString) == True:\n\t\t\tself.root=node.node(None)\n\t\t\tself.currNode=self.root\n\t\t\tself.buildTree(self.treeString)\n\t\t\t\"\"\"once information from buildTree is assigned to individual nodes, \n\t\t\tthe nodes then parse that information into names, branch lengths, etc\n\t\t\trecursive function allows the call to be made only to the root\"\"\"\n\t\t\t#use this step to do likelihood calculation\n\t\t\tself.root.processInfo()\n\t\telse:\n\t\t\t#change to an error in long run\n\t\t\tprint \"improperly formatted newick string\"", "def deserialize(self, data: str) -> TreeNode:\n\t\tif data[0] == '_':\n\t\t\treturn None\n\n\t\tarr = data.split()\n\t\trootVal = arr[0]\n\t\tdata = ' '.join(arr[1:])\n\t\tsubtreeStrings = []\n\t\tnested = 0\n\t\tfor i in range(len(data)):\n\t\t\tif not nested and data[i] == '_':\n\t\t\t\tsubtreeStrings.append('_')\n\t\t\tif data[i] == '(':\n\t\t\t\tnested += 1\n\t\t\t\tif nested == 1:\n\t\t\t\t\tstart = i\n\t\t\tif data[i] == ')':\n\t\t\t\tnested -= 1\n\t\t\t\tif not nested:\n\t\t\t\t\tend = i\n\t\t\t\t\tsubtreeStrings.append(data[start+1:end])\n\n\t\treturn TreeNode(rootVal, self.deserialize(subtreeStrings[0]), self.deserialize(subtreeStrings[1]))", "def deserialize(st: str, words: Optional[List] = None, convert_underscores: bool = True) -> Tuple[TreeNode, List]:\n tree_stack = []\n current_depth = 0\n last_token = ''\n root = TreeNode()\n tree_stack.append((root, current_depth))\n next_is_distance = False\n\n combo = []\n my_stack = []\n my_stack.append((combo, current_depth))\n\n for token in _tokenize_newick(st, convert_underscores=convert_underscores):\n # Check for a label\n if last_token not in '(,):':\n val = Sequence(words[int(last_token)]) if words else int(last_token)\n if not next_is_distance:\n tree_stack[-1][0].name = val if last_token else None\n else:\n next_is_distance = False\n if last_token:\n my_stack[-1][0].append(val)\n else:\n my_stack[-1][0].append(None)\n\n # Check for a distance\n if token == ':':\n next_is_distance = True\n elif last_token == ':':\n try:\n tree_stack[-1][0].length = float(token)\n except ValueError:\n raise NewickFormatError(\"Could not read length 
as numeric type\"\n \": %s.\" % token)\n elif token == '(':\n current_depth += 1\n tree_stack.append((TreeNode(), current_depth))\n my_stack.append((list(), current_depth))\n elif token == ',':\n tree_stack.append((TreeNode(), current_depth))\n my_stack.append((list(), current_depth))\n elif token == ')':\n if len(tree_stack) < 2:\n raise NewickFormatError(\"Could not parse file as newick.\"\n \" Parenthesis are unbalanced.\")\n children = []\n my_children = []\n # Pop all nodes at this depth as they belong to the remaining\n # node on the top of the stack as children.\n while current_depth == tree_stack[-1][1]:\n node, _ = tree_stack.pop()\n children.insert(0, node)\n nc, _ = my_stack.pop()\n [my_children.insert(0, c) for c in nc]\n parent = tree_stack[-1][0]\n my_parent = my_stack[-1][0]\n\n if parent.children:\n raise NewickFormatError(\"Could not parse file as newick.\"\n \" Contains unnested children.\")\n # This is much faster than TreeNode.extend\n for child in children:\n child.parent = parent\n parent.children = children\n\n my_parent.append(my_children)\n\n current_depth -= 1\n elif token == ';':\n if len(tree_stack) == 1:\n return root, my_stack\n break\n\n last_token = token\n\n raise NewickFormatError(\"Could not parse file as newick.\"\n \" `(Parenthesis)`, `'single-quotes'`,\"\n \" `[comments]` may be unbalanced, or tree may be\"\n \" missing its root.\")", "def parse(s):\n return s", "def from_string(representation):\r\n gramm = Grammar()\r\n\r\n for rule in representation.strip().split('\\n'):\r\n gramm._add_rule(rule)\r\n\r\n return gramm", "def parse(string):\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))", "def test04(self):\n\n s = \"a;\"\n t = parse_newick(s);\n self.assertTrue(self.isTree(t) and t.label == \"a\" and t.isLeaf())" ]
[ "0.6565261", "0.6406509", "0.62351835", "0.6228951", "0.61783403", "0.6132944", "0.58582824", "0.5843517", "0.5839673", "0.5832786", "0.57039857", "0.5695232", "0.5673272", "0.5672355", "0.56367236", "0.5616294", "0.55375797", "0.5523444", "0.5484524", "0.5459082", "0.5456929", "0.5456326", "0.53962237", "0.533629", "0.5323135", "0.53145343", "0.5300824", "0.52782327", "0.52569747", "0.5245008" ]
0.73521155
0
Installs an index set.
def install(self, index_set): index_set.indices = map(self.stem, index_set.indices) index_set.required_indices = map(self.stem, index_set.required_indices) self.unique_target_concepts[index_set.target_concept] = True for index in index_set.indices: if not index in self.target_concepts.get(index, []): self.target_concepts[index] = ([index_set.target_concept] + self.target_concepts.get(index, [])) if not index_set in self.index_sets.get(index, []): self.index_sets[index] = [index_set] + self.index_sets.get(index, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install_packages_from_index(self, env=None):\n # extract non-source packages from package list\n index_packages = [p for p in self.pkg_arguments if not\n utils.assert_package_is_source(p)]\n # skip this step if there are no packages to be installed\n if not index_packages:\n print(\"No index packages set for installation. Skipping ...\")\n return\n # build command for installing packages from index\n cmd_args = {\n 'exe': self.pkg_executable,\n 'cmds': \" \".join(self.pkg_commands),\n 'flags': \" \".join(self.pkg_flags),\n 'pkgs': \" \".join(index_packages),\n }\n cmd_install_index = self.cmd_install.format(**cmd_args)\n print(\"Installing index packages to environment ...\")\n with click_spinner.spinner():\n errno, stdout, stderr = utils.run_command(cmd_install_index,\n env=env, shell=True)\n if errno:\n raise Exception(\"Installation of packages failed (STDERR: {}\"\n .format(stderr))", "def install(self):\n raise NotImplementedError", "def test_pip_install_index(self):\n output, _error = self.executor.pip_install(['attrs'], index_url='http://orbifold.xyz')\n self.assertEqual(output, 'attrs installed from orbifold')", "def install(cls):\n return cls.interface.set_table(cls.schema)", "def install(des, tipe):\n from xbooks.Xinit import Xrc\n if tipe == \"Xbook\":\n src = \"Xblog/docs/Assets/html/index.html\"\n shutil.copy2(src, des)\n ccc.success(\"installing index of \" + des)\n if des.replace(os.path.basename(des), \"\") == \"Xblog/docs/notebooks/\":\n addToNavBar(des, tipe, Xrc)\n else:\n addToParentIndex(des, tipe, Xrc)\n if tipe == \"Xpage\":\n linkAssets(des, Xrc)\n if des.replace(os.path.basename(des), \"\") == \"Xblog/docs/notebooks/\":\n if not des.endswith(\"welcome.html\"):\n addToNavBar(des, tipe, Xrc)\n else:\n addToParentIndex(des, tipe, Xrc)\n ccc.success(\"installtion procedures for \" + des)", "def add_index_set(self, target_concept, indexsetpattern):\n indexset = self.index_set_pattern_parser.parse(\n logic.expr(target_concept), indexsetpattern)\n self.install(indexset)", "def _install(self):\n\n pass", "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def _SetupIndexes(self, _open=open):\n pass", "def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n 
data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine", "def install(self):\n return self._process('install')", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def install(self, *packages):\n raise NotImplementedError", "def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def build_ireq_set(specifiers, # type: Iterable[str]\n index_urls=None, # type: Optional[Iterable[str]]\n prereleases=False, # type: bool\n resolve_canonical_names=True, # type: bool\n resolve_source_dir=None, # type: str\n resolve_versions=True, # type: bool\n sort_specifiers=True, # type: bool\n ):\n # type: (...) 
-> InstallReqSet\n install_requirements = ordered_set.OrderedSet()\n if index_urls is None:\n index_urls = []\n if sort_specifiers:\n specifiers = sorted(specifiers)\n for specifier in specifiers:\n if specifier.startswith('-e'):\n ireq = HashableInstallRequirement.from_line(specifier)\n else:\n args = []\n for index_url in index_urls:\n args.extend(['--extra-index-url', index_url])\n ireq = resolve_specifier(specifier, prereleases, resolve_versions,\n *args)\n if resolve_canonical_names and not ireq.editable:\n package_name = ireq.name\n canonical_name = get_canonical_name(\n package_name=package_name, index_urls=index_urls)\n update_ireq_name(\n install_requirement=ireq, package_name=canonical_name)\n elif resolve_source_dir is not None and ireq.source_dir:\n try:\n ireq.source_dir = str(\n pathlib.Path(ireq.source_dir)\n .relative_to(pathlib.Path(resolve_source_dir)))\n ireq.link = pip.index.Link('file://{}'.format(\n ireq.source_dir))\n except ValueError:\n pass\n install_requirements.add(ireq)\n return install_requirements", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def install():\n deploy()\n configure()", "def install(self, spec, prefix):\n make(\"install\", parallel=False)", "def init(self):\n self._es.create_index_template(\n name=DATASETS_INDEX_NAME,\n template=DATASETS_INDEX_TEMPLATE,\n force_recreate=True,\n )\n self._es.create_index(DATASETS_INDEX_NAME)", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def solr_reindex(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n cmd = 'bin/django update_index dasa --batch-size=5000 --remove --verbosity=2'\n run(cmd)", "def install(cls):\n\n\t\t# Go through each Record type\n\t\tfor o in cls._install:\n\n\t\t\t# Install the table\n\t\t\tif not o.table_create():\n\t\t\t\tprint(\"Failed to create `%s` table\" % o.tableName())", "def __init__(self):\n super(InstallCommand, self).__init__()\n # change the default location of the index.\n self.parser.remove_option('-i')\n self.parser.add_option(\n '-i', '--index-url',\n dest='index_url',\n metavar='URL',\n default='http://ryppl.github.com/index',\n help='Base URL of Ryppl Package Index (default %default)')", "def test_install_set_existing(self):\n expected = copy.deepcopy(test_xdata)\n expected.find(\"Text\").text = \"Changed content\"\n self._install([lxml.etree.Element(\"Set\", path=\"Test/Text/#text\",\n value=\"Changed content\")],\n expected)", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, 
index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def _install(self, host):\n pass", "def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n # import step in zcml, but doing it in code makes this method usable as \n # upgrade step as well. Note that this silently does nothing when there is \n # no catalog.xml, so it is quite safe.\n setup = getToolByName(context, 'portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'catalog')\n \n catalog = getToolByName(context, 'portal_catalog')\n indexes = catalog.indexes()\n \n # Specify the indexes you want, with ('index_name', 'index_type')\n wanted = (('county', 'FieldIndex'),\n ('constituency', 'FieldIndex'),\n ('priority_number', 'FieldIndex'), \n ('political_party', 'FieldIndex'),\n ('elected_nominated', 'FieldIndex'),\n ('member_status', 'FieldIndex'),\n ('special_interest', 'FieldIndex'),\n ('other_names', 'FieldIndex'),\n ('member_role', 'FieldIndex'),\n ('member_title', 'FieldIndex'),\n ('body_text', 'FieldIndex'),\n ('member_full_names', 'ZCTextIndex'),\n )\n\n indexables = []\n for (name, meta_type) in wanted:\n if meta_type and name not in indexes:\n if meta_type == 'ZCTextIndex':\n item_extras = Empty()\n item_extras.doc_attr = name\n item_extras.index_type = 'Okapi BM25 Rank'\n item_extras.lexicon_id = 'plone_lexicon'\n catalog.addIndex(name, meta_type, item_extras)\n else:\n catalog.addIndex(name, meta_type)\n \n indexables.append(name)\n logger.info('Added %s for field %s.', meta_type, name)\n if len(indexables) > 0:\n logger.info('Indexing new indexes %s.', ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def start_reindex(self):\n self.reindex_button.click() # lint-amnesty, pylint: disable=no-member" ]
[ "0.7080118", "0.6201861", "0.61710644", "0.61431223", "0.6129671", "0.6036387", "0.5905684", "0.58934444", "0.5872627", "0.58533996", "0.5815387", "0.57821035", "0.5777143", "0.5716653", "0.56888473", "0.55257696", "0.54855067", "0.54117346", "0.5403791", "0.539142", "0.5389289", "0.53780377", "0.5368651", "0.53483886", "0.5319934", "0.53117704", "0.5295406", "0.52886045", "0.5278407", "0.52687395" ]
0.8006205
0
Adds an index set to the target concept. The indexsetpattern must be a string containing an indexset pattern (see IndexSetPatternParser).
def add_index_set(self, target_concept, indexsetpattern): indexset = self.index_set_pattern_parser.parse( logic.expr(target_concept), indexsetpattern) self.install(indexset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install(self, index_set):\n index_set.indices = map(self.stem, index_set.indices)\n index_set.required_indices = map(self.stem, index_set.required_indices)\n self.unique_target_concepts[index_set.target_concept] = True\n for index in index_set.indices:\n if not index in self.target_concepts.get(index, []):\n self.target_concepts[index] = ([index_set.target_concept] +\n self.target_concepts.get(index, []))\n if not index_set in self.index_sets.get(index, []):\n self.index_sets[index] = [index_set] + self.index_sets.get(index, [])", "def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)", "def add(\n self, patterns: Iterable[MatcherPatternType], attrs: Dict, index: int = 0\n ) -> None:\n # We need to make a string here, because otherwise the ID we pass back\n # will be interpreted as the hash of a string, rather than an ordinal.\n key = str(len(self.attrs))\n self.matcher.add(self.vocab.strings.add(key), patterns) # type: ignore[arg-type]\n self._attrs_unnormed.append(attrs)\n attrs = normalize_token_attrs(self.vocab, attrs)\n self.attrs.append(attrs)\n self.indices.append(index)", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def insert(self, index, pset):\n self._sets.insert(index, pset)", "def index(self, pset):\n self._sets.index(pset)", "def add_input_set(name, my_session):\n iset = InputSet(name=name)\n my_session.add(iset)\n my_session.commit()\n log.info('Added input set \"%s\"' % name, 'input.py')\n return iset.id", "def add_set(self): # TODO test\n self.set_tree.remove_node(self.adding_node)\n i = len(self.exercise.sets)\n self.exercise.sets.append(Set())\n item = TreeViewLabel(text=\"Set \" + str(i))\n set_node = TreeViewSet(exercise=self.exercise, set_id=i, session=self.session)\n self.set_tree.add_node(item)\n self.set_tree.add_node(set_node, item)\n self.set_tree.add_node(self.adding_node)\n print(\"add set\")", "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def add_set(self, repres):\n s = self.set_indx(repres)\n if not s is None:\n raise Exception\n self._data.append(set(repres))", "def test_add_to_index(koan, assert_index_includes_added_file):\n koan.shell('')\n koan.shell('')\n koan.shell('')", "def parse(self, target, pattern):\n indexset = IndexSet(target)\n return self.read(indexset, pattern, 0)", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def setIndex(self,index):\n if isinstance(index,str):\n index = MaterialIndex(index)\n self[0].refractiveindex = index", "def add_mode_index(self) -> None:", "def addIndex(self, index):\r\n assert type(index)==int\r\n assert 0<=index and index < self._dataset.getSize()\r\n\r\n if not (index in self._indices):\r\n self._indices.append(index)", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add_index(self, index):\n 
self.add_index_sig(IndexSignature.from_index(index))", "def add_subset_to(self, ds, pattern, rmprefix=None):\n d = self.col_subset(pattern, rmprefix)\n for (i, r) in d.iterrows():\n feats = r.drop(['sesid','age'])\n v = feats.T.tolist()\n feat_names = feats.axes[0].tolist()\n ds.add_samplet(samplet_id=r.sesid, target=r.age,\n features=v, feature_names=feat_names,\n overwrite=True)", "def option_index(args):\n print(\"= MAKE INDEX =\")\n print()\n print(\"Database folder:\\t{}\".format(args.folder))\n if not os.path.isdir(args.folder):\n raise OSError(\"No such directory!\")\n print(\"Index file:\\t\\t{}\".format(args.indexfile))\n\n indexer.create_index_from_folder(args.folder, args.indexfile)", "def add(self, *filesets):\r\n for fileset in filesets:\r\n paths = fileset() if isinstance(fileset, Fileset) \\\r\n else fileset if hasattr(fileset, '__iter__') \\\r\n else [fileset]\r\n for path in paths:\r\n abspath = path\r\n if not os.path.isabs(abspath):\r\n abspath = os.path.join(self._base, path)\r\n if not os.path.exists(abspath):\r\n raise ValueError('Given path: %s with absolute path: %s which does not exist'\r\n % (path, abspath))\r\n self.filemap[abspath] = self.mapper(abspath)\r\n return self", "def _SetupIndexes(self, _open=open):\n pass", "def addCatalogIndexes(portal):\n catalog = getToolByName(portal, 'portal_catalog')\n indexes = catalog.indexes()\n wanted = (('standardTags', 'KeywordIndex'),\n ('iamTags', 'KeywordIndex'),\n ('isearchTags', 'KeywordIndex'),\n ('hiddenTags', 'KeywordIndex'))\n indexables = []\n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n indexables.append(name)\n logger.info(\"Added %s for field %s.\", meta_type, name)\n if len(indexables) > 0:\n logger.info(\"Indexing new indexes %s.\", ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def register_vocab(self, start_concept, end_concept, alias_of, regex_str):\n self.register_vocabulary(start_concept, end_concept,\n alias_of, regex_str)", "def add_index(self, idx, subproblem_shape):\n self.indices.append(int(idx))\n self.subproblem_shapes.append(subproblem_shape)", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def addSemanticsAnnotation(self, *args):\n return _libsbml.ASTNode_addSemanticsAnnotation(self, *args)", "def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)", "def create_index():" ]
[ "0.62463635", "0.59897494", "0.5209074", "0.5204011", "0.5071411", "0.50280774", "0.49698648", "0.4935041", "0.48192346", "0.47978112", "0.4796129", "0.47408307", "0.46981147", "0.4673346", "0.4620533", "0.46111295", "0.46000364", "0.46000364", "0.45934823", "0.45888665", "0.45861402", "0.45771286", "0.4558441", "0.45426786", "0.4532357", "0.45246544", "0.45246544", "0.45166877", "0.4515877", "0.45032543" ]
0.90745026
0
Parses a string containing an indexset pattern and returns an IndexSet.
def parse(self, target, pattern): indexset = IndexSet(target) return self.read(indexset, pattern, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_range(raw):\n if not raw:\n return None\n\n indices = set()\n try:\n with open(raw, 'r') as f:\n for l in f:\n l = l.strip()\n if len(l) == 0 or l[0] == '#':\n continue\n if \" \" in l:\n l = l.split()[0]\n indices.add(int(l))\n return indices\n except FileNotFoundError:\n pass\n\n for s in raw.split(\",\"):\n if \"-\" in s:\n start, end = s.split(\"-\")\n indices.update(range(int(start), int(end)+1))\n else:\n indices.add(int(s))\n return indices", "def parse_selection(selection_str: str) -> List[int]:\n indices = []\n for group in selection_str.split(','):\n if not re.match(r'^(?:-?\\d+)|(?:\\d+(?:-\\d+))$', group):\n print(\"Invalid selection\", group)\n sys.exit()\n spl = group.split('-')\n if len(spl) == 1:\n indices.append(int(spl[0]))\n elif len(spl) == 2:\n begin = int(spl[0]) if spl[0] else 0\n end = int(spl[1])\n indices.extend(range(begin, end + 1))\n return indices", "def add_index_set(self, target_concept, indexsetpattern):\n indexset = self.index_set_pattern_parser.parse(\n logic.expr(target_concept), indexsetpattern)\n self.install(indexset)", "def parse_int_set(nputstr=\"\"):\n selection = set()\n invalid = set()\n # tokens are comma seperated values\n tokens = [x.strip() for x in nputstr.split(',')]\n for i in tokens:\n try:\n # typically tokens are plain old integers\n selection.add(int(i))\n except:\n # if not, then it might be a range\n try:\n token = [int(k.strip()) for k in i.split('-')]\n if len(token) > 1:\n token.sort()\n # we have items seperated by a dash\n # try to build a valid range\n first = token[0]\n last = token[len(token)-1]\n for x in range(first, last+1):\n selection.add(x)\n except:\n # not an int and not a range...\n invalid.add(i)\n # Report invalid tokens before returning valid selection\n # print \"Invalid set: \" + str(invalid)\n return selection", "def parse_set(field, star_range):\n ranges = tuple(parse_range(r, star_range) for r in field.split(\",\"))\n return crontab.Set(ranges)", "def str_to_productionset(string):\r\n return strlist_to_production_set(string.split('\\n'))", "def _parse_ins_string(string):\n istart_markers = set([\"[\", \"(\", \"!\"])\n marker_dict = {\"[\": \"]\", \"(\": \")\", \"!\": \"!\"}\n # iend_markers = set([\"]\",\")\",\"!\"])\n setdum = {\"dum\", \"DUM\"}\n obs_names = []\n slen = len(string)\n idx = 0\n while True:\n if idx >= slen - 1:\n break\n char = string[idx]\n if char in istart_markers:\n # em = iend_markers[istart_markers.index(char)]\n em = marker_dict[char]\n # print(\"\\n\",idx)\n # print(string)\n # print(string[idx+1:])\n # print(string[idx+1:].index(em))\n # print(string[idx+1:].index(em)+idx+1)\n eidx = min(slen, string.find(em, idx + 1))\n obs_name = string[idx + 1 : eidx]\n if obs_name not in setdum:\n obs_names.append(obs_name)\n idx = eidx + 1\n else:\n idx += 1\n return obs_names", "def ParseIndex(self, text):\n taxonStart = 0\n taxonStop = len(self.alignment) - 1\n columnStart = 0\n columnStop = self.alignment.get_alignment_length() - 1\n if (',' not in text):\n self.AlertMessage('Invalid index format. (taxa or columns missing)', 'high')\n return (-1,-1,-1,-1)\n else:\n text = text.strip()\n indices = text.split(',')\n if (len(indices) > 2):\n self.AlertMessage('Invalid index format. 
(too many fields)', 'high')\n return (-1,-1,-1,-1)\n else:\n if (':' in indices[0]): #there is a range specified in the taxon index\n taxonIndices = indices[0].split(':')\n if (taxonIndices[0]): #a start taxon is specified\n try:\n taxonStart = int(taxonIndices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (taxon start index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (taxonIndices[1]): #a stop taxon is specified\n try:\n taxonStop = int(taxonIndices[1].strip())\n except:\n self.AlertMessage('Invalid index format. (taxon stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n elif (indices[0]): #a single taxon is specified\n try:\n taxonStart = int(indices[0].strip())\n taxonStop = int(indices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (taxon start or stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (':' in indices[1]): #there is a range specified in the taxon index\n columnIndices = indices[1].split(':')\n if (columnIndices[0]): #a start taxon is specified\n try:\n columnStart = int(columnIndices[0].strip())\n except:\n self.AlertMessage('Invalid index format. (column start index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if (columnIndices[1]): #a stop taxon is specified\n try:\n columnStop = int(columnIndices[1].strip())\n except:\n self.AlertMessage('Invalid index format. (column stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n elif (indices[1]): #a single taxon is specified\n try:\n columnStart = int(indices[1].strip())\n columnStop = int(indices[1].strip())\n except:\n self.AlertMessage('Invalid index format. (column start or stop index not an integer)', 'high')\n return (-1, -1, -1, -1)\n if ((0 <= taxonStart <= taxonStop) & (0 <= columnStart <= columnStop)):\n return (taxonStart, taxonStop, columnStart, columnStop)\n else:\n self.AlertMessage('Invalid index range. 
(start > stop or index < 0)', 'high')\n return (-1,-1,-1,-1)", "def parse_range_set(range_string):\n # TODO: add UTs for this.\n\n # Parse a range string as specified by format_range_set() below\n # Be generous dealing with duplicate entries in the specification.\n if not range_string:\n return []\n ranges = [\n (lambda sublist: range(sublist[0], sublist[-1] + 1))\n (list(map(int, subrange.split('-')))) for subrange in range_string.split(',')]\n return list(set([y for x in ranges for y in x]))", "def parse_index_name(index_name):\n if index_name.count('-') == 3:\n hostname, schema, language, type_name = index_name.split('-')\n version = None\n elif index_name.count('-') == 4:\n hostname, schema, language, type_name, version = index_name.split('-')\n else:\n hostname = None\n schema = None\n language = None\n type_name = None\n version = None\n\n return IndexParts(\n hostname=hostname,\n schema=schema,\n language=language,\n type_name=type_name,\n version=version\n )", "def __sub_set_from_linescsv(self, csv_path):\n # gather line ids\n ids = []\n with open(csv_path, 'r') as f_csv:\n reader = csv.reader(f_csv, delimiter=' ')\n for line_id in reader:\n ids.append(line_id[0])\n # create sub-set based on the indices\n sub_set = self.__line_id_subset(ids=ids)\n return sub_set", "def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset", "def index_set(self):\n return self._index", "def test_parse_set_query():\n # List of 2-tuples of (query, expected_result)\n set_tests = {\n 'foo=bar': [\n ('intersection', 'foo', 'bar'),\n ],\n 'foo=bar owner=jathan': [\n ('intersection', 'foo', 'bar'),\n ('intersection', 'owner', 'jathan'),\n ],\n '-owner=gary': [\n ('difference', 'owner', 'gary'),\n ],\n 'cluster +foo=baz': [\n ('intersection', 'cluster', ''),\n ('union', 'foo', 'baz'),\n ],\n # Extra white space\n 'cluster=lax +foo=baz': [\n ('intersection', 'cluster', 'lax'),\n ('union', 'foo', 'baz'),\n ],\n }\n\n # Make sure that result matches expected_result\n for query, expected_result in set_tests.iteritems():\n result = parse_set_query(query)\n assert result == expected_result", "def from_ruleset_string(ruleset_string: str) -> Sum1DRuleset:\n\n tokens = ruleset_string.split(\",\")\n\n # First 3 tokens are fixed: Radius, C, Middle\n radius = int(tokens[0][1:])\n c = int(tokens[1][1:]) # todo: find out what C is and incorporate it in the rules.\n middle = bool(int(tokens[2][1:]))\n\n # Remaining tokens are either Survive or Born, all other outcomes are assumed dead.\n outcomes = {}\n for token in tokens[3:]:\n if token.startswith(\"S\"):\n outcomes[int(token[1:])] = \"same\"\n elif token.startswith(\"B\"):\n outcomes[int(token[1:])] = True\n else:\n raise ValueError(f\"Error parsing token: {token}\")\n\n # Offsets\n offsets = list(range(-radius, radius + 1))\n if not middle:\n offsets.pop(radius)\n\n return Sum1DRuleset(outcomes, offsets)", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def fromString(cls, string):\n # From SAM specification v1.5, slightly adapted for single-token 
parsing\n pattern = r\"^[0-9]+[MIDNSHPX=]\" \n string = string.strip()\n if string == '*':\n return CIGAR.fromList(['*'])\n parsed = []\n s = string\n # Parse string token (e.g. 14M) by token, re.findall is not enough,\n # because non-matching subsequences between (e.g. \"14Mblabla3D4M\") would\n # go unnoticed! Also it would be good to abort as early as possible if\n # an invalid string is found to avoid parsing possibly very long strings\n while s != '':\n r = re.match(pattern, s)\n if not r:\n raise ValueError('Invalid CIGAR string: \"'+string+'\"')\n g = r.group(0)\n parsed.append(g)\n s = s[len(g):]\n \n parsed = [(int(p[:-1]), p[-1:]) for p in parsed]\n\n return CIGAR.fromList(parsed)", "def parse(self):\n index = Index.create()\n cursor = index.parse(self.filepath, args=[\"-std=c++11\"]).cursor\n self.cursor = cursor", "def parse(self, string, parse_all=False):\n return self._parseString(string, parse_all=parse_all)", "def parse(parser, string, ignore_white=True, trace=False):\n input_reader = InputReader(string, ignore_white)\n input_reader.trace = trace\n tokens = []\n try:\n tokens = parser.match(input_reader)\n parseResult = ParseResult(input_reader, tokens)\n except ParseException as e:\n parseResult = ParseResult(input_reader, tokens)\n parseResult.error = e\n parseResult.line = input_reader.line\n parseResult.linePos = input_reader.linePos\n return parseResult", "def str_to_index_list(index_str):\n if str2index_convertible(index_str):\n index_list = []\n if \",\" in index_str:\n index_str = index_str.split(\",\")\n for index in index_str:\n index_list.append(int(index))\n elif \"-\" in index_str:\n index_str = index_str.split(\"-\")\n if len(index_str) == 2:\n start = int(index_str[0])\n end = int(index_str[1])\n for index in range(start, end+1):\n index_list.append(int(index))\n else:\n index_list = [int(index_str)]\n return index_list\n else:\n print(\"Error: wrong index list formatting\")\n return None", "def get_index(cls, df, selector, start=None, stop=None, names=[]):\n\n assert cls.is_selector(selector)\n\n tuples = cls.get_tuples(df, selector, start, stop)\n if not tuples:\n raise ValueError('no tuples matching selector found')\n\n # XXX This probably could be made faster by directly manipulating the\n # existing MultiIndex:\n if all(map(np.iterable, tuples)):\n if np.iterable(names) and names:\n return pd.MultiIndex.from_tuples(tuples, names=names)\n elif names:\n return pd.MultiIndex.from_tuples(tuples, names=[names])\n else:\n return pd.MultiIndex.from_tuples(tuples)\n else:\n if np.iterable(names) and names:\n return pd.Index(tuples, name=names[0])\n elif names:\n return pd.Index(tuples, name=names)\n else:\n return pd.Index(tuples)", "def parse(s):\n return expr.parseString(s, parseAll=True)", "def _parser(self,\n search_str):\n return {line_index: parsed_line_keys for (line_index, parsed_line_keys)\n in enumerate(self._load_line(search_str=search_str))\n if parsed_line_keys\n }", "def read(string):\n\treturn (re.finditer('(?<=\\[)[a-z]+(?=\\])', string), re.finditer('(?<=\\])[a-z]+|[a-z]+(?=\\[)', string))", "def load_strands_from_string(strands_string, seperator='\\n'):\n return load_strands([strand for strand in strands_string.split(seperator)])", "def parse(self, s):\n\n segments = self.compiled.split(self._whitespace.sub(\" \", s))\n literals = segments[::2]\n raw = segments[1::2]\n\n if not raw:\n return []\n\n case = list(map(str.casefold, raw))\n prefixes = [{}] + [dict(self.locale_set.prefixes.get(match, ())) for match in case[:-1]]\n suffixes = 
[dict(self.locale_set.suffixes.get(match, ())) for match in case[1:]] + [{}]\n\n groups = _DateTime(**{ field: [] for field in _DateTime._fields })\n choices_per_position = {}\n always_literal = set()\n numeric = set()\n for idx, (prefix, suffix) in enumerate(zip(prefixes, suffixes)):\n keyword = self._lookup_keyword(raw[idx])\n if \"y\" in prefix:\n prefix[\"C\"] = tuple(set(prefix[\"y\"] + prefix.get(\"C\", ())))\n if not keyword:\n always_literal.add(idx)\n else:\n if raw[idx].isdigit():\n numeric.add(idx)\n choices_per_position[idx] = len(keyword)\n for fmt, value, locales in keyword:\n category = fmt[-1]\n if category == \"b\":\n # Month-names should be treated like numeric months.\n category = \"m\"\n elif category == \"z\":\n category = \"Z\"\n getattr(groups, category).append(_Assignment(\n fmt=fmt,\n pos=idx,\n value=value,\n locales=locales,\n prefix=prefix.get(fmt[-1]),\n suffix=suffix.get(fmt[-1]),\n ))\n numeric = frozenset(numeric)\n\n # If a required date field is unsatisfiable, this is not a date.\n if not all(getattr(groups, category) for category in _State._min_date_formats):\n for category in _State._all_date_formats:\n getattr(groups, category).clear()\n\n # If a required time field is unsatisfiable, this is not a time.\n if not all(getattr(groups, category) for category in _State._min_time_formats):\n for category in _State._all_time_formats:\n getattr(groups, category).clear()\n\n for group in groups:\n group.sort(key=lambda assignment: (\n -self._optimistic_score(assignment),\n choices_per_position[assignment.pos],\n ))\n\n required_formats = _State._min_date_formats + _State._min_time_formats\n groups = OrderedDict(sorted(\n (\n (\n category,\n (\n group,\n tuple(\n (f, required)\n for f, required in _position_constraints\n if category in required\n ),\n tuple(\n (f, required)\n for f, required, revisit in _value_constraints\n if category in required or category in revisit\n ),\n )\n )\n for category, group in zip(groups._fields, groups)\n if group\n ),\n key=lambda i: (i[0] not in required_formats, len(i[1][0]))\n ))\n\n # We've already filtered out all possibilities; there's nothing here.\n if not groups:\n return []\n\n constrained_groups = []\n while groups:\n category, (group, position, value) = groups.popitem(last=False)\n constrained_groups.append((category, group, position, value))\n required = frozenset(itertools.chain.from_iterable(required for f, required in itertools.chain(position, value)))\n if required:\n required = [\n category\n for category in reversed(groups.keys())\n if category in required\n ]\n for category in required:\n groups.move_to_end(category, last=False)\n groups = constrained_groups\n\n best_quality = 0\n best_candidates = []\n\n partials = [\n _State.empty._replace(\n unconverted=frozenset(always_literal),\n remaining_groups=tuple(groups),\n ).children(numeric=numeric)\n ]\n while partials:\n try:\n quality, locales, state = next(partials[-1])\n except StopIteration:\n partials.pop()\n continue\n\n if state.remaining_groups:\n # Admissable heuristic: compute the best score each group\n # could possibly achieve. Don't count conversion specifiers\n # that we've already used, but don't worry about conflicts\n # in the groups we haven't assigned yet. Any such conflicts\n # can only reduce the resulting score, and we only need to\n # make sure that the heuristic is at least as large as the\n # true value of the best leaf in this subtree. 
However, the\n # more precise we can be here, the fewer nodes we have to\n # search, so we can spend some CPU time on precision and\n # still come out ahead.\n assigned = state.unconverted.union(state.pos).difference((None,))\n heuristic = len(state.pending_hints) + sum(\n next((\n self._optimistic_score(assignment)\n for assignment in group[1]\n if assignment.pos not in assigned\n ), 0)\n for group in state.remaining_groups\n )\n\n if quality + heuristic < best_quality:\n # Even assuming the remaining groups get the highest\n # possible score, this state is still not good enough.\n continue\n\n partials.append(state.children(numeric=numeric))\n continue\n\n value = state.valid()\n if value is None:\n continue\n\n quality, locales, state = state.final_score()\n\n if best_quality is not None and quality < best_quality:\n # We've seen better, so skip this one.\n continue\n\n if quality != best_quality:\n best_quality = quality\n best_candidates = []\n\n conversions = dict(zip(state.pos, state.fmts))\n fmts = [ conversions.get(idx) or literal for idx, literal in enumerate(raw) ]\n\n pattern = ''.join(lit + fmt for lit, fmt in zip(literals, fmts + [''])).replace(\"%C%y\", \"%Y\")\n best_candidates.append((pattern, value, locales))\n return best_candidates", "def index_lzo_string(string):\n\n index = StringIO()\n index_lzo_file(StringIO(string), index)\n\n return index.getvalue()", "def parse(cls, s):\n raise NotImplementedError", "def _parseString(self, instring):\n\n if self._compiled is None:\n self.compile()\n\n match = self._compiled.match(instring)\n if match is None:\n return None\n\n Count.reset()\n struct = deepcopy(self.structure)\n\n mymatch, substructs, preprocess_func = self._parse_preprocess(match)\n struct.map(preprocess_func)\n struct.map(self._func_parse_leaf(mymatch, substructs))\n struct.parse_end = match.end()\n return struct" ]
[ "0.5334464", "0.5226054", "0.5198872", "0.5191598", "0.51036763", "0.50414246", "0.49094805", "0.4886336", "0.4885104", "0.47790274", "0.47279197", "0.47008738", "0.46219444", "0.462135", "0.45989233", "0.45287374", "0.45258093", "0.45004886", "0.44923663", "0.44205198", "0.44080397", "0.4401825", "0.4398208", "0.43961242", "0.43688533", "0.4361861", "0.43465564", "0.43396834", "0.43370524", "0.43274343" ]
0.5955374
0
return a list of slots ordered by their number of signups, from least to most.
def get_ordered_slots(scheduled_slots, vols): vol_cnts = {} for s_slot in scheduled_slots: s_key = "{}-{}-{}".format(s_slot.day, s_slot.time_period, s_slot.type) vol_cnts[s_key] = 0 for vol in vols: for a_slot in vol.available_slots: a_key = "{}-{}".format(a_slot.day, a_slot.time_period) if a_key == s_key: vol_cnts[s_key] += 1 sorted_vol_cnts = sorted(vol_cnts.items(), key=lambda x: x[1]) #print("ordered slots: {}".format(sorted_vol_cnts)) return sorted_vol_cnts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sorted_signers(self) -> List[Address]:\n return sorted(self.signers)", "def get_grouped_available_slots(vols):\n vol_cnts = {}\n for vol in vols:\n slot_amt = len(vol.available_slots)\n if slot_amt < 1:\n continue\n if slot_amt in vol_cnts:\n vol_cnts[slot_amt].append(vol)\n else:\n vol_cnts[slot_amt] = [vol]\n\n #sorted_vol_cnts = sorted(vol_cnts.keys())\n #for amt in sorted_vol_cnts:\n # print(\"{}: {}\".format(amt, [vol.email for vol in vol_cnts[amt]]))\n return vol_cnts", "def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]", "def required_slots(self,tracker) -> List[Text]:", "def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"bug\", \"beverage\", \"second_person_plural\", \n \"cot_caught\", \"rain_sun\", \"crawfish\", \"halloween\",\n \"sandwich\", \"side_road\", \"shoes\", \"highway\", \"yard_sale\",\n \"rubbernecking\", \"frosting\", \"lawyer\", \"kitty_corner\",\n \"firefly\", \"verge\", \"brew_thru\", \"water_fountain\"]", "def timeslot(self) -> List[TimeslotTimeslot]:\n return self._timeslot", "def free_slots(self, day_bounds: Slot):\n free_slots: List[Slot] = []\n time_ptr = day_bounds.start\n for meeting in self.meetings:\n if meeting.start > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, meeting.start.time_str))\n time_ptr = meeting.end\n if day_bounds.end > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, day_bounds.end.time_str))\n return free_slots", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"domicilio\",\n \"fecha_hora\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n \"tipo_lavado\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n ]", "def sorted_availabilities(self, day=None):\r\n if day is not None:\r\n availabilities = [availability for availability in self.availabilities if availability.day == day]\r\n else:\r\n availabilities = self.availabilities\r\n return sorted(availabilities, key=lambda x: (x.day, x.start))", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"tipo_compostura\"\n ]", "def InSlotsGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.InSlotsGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n signal_indexes = []\n signal_indexes_size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n for n in range(signal_indexes_size):\n idx += 4\n signal_index = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n signal_indexes.append(signal_index)\n \n return [signal_names,signal_indexes]", "def get_slots(self) -> int:", "def 
required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"search_type\", \"time\"]", "def _get_initial_slots(self, rows, cols) -> list:\n slots = []\n for x in range(rows):\n row = []\n for y in range(cols):\n slot = Slot(x=x, y=y, mine=False, available=True, flag=False)\n row.append(slot)\n slots.append(row)\n return slots", "def get_sorted_allocated_seats():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"allocated_seats\"])", "def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())", "def topairs(self):\n return list(zip(self._times, self._values))", "def calculate_finishing_order(x):\n\t# Creates a list of keys which are sorted by their values\n\n\treturn [sailor_names for sailor_names,sailorValues in sorted(x.items(), key=lambda y: y[1], reverse=True)]", "def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }", "def get_avail_time_slots(self, cid, date):\n booked = self.get_time_slots(cid, date)\n avail_time_slots = []\n for time in self.initial_time_slots:\n if time not in booked:\n avail_time_slots.append(time)\n return avail_time_slots", "def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair)\n pairsByTickers[asset] = holder\n return pairsByTickers", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"credit_card\", \"payment_amount\", \"time\", \"confirm\"]", "def sorted_herbivores(self):\n fitness_dict = {herb: herb.fitness for herb in self.herbivores}\n sorted_tuples = sorted(fitness_dict.items(), key=lambda x: x[1], reverse=False)\n\n return sorted_tuples", "def get_slots(intent_request):\n return intent_request[\"currentIntent\"][\"slots\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]" ]
[ "0.61842656", "0.59229034", "0.59142065", "0.587465", "0.5765517", "0.57114506", "0.5706217", "0.56994545", "0.5667812", "0.5661197", "0.55991334", "0.55856377", "0.5568477", "0.5548975", "0.5524309", "0.54853594", "0.54852694", "0.54820555", "0.54611295", "0.5451059", "0.5440727", "0.54223", "0.54178876", "0.5392158", "0.5375108", "0.5374702", "0.53727144", "0.5370006", "0.5351205", "0.53267175" ]
0.633528
0
return relevant volunteers grouped by the number of available slots.
def get_grouped_available_slots(vols): vol_cnts = {} for vol in vols: slot_amt = len(vol.available_slots) if slot_amt < 1: continue if slot_amt in vol_cnts: vol_cnts[slot_amt].append(vol) else: vol_cnts[slot_amt] = [vol] #sorted_vol_cnts = sorted(vol_cnts.keys()) #for amt in sorted_vol_cnts: # print("{}: {}".format(amt, [vol.email for vol in vol_cnts[amt]])) return vol_cnts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def get_volunteers(service_id, upcoming_plan_id, location_id, event_id, event_period_id, service_time_ids_to_time, event_time_to_id):\r\n # Get all Team members\r\n team_members = requests.get(base_url + f'services/v2/service_types/{service_id}/plans/{upcoming_plan_id}/team_members?per_page=100', headers=headers3).json()\r\n\r\n volunteers = []\r\n # Loop through team members\r\n for person in team_members[\"data\"]:\r\n if person[\"attributes\"][\"status\"] == \"C\" or person[\"attributes\"][\"status\"] == \"U\":\r\n # get volunteer time ids\r\n time_ids=person['relationships']['times']['data']\r\n # convert time_id into times\r\n times = set(service_time_ids_to_time.get(time_id['id']) for time_id in time_ids)\r\n # convert times into event_ids\r\n check_time_ids = set(event_time_to_id.get(time) for time in times)\r\n \r\n # remove any None entry\r\n check_time_ids.discard(None)\r\n\r\n for check_t_id in check_time_ids:\r\n temp_dict = {\r\n 'check-in-kind':'Volunteer',\r\n# \"name\": person[\"attributes\"][\"name\"], # you can also add the persons name but it doesn't seem to be compulsory\r\n 'bulk_check_in[check_ins_attributes][][account_center_person_id]': person['relationships']['person']['data']['id'],\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][location_id]': location_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][event_time_id]': check_t_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][kind]': \"Volunteer\",\r\n 'bulk_check_in[check_ins_attributes][][event_id]': event_id,\r\n 'bulk_check_in[check_ins_attributes][][event_period_id]': event_period_id\r\n }\r\n volunteers.append(temp_dict)\r\n return volunteers", "def getAllVolunteers(self, query):\n return query", "def get_queryset(self):\n unitlist = get_units_visible_to_user(self.request.user)\n\n return Candidate.objects.filter(\n appointments__committee__unit__in=unitlist,\n )", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def test_n_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=2, registered=3, two_tasks=True)\r\n total_volunteers = cached_apps.n_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 5\" % total_volunteers\r\n assert total_volunteers == 5, err_msg", "def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n 
notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots", "def fleets(self):\n\t\treturn [fleet for fleet in self.galaxy.fleets.values() if 'ouid' in fleet.data and fleet.ouid == self.star_id]", "def test_n_registered_volunteers_with_more_than_one_taskrun(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=0, registered=2, two_tasks=True)\r\n registered_volunteers = cached_apps.n_registered_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 2\" % registered_volunteers\r\n assert registered_volunteers == 2, err_msg", "def get_voters():", "def get_voters():", "def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)", "def computeAvaliableTutors(self):\r\n subject = self.requestedSubject\r\n for tutor in AppUser.objects.all():\r\n if subject in tutor.subjectsOffered.all():\r\n self.avaliableTutors.add(tutor)", "def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]", "def test_n_registered_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=0, registered=3)\r\n registered_volunteers = cached_apps.n_registered_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 3\" % registered_volunteers\r\n assert registered_volunteers == 3, err_msg", "def get_recipients(self):\n return [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n slot_participant.participant.status == 'accepted'\n )\n ]", "def vitamins(self) -> List[RecipeObjectNutrientsCalories]:\n return self._vitamins", 
"def get_candidates(beer):\n span = tracer.current_span()\n span.set_tags({'beer.name': beer.name, 'beer.hops': beer.hops})\n\n db = DonutStats.instance()\n\n # find our optimal sugar level Donuts above or below this level\n # will certainly not be a good match\n optimal_sugar_level = db.get_optimal_sugar_level(beer.hops)\n return db.get_by_sugar_level(optimal_sugar_level, limit=10)", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"bug\", \"beverage\", \"second_person_plural\", \n \"cot_caught\", \"rain_sun\", \"crawfish\", \"halloween\",\n \"sandwich\", \"side_road\", \"shoes\", \"highway\", \"yard_sale\",\n \"rubbernecking\", \"frosting\", \"lawyer\", \"kitty_corner\",\n \"firefly\", \"verge\", \"brew_thru\", \"water_fountain\"]", "def get_recipients(self):\n return [\n self.obj.activity.owner\n ] + [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n slot_participant.participant.status == 'accepted'\n )\n ]", "def test_user_with_no_more_tasks_find_volunteers(self):\r\n\r\n self.register()\r\n user = User.query.first()\r\n app = AppFactory.create(owner=user)\r\n task = TaskFactory.create(app=app)\r\n taskrun = TaskRunFactory.create(task=task, user=user)\r\n res = self.app.get('/app/%s/newtask' % app.short_name)\r\n\r\n message = \"Sorry, you've contributed to all the tasks for this project, but this project still needs more volunteers, so please spread the word!\"\r\n assert message in res.data\r\n self.signout()", "def _search_allowed_partners(self, operator, user_id):\n cr = self.env.cr\n # list of partners corresponding to users\n # which are assigned to the same tickets than the provided user\n cr.execute(\n 'SELECT distinct u.partner_id FROM res_users u, '\n 'anytracker_ticket_assignment_rel m, '\n 'anytracker_ticket_assignment_rel n '\n 'WHERE m.user_id=%s AND u.id=n.user_id '\n 'AND n.ticket_id=m.ticket_id;',\n (user_id,))\n return [('id', operator, tuple(a[0] for a in cr.fetchall()))]", "def equipments(self):\n selection = Equipment.objects.filter(responsible__location_id=self.object.id)\n return {\n 'selection': selection,\n 'count': selection.count()\n }", "def restricted_teams(self, user):\n return []", "def _get_appointment_slots(self, timezone, employee=None):\n self.ensure_one()\n appt_tz = pytz.timezone(self.appointment_tz)\n requested_tz = pytz.timezone(timezone)\n first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))\n last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))\n\n # Compute available slots (ordered)\n slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)\n if not employee or employee in self.employee_ids:\n self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)\n\n # Compute calendar rendering and inject available slots\n today = requested_tz.fromutc(datetime.utcnow())\n start = today\n month_dates_calendar = cal.Calendar(0).monthdatescalendar\n months = []\n while (start.year, start.month) <= (last_day.year, last_day.month):\n dates = month_dates_calendar(start.year, start.month)\n for week_index, week in enumerate(dates):\n for day_index, day in enumerate(week):\n mute_cls = weekend_cls = today_cls = None\n today_slots = []\n if day.weekday() in (cal.SUNDAY, cal.SATURDAY):\n weekend_cls = 'o_weekend'\n if day == today.date() and day.month == today.month:\n 
today_cls = 'o_today'\n if day.month != start.month:\n mute_cls = 'text-muted o_mute_day'\n else:\n # slots are ordered, so check all unprocessed slots from until > day\n while slots and (slots[0][timezone][0].date() <= day):\n if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):\n today_slots.append({\n 'employee_id': slots[0]['employee_id'].id,\n 'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),\n 'hours': slots[0][timezone][0].strftime('%H:%M')\n })\n slots.pop(0)\n dates[week_index][day_index] = {\n 'day': day,\n 'slots': today_slots,\n 'mute_cls': mute_cls,\n 'weekend_cls': weekend_cls,\n 'today_cls': today_cls\n }\n\n months.append({\n 'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),\n 'weeks': dates\n })\n start = start + relativedelta(months=1)\n return months", "def getParticpants(self):\n return participants", "def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]" ]
[ "0.616679", "0.5977285", "0.5965824", "0.5698484", "0.5640452", "0.558289", "0.55814004", "0.55739164", "0.5554357", "0.5514152", "0.545814", "0.545814", "0.5409656", "0.53948075", "0.5380464", "0.5376231", "0.5353561", "0.5309577", "0.52577823", "0.5255719", "0.5249501", "0.52434504", "0.51986647", "0.5157624", "0.5154776", "0.5141205", "0.513621", "0.5131279", "0.51235324", "0.5118278" ]
0.6292542
0
Returns true if it is OK to add the volunteer to this slot as far as experience goes, that is, if the other person assigned to the slot is experienced. Only returns false if someone else is already assigned to the slot and the current volunteer is inexperienced, so it seeks to avoid having only inexperienced persons at a slot.
def experience_match(volunteer, volunteers, slot): if not volunteer.first_time: return True elif slot.type != 'coach2': return True #Brittle alert. Uses knowledge of the task: that this only matters currently for persons at coach 2 else: for volunteer2 in volunteers: for a_slot in volunteer2.assigned_slots: if a_slot.day == slot.day and a_slot.time_period == slot.time_period: #This should only match for coach 2 slots with same day and time period if volunteer2.first_time: #print("Not matching {} {} and {} {} for slot {} {} {}".format(volunteer.first_name, # volunteer.last_name, volunteer2.first_name, volunteer2.last_name, # slot.day, slot.time_period, slot.type)) return False #else: # print("OK to match {} {} and {} {} for slot {} {}".format(volunteer.first_name, # volunteer.last_name, volunteer2.first_name, volunteer2.last_name, # slot.day, slot.time_period)) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fully_booked(slots, attendees, user_name):\n if len(attendees) >= 2:\n return False\n else:\n return True", "def check_if_enough_skill_points(self):\r\n for skill_string in self.__skills:\r\n if (self.__skills[skill_string].points_to_up >\r\n self.__skill_points):\r\n self.skill_up_disable(skill_string)", "def is_penalty_event(self):\n if hasattr(self, \"fouls_to_give\"):\n team_ids = list(self.current_players.keys())\n offense_team_id = self.get_offense_team_id()\n defense_team_id = (\n team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]\n )\n if self.fouls_to_give[defense_team_id] == 0:\n if isinstance(self, (Foul, FreeThrow, Rebound)):\n # if foul or free throw or rebound on a missed ft\n # check foul event and should return false is foul\n # was shooting foul and team had a foul to give\n if isinstance(self, Foul):\n foul_event = self\n elif isinstance(self, FreeThrow):\n foul_event = self.foul_that_led_to_ft\n else:\n # if rebound is on missed ft, also need to look at foul that led to FT\n if not self.oreb and isinstance(self.missed_shot, FreeThrow):\n foul_event = self.missed_shot.foul_that_led_to_ft\n else:\n return True\n if foul_event is None:\n return True\n fouls_to_give_prior_to_foul = (\n foul_event.previous_event.fouls_to_give[defense_team_id]\n )\n if fouls_to_give_prior_to_foul > 0:\n return False\n return True\n return False", "def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True", "def is_won(self):\n return self.position == self.proposition.outcome and self.proposition.is_paid", "def check_event_available(self,eid,new_attend_num):\n event_info = self.get_event_info(eid)\n gacceptend = self.get_game_info(event_info['gid'],['gacceptend','gattend'])['gacceptend']\n if int(time.time()) > gacceptend: return 1 # attend end \n return 2 if int(event_info['eattend']) + int(new_attend_num) > int(event_info['emaxattend']) and int(event_info['emaxattend']) else True", "def can_exist_outside_of_game(self):\n return True", "def can_exist_outside_of_game(self):\n return True", "def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True", "def game_is_tied(self):\n tie_score = False\n if self.my_score == self.opponent_score:\n tie_score = True\n my_moves = self.steps_available(self.loc)\n opponent_moves = self.steps_available(self.opponent_loc)\n if my_moves == 0 and opponent_moves == 0 and tie_score:\n return True\n else:\n penalty = self.penalty_score\n if my_moves == 0 and opponent_moves != 0:\n return (self.my_score - penalty) == self.opponent_score\n elif my_moves != 0 and opponent_moves == 0:\n return self.my_score == (self.opponent_score - penalty)\n else:\n return False", "def is_eligible(self) -> Optional[bool]:\n return pulumi.get(self, \"is_eligible\")", "def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True", "def isSolved(self):\n return self.isComplete() and self.isLegal()", "def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy 
Speedway, let them through\n return True\n return False", "def champion_check(self):\n better_champion = False\n\n for species in self.species:\n if species.leader.original_fitness > self.champion_fitness:\n self.age_since_improvement = 0\n self.champion_fitness = species.leader.original_fitness\n better_champion = True\n\n if not better_champion:\n self.age_since_improvement += 1", "def game_over(self):\n return self.lives() < 0", "def is_game_won(self):\n if self.game_is_tied():\n return False\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n if my_available_steps == 0 or opp_available_steps == 0:\n return True\n else:\n return False", "def check_for_end_of_game(self):\n return self.player_1.score + self.player_2.score >= self.number_of_cells", "def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False", "def updateEatenOpponents1(self, gameState, idx):\n teammatePos = gameState.getAgentState((self.index + 2) % 4).getPosition()\n pos = gameState.getAgentState(idx).getPosition()\n if pos is None and len(self.beliefs[idx]) == 1 and self.beliefs[idx].keys()[0] == teammatePos:\n self.setOpponentToZeroPos(idx)\n return True\n return False", "def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()", "def current_venue_requires_player_greeting() -> bool:\n venue_instance = CommonLocationUtils.get_venue_of_current_lot()\n if venue_instance is None:\n return False\n return venue_instance.requires_visitation_rights", "def likely_to_be_offered(self):\n if self.score >= 5:\n return True\n return False", "def gameOver(self):\n\t\treturn self.lives == 0", "def entrance_exam(self):\n status = False\n tool = ProgrammingTool.create(self.PROGRAMMING_TOOL)\n if tool.connect(self.target_name):\n status = entrance_exam(tool, self.register_map)\n tool.disconnect()\n\n return status == EntranceExamErrors.OK", "def is_game_won(self):\n return True", "def _is_valid(self):\n if len(self.slots) == 0:\n print(\"Parking Lot not created\")\n return False\n return True", "def violated(self) -> bool:\n ...", "def is_advancing_to_next_stage(self):\n if self.game_stage == 1:\n return (self.die_a.current_value == \"1\" and self.die_b.current_value == \"2\" or\n self.die_a.current_value == \"2\" and self.die_b.current_value == \"1\")\n if self.game_stage == 2:\n return (self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"4\" or\n self.die_a.current_value == \"4\" and self.die_b.current_value == \"ANGRY\")\n if self.game_stage == 3:\n return False", "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False" ]
[ "0.5966585", "0.58720344", "0.57698774", "0.57312316", "0.5658507", "0.5489901", "0.5485378", "0.5485378", "0.546458", "0.54519045", "0.54474586", "0.54375637", "0.5426604", "0.54058903", "0.5361417", "0.5335454", "0.53330934", "0.53311485", "0.5301933", "0.52844775", "0.5264832", "0.52458256", "0.5243065", "0.5237151", "0.52319515", "0.52082634", "0.5203574", "0.5196022", "0.5192733", "0.5182898" ]
0.64037406
0
Load a list of vector embeddings from a pd.DataFrame
def load_embeddings(db): size = db['size'].values emb = db['embedding'].values emb = [np.load(i).flatten() for i in emb] return emb, size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_embeddings(filename):\n labels = []\n rows = []\n with open(filename, encoding='utf-8') as infile:\n for i, line in enumerate(infile):\n items = line.rstrip().split(' ')\n if len(items) == 2:\n # This is a header row giving the shape of the matrix\n continue\n labels.append(items[0])\n values = np.array([float(x) for x in items[1:]], 'f')\n rows.append(values)\n\n arr = np.vstack(rows)\n return pd.DataFrame(arr, index=labels, dtype='f')", "def process_glove_data(filename):\r\n\r\n word_list = []\r\n embed_list = []\r\n with open(filename,encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n toks = line.split(' ')\r\n word_list.append(toks[0])\r\n vec = [float(tok) for tok in toks[1:]]\r\n embed_list.append(vec)\r\n \r\n embed = np.array(embed_list,dtype=float)\r\n embed_df = pd.DataFrame(embed,index=word_list)\r\n embed_df.index = embed_df.index.str.lower()\r\n \r\n return embed_df", "def as_df(self):\r\n return pd.DataFrame(self.vectors).set_index(self.words)", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index", "def load_embeddings(self, f_name, dims=128):\n emb_df = pd.read_csv(f_name, sep=' ', skiprows=1, header=None, index_col=None)\n if not self.embeddings:\n self.embeddings = {}\n for i in range(0, emb_df.shape[0]):\n key = emb_df.iloc[i, 0]\n if str(key) in '</s>':\n continue\n emb = np.array(emb_df.iloc[i, 1: dims + 1].tolist())\n emb = emb.astype(float)\n self.embeddings[int(key)] = emb\n self.make_emb_cols(dims)", "def load_embeddings(path):\r\n\r\n embeds = dict() # dictionary mapping words to vectors\r\n for line in open(path, encoding='utf-8'):\r\n row = line.strip().split('\\t')\r\n embeds[row[0]] = np.array(row[1:], dtype=np.float32)\r\n\r\n embeddings_dim = embeds[list(embeds)[0]].shape[0]\r\n\r\n return embeds, embeddings_dim", "def get_docs_embedding(docs_tok, model, dim=300):\n all_docs_embedding = []\n for doc in docs_tok:\n all_docs_embedding.append(text2vec(doc, model, dim))\n cols = [str(i) for i in range(dim)]\n embeddings = pd.DataFrame(data=all_docs_embedding)\n embeddings.columns = cols\n embeddings.to_parquet('../model/docs_embeddings.parquet', index=False)\n return np.array(all_docs_embedding)", "def vectors_from_dataframe(*columns):\n return lambda df: [np.array(v) for v in zip(*[list(df[x].values) for x in columns])]", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for 
line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def get_pretrained_embeddings(source_vocab,embed_df):\r\n \r\n num_tokens = len(source_vocab)\r\n embedding_dim = embed_df.shape[1]\r\n weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32)\r\n \r\n for idx in range(num_tokens):\r\n token = source_vocab.lookup_index(idx)\r\n if token in embed_df.index:\r\n weights[idx,:] = embed_df.loc[token]\r\n else:\r\n weights[idx,:] = np.random.randn(1,embedding_dim)\r\n \r\n embed_tensor = torch.FloatTensor(weights)\r\n return embed_tensor", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")", "def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * 
len(y))\n X_train, X_val = X_shuffled[:val_sample_index], X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix", "def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx", "def embedding(self, seqs):\n batch_size, seqlen = seqs.shape\n seqs = np.reshape(seqs, (-1)) # convert to 1-d indexes [(batch_sz*seqlen)]\n embs = self.word2vec[seqs] # lookup [(batch_sz*seqlen) x emb_sz]\n embs = np.reshape(embs, (batch_size, seqlen, -1)) # recover the shape [batch_sz x seqlen x emb_sz]\n return embs", "def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb", "def load_document_embeddings(path):\n embedding_dimension = 0\n \n # First pass to work out maximum topic ID to create numpy embeddings\n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n topic_probs = document_embedding['topic_probs']\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n if topic_id + 1 > embedding_dimension:\n embedding_dimension = topic_id + 1\n \n # Second pass to actually store the embeddings\n x = []\n y = []\n \n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n label = document_embedding['label']\n topic_probs = document_embedding['topic_probs']\n \n embedding = np.zeros(shape=embedding_dimension, dtype=np.float32)\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n prob = topic_prob['prob']\n embedding[topic_id] = prob\n \n x.append(embedding)\n y.append(label)\n \n return x, y", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def DataFrameToModelInputs(self, df: pd.DataFrame, gpu_name: str):\n 
sequences, _ = self.EncodeAndPadSources(df, self.input_shape[0])\n\n # Translate encoded sequences into sequences of normalized embeddings.\n sequence_ph = tf.compat.v1.placeholder(dtype=tf.int32)\n normalized_embedding_matrix = tf.nn.l2_normalize(\n self.embedding_matrix, axis=1\n )\n embedding_input_op = tf.nn.embedding_lookup(\n normalized_embedding_matrix, sequence_ph\n )\n\n with tf.compat.v1.Session() as sess:\n # Tensor of shape (len(df), sequence length, embedding dimension).\n embedding_input = sess.run(\n embedding_input_op, feed_dict={sequence_ph: sequences}\n )\n\n # Get the auxiliary inputs.\n aux_in = np.array(\n [\n df[f\"feature:{gpu_name}:transfer\"].values,\n df[f\"param:{gpu_name}:wgsize\"].values,\n ]\n ).T\n\n return [aux_in, embedding_input]", "def load_embeddings(filepath, vocabulary, retain):\n \n word2index = dict()\n word_vectors = list()\n\n def add_entry(word, vector):\n word2index[word] = len(word2index)\n word_vectors.append(vector)\n\n model = gensim.models.KeyedVectors.load(filepath)\n\n # adding special tokens <FIL>, <UNK> and <NUM>\n dim = model.vector_size\n add_entry('<fil>', np.zeros((dim,)))\n for special in ['<unk>', '<num>']:\n vector = np.random.uniform(-0.025, 0.025, (dim,))\n add_entry(special, vector)\n\n if retain:\n for word, _ in model.vocab.items():\n add_entry(word, model[word])\n else:\n for word in vocabulary:\n if word in model:\n add_entry(word, model[word])\n\n vocabulary = vocabulary.intersection(word2index.keys())\n return word2index, np.asarray(word_vectors)", "def getEmbeddings(embed_loc, wrd_list, embed_dims):\n embed_list = []\n\n wrd2embed = {}\n for line in open(embed_loc, encoding='utf-8', errors='ignore'):\n data = line.strip().split(' ')\n\n # wrd, embed = data[0], data[1:]\n\n # Some words may be separated by space (telephone numbers, for example).\n # It's more robust to load data as follows.\n embed = data[-1 * embed_dims:]\n wrd = ' '.join(data[: -1 * embed_dims])\n\n embed = list(map(float, embed))\n wrd2embed[wrd] = embed\n\n for wrd in wrd_list:\n if wrd in wrd2embed:\n embed_list.append(wrd2embed[wrd])\n else:\n print('Word not in embeddings dump {}'.format(wrd))\n embed_list.append(np.random.randn(embed_dims))\n\n return np.array(embed_list, dtype=np.float32)", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model", "def load_word_embeddings(self, word_embeddings, word_to_ix):\n logger.info(\"Loading the vocabulary\")\n self.vocab = {}\n self.index2word = []\n counts = {}\n for word in word_to_ix:\n counts[word] = counts.get(word, 0) + 1\n self.vocab_size = len(counts)\n self.vector_size = word_embeddings.shape[1]\n self.vectors = np.zeros((self.vocab_size, self.vector_size))\n self.index2word = [None] * self.vocab_size\n logger.info(\"Corpus has %i words\", len(self.vocab))\n for word_id, word in 
enumerate(counts):\n self.vocab[word] = Vocab(index=word_id, count=counts[word])\n self.vectors[word_id] = word_embeddings[word_to_ix[word]]\n self.index2word[word_id] = word\n assert((len(self.vocab), self.vector_size) == self.vectors.shape)\n logger.info(\"Loaded matrix of %d size and %d dimensions\", self.vocab_size, self.vector_size)", "def generate_embeddings(documents, model):\r\n # Initialize dictionaries\r\n embeddings = {}\r\n usage_counts = {}\r\n\r\n # Generate embeddings for non-empty documents\r\n for document in filter(is_string, documents):\r\n # For each token in document\r\n for token in document.split(' '):\r\n token = str(token)\r\n\r\n if token not in embeddings:\r\n embeddings[token] = model.get_word_vector(token)\r\n usage_counts[token] = 1\r\n else:\r\n usage_counts[token] += 1\r\n\r\n # Convert embedding dictionary to a double list\r\n embeddings = [\r\n [word, usage_counts[word]] + list(embeddings[word])\r\n for word in embeddings\r\n ]\r\n print('Generated list, converting to DataFrame')\r\n\r\n # Convert double list to Pandas DataFrame\r\n headings = ['words', 'usages'] + [str(ind) for ind in range(1, model.get_dimension() + 1)]\r\n embeddings = DataFrame(embeddings, columns=headings)\r\n\r\n # Sort embeddings by usage then drop usage column\r\n embeddings.sort_values(['usages', 'words'], inplace=True, ascending=[False, True])\r\n embeddings.drop(columns='usages', inplace=True)\r\n\r\n return embeddings", "def load_embeddings(glove_path, vocab):\n vocab_size = vocab.get_vocab_size()\n words_to_keep = set(vocab.get_index_to_token_vocabulary().values())\n glove_embeddings = {}\n embedding_dim = None\n\n logger.info(\"Reading GloVe embeddings from {}\".format(glove_path))\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file,\n total=get_num_lines(glove_path)):\n fields = line.strip().split(\" \")\n word = fields[0]\n if word in words_to_keep:\n vector = np.asarray(fields[1:], dtype=\"float32\")\n if embedding_dim is None:\n embedding_dim = len(vector)\n else:\n assert embedding_dim == len(vector)\n glove_embeddings[word] = vector\n\n all_embeddings = np.asarray(list(glove_embeddings.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_std = float(np.std(all_embeddings))\n logger.info(\"Initializing {}-dimensional pretrained \"\n \"embeddings for {} tokens\".format(\n embedding_dim, vocab_size))\n embedding_matrix = torch.FloatTensor(\n vocab_size, embedding_dim).normal_(\n embeddings_mean, embeddings_std)\n # Manually zero out the embedding of the padding token (0).\n embedding_matrix[0].fill_(0)\n # This starts from 1 because 0 is the padding token, which\n # we don't want to modify.\n for i in range(1, vocab_size):\n word = vocab.get_token_from_index(i)\n\n # If we don't have a pre-trained vector for this word,\n # we don't change the row and the word has random initialization.\n if word in glove_embeddings:\n embedding_matrix[i] = torch.FloatTensor(glove_embeddings[word])\n return embedding_matrix", "def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n 
if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict" ]
[ "0.67976797", "0.6629015", "0.62132424", "0.61987644", "0.61634785", "0.608386", "0.6048953", "0.5975803", "0.5965511", "0.5949263", "0.5895469", "0.58472", "0.5844589", "0.58138406", "0.58073044", "0.5799596", "0.57825804", "0.5748438", "0.57151216", "0.56519526", "0.5644582", "0.5636812", "0.5617753", "0.5616487", "0.56040514", "0.55956954", "0.55874586", "0.55817264", "0.55792546", "0.55664396" ]
0.6641269
1
Save a histogram depicting the face sizes.
def hist_face_sizes(X, measure, output_dir): os.makedirs(output_dir, exist_ok=True) plt.clf() plt.hist(X, bins=100) plt.xlabel('Face bounding box') plt.ylabel('Frequency') plt.savefig(join(output_dir, 'face_' + measure + '.png'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_histogram(\n self, filename: [str, Path, BinaryIO], bins: int = 10, **kwargs\n ) -> None:\n self.plot_histogram(bins, show=False)\n plt.savefig(filename, **kwargs)\n if not isinstance(filename, BytesIO):\n print(f\"Picket fence histogram saved to: {osp.abspath(filename)}\")", "def save_hist(data, fname, title=''):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.hist(data.flatten(),bins=100,range=[0,1])\n plt.savefig(fname, dpi=100)\n plt.close(fig)", "def save_imageset_histogram(self, folder_name, save_location):\n all_images = []\n photo_list = self.get_photo_list(folder_name)\n for name in photo_list:\n image = cv2.imread(folder_name + '/' + name, cv2.IMREAD_ANYDEPTH)\n all_images.append(image.ravel())\n\n plt.hist(all_images, 256, [0, 65535])\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def save_histogram(self, step, tensors):\n\n # Save\n with self.summary_writer.as_default():\n for name, tensor in tensors.items():\n tf.summary.histogram(name, tensor, step)", "def save_histogram(hist, name):\n plt.clf()\n plt.plot(hist, color='k')\n plt.savefig('output/' + name + '.png')", "def save_hist(self, file_name):\n file_ext = file_name.split(\".\")[-1]\n assert file_ext == \"npy\", \"The file extension has to be npy (numpy file)\"\n np.save(file_name, self.hist)", "def histogram(values, title, fig_size=(4,3), path=None):\n plt.clf()\n f, ax = plt.subplots(1, figsize=fig_size)\n ax.hist(values, bins=60)\n ax.set_title(title)\n f.tight_layout()\n if(path != None):\n f.savefig(path+'/hist_'+title+'.png')", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()", "def save_histogram_data(self, histogram_data, obj_name):\n dirname = os.path.dirname(os.path.abspath(__file__))\n histogram_data_dir = os.path.join(dirname, '../data/histogram_data')\n if not os.path.exists(histogram_data_dir):\n os.mkdir(histogram_data_dir)\n filename = os.path.join(histogram_data_dir, obj_name + '_' + self.color_name + '.pkl.gz')\n with gzip.open(filename, 'wb') as f:\n pickle.dump(histogram_data, f)", "def write_hist_img_file(lengths, labels):\n import matplotlib.pyplot as plt\n\n # Find the max and min values for plotting.\n max_length = max(max(i) for i in lengths)\n min_length = min(min(i) for i in lengths)\n bin_size = int(0.025*max_length)\n\n # Make histogram\n colors = ['r', 'g', 'b']\n plt.hist(\n lengths,\n bins=range(min_length, max_length+bin_size, bin_size),\n color=colors[:len(lengths)],\n label=[ntpath.basename(l) for l in labels]\n )\n plt.legend()\n plt.title('Gap Length Histogram')\n plt.xlabel('Gap Length (b)')\n plt.ylabel('Frequency')\n plt.savefig(os.getcwd() + '/gap_stats_hist.pdf')", "def histogram(data, title, path):\n plt.hist(data,\n bins=60)\n plt.xticks(size=22)\n plt.yticks(size=22)\n plt.title(title,\n 
fontsize=30)\n plt.savefig(path)\n plt.clf()", "def plot_histogram(hist, outname, xlabel=\"\", ylabel=\"frequency\"):\n plt.bar(hist[:,0], hist[:,1])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.savefig(outname)\n plt.close()", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def plot_save_dat(counter, out_fname, img_name, xlabel, ylabel):\n with open(out_fname, 'w') as fid:\n for ele in counter.most_common():\n fid.writelines('%s %d\\n' % (ele[0], ele[1]))\n logging.info('Wrote to file: {}'.format(out_fname))\n plt.clf()\n # Histogram plot\n plt.hist(np.array(list(counter.values())), bins=100, normed=True)\n plt.xlabel(xlabel)\n plt.yscale('log')\n plt.ylabel(ylabel)\n plt.savefig(img_name)", "def save(self, filename):\n if not os.path.isdir( os.path.dirname(filename) ):\n self.logger.critical('%s does not exist'%os.path.dirname(filename))\n raise FileNotFoundError\n try:\n fit_func = 'NoneType' if not self.fit_function else self.fit_function.__name__\n np.savez_compressed(filename,\n data=self.data,\n bin_centers=self.bin_centers,\n bin_edges=self.bin_edges,\n bin_width=np.array([self.bin_width]),\n errors=self.errors,\n underflow=self.underflow,\n overflow=self.overflow,\n fit_result=self.fit_result,\n fit_function_name=np.array([fit_func]),\n fit_chi2_ndof=self.fit_chi2_ndof,\n fit_axis=self.fit_axis,\n xlabel=np.array([self.xlabel]),\n ylabel=np.array([self.ylabel]),\n label=np.array([self.label]),\n fit_result_label=self.fit_result_label)\n self.logger.info('Saved histogram in %s' % filename)\n except Exception as inst:\n self.logger.critical('Could not save in %s'%filename, inst)\n raise Exception(inst)", "def write_hist(fname='~/.ipyhist.py'):\n import os\n vseparator = '#' + '#'*62 + '#\\n'\n nextcmdfmt = vseparator + '# In[{}]:\\n{}\\n'\n outputfmt = '#' + '-'*62 + '#\\n# Out[{}]:\\n# {}\\n'\n with open(os.path.expanduser(fname), 'w') as outfile:\n for i in range(len(In)):\n outfile.write(nextcmdfmt.format(i, In[i]))\n if Out.has_key(i):\n out = repr(Out[i]).replace('\\n', '\\n# ')\n outfile.write(outputfmt.format(i, out))", "def histogram(L, out_file_name):\n if os.path.exists(out_file_name):\n raise FileExistsError('File already exists.')\n\n mean = math_lib.list_mean(L)\n stdev = math_lib.list_stdev(L)\n fig = plt.figure(dpi=300)\n\n ax = fig.add_subplot(1, 1, 1)\n plt.hist(L)\n plt.title(\"mean: {} stdev: {}\".format(mean, stdev))\n plt.xlabel('Value')\n plt.ylabel('Frequency')\n plt.savefig(out_file_name)", "def save_hist_video(self, video_out, fps=8, frame_size=None, fourcc=None):\n save_hist_video(video_out, self.hist, fps=fps, frame_size=frame_size, fourcc=fourcc)", "def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram of normalised reference image. 
Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def saveHistogram2D(y0,\n y1,\n xlabel=\"\",\n ylabel=\"\",\n name=\"Graphe Sans Titre\",\n filename=\"untitledPlot\"):\n print filename\n with open(filename+\".hist2d\", 'w') as openfile:\n openfile.write(\"name:\"+name+\"\\n\")\n openfile.write(\"xlabel:\"+xlabel+\"\\n\")\n openfile.write(\"ylabel:\"+ylabel+\"\\n\")\n Utils.drawArray(openfile, y0, \"y0\")\n Utils.drawArray(openfile, y1, \"y1\")\n openfile.write(\"name:\"+name+\"\\n\")", "def plot_histogram(bins, data, title, saving_path, hist_name):\n\n x = np.asarray(data)\n plt.figure()\n plt.hist(x[np.isfinite(x)], bins)\n plt.title(title)\n if not os.path.exists(saving_path):\n os.mkdir(saving_path)\n plt.savefig(saving_path + hist_name)", "def get_histograms(self, folder_name):\n histograms_folder_name = folder_name + '_histograms'\n\n try:\n print(\"Making dir \" + str(histograms_folder_name) + \" for histograms\")\n os.mkdir(histograms_folder_name)\n except OSError:\n print(\"Folder exists, have you already created these/this??\")\n return\n\n print(\"Writing to folder: \" + str(histograms_folder_name))\n photo_list = self.get_photo_list(folder_name, '*.png')\n for name in photo_list:\n image = cv2.imread(folder_name + '/' + name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.savefig(histograms_folder_name + '/' + name + 'histogram.eps', format='eps')\n plt.clf()\n # plt.show()", "def saveHistogram(x,\n y1,\n y2=None,\n y3=None,\n color1=Constants.colorBluePlotly,\n color2=Constants.colorOrangePlotly,\n color3=Constants.colorGreenPlotly,\n name1=\"\",\n name2=\"\",\n name3=\"\",\n percent=False,\n xlabel=\"\",\n ylabel=\"\",\n typeyaxis=\"linear\",\n name=\"Graphe Sans Titre\",\n filename=\"untitledPlot\"):\n if x is None or y1 is None:\n print \"error : no data to draw\"\n return\n with open(filename+\".txt\", 'w') as openfile:\n openfile.write(\"name:\"+name+\"\\n\")\n openfile.write(\"xlabel:\"+xlabel+\"\\n\")\n openfile.write(\"ylabel:\"+ylabel+\"\\n\")\n openfile.write(\"typeyaxis:\"+typeyaxis+\"\\n\")\n Utils.drawArray(openfile, x, \"x\")\n Utils.drawArray(openfile, y1, \"y1\")\n openfile.write(\"name1:\"+name1+\"\\n\")\n openfile.write(\"percent:\"+str(percent)+\"\\n\")\n if y2 is not None:\n Utils.drawArray(openfile, y2, \"y2\")\n openfile.write(\"name2:\"+name2+\"\\n\")\n if y3 is not None:\n Utils.drawArray(openfile, y3, \"y3\")\n openfile.write(\"name3:\"+name3+\"\\n\")", "def plot_hitstogram_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n file_in):\r\n\r\n # bins = max(data_values)\r\n # pylab.hist(data_values, facecolor='blue')\r\n pylab.hist(data_values, facecolor='green', alpha=0.6)\r\n pylab.grid(True)\r\n pylab.title(title + \"_histogram\")\r\n pylab.xlabel('number in cluster')\r\n pylab.ylabel('Count')\r\n pylab.savefig(file_in + \"_\" + title + '_histogram.png')\r\n plt.close()\r\n pylab.close()\r\n os.chdir('..')", "def showHistogram(image_list, name_list, path, toSave=False, hist_range=(0.0, 1.0)):\n\tfig = plt.figure()\n\tfig.subplots_adjust(hspace=.5)\n\timage_coordinate = 321\n\ti = 0\n\tfor image in image_list:\n\t\tfig.add_subplot(image_coordinate)\n\t\tplt.title(name_list[i])\n\t\tplt.set_cmap('gray')\n\t\tplt.axis('off')\n\t\tplt.imshow(image)\n\n\t\timage_coordinate += 1\n\n\t\tfig.add_subplot(image_coordinate)\n\t\tplt.title('histogram')\n\t\tplt.hist(image.ravel(), bins=256, range=hist_range)\n\n\t\timage_coordinate += 
1\t\n\t\ti += 1\n\n\tif toSave:\n\t\tplt.savefig(path + \".jpg\")\n\tplt.show()", "def save_hists_to_file(hists, filen, year, trigger, top_dir, pt):\n logging.info('Saving histograms to \\'{}\\''.format(filen))\n pserver = PlotServer(filen, 'update')\n for frame in hists:\n for var in hists[frame]:\n for state in hists[frame][var]:\n pserver.store_hist(hists[frame][var][state], top_dir, year,\n trigger, pt, var, frame, state)", "def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')", "def save_faces(img, faces, width, height):\n\n for i, (x, y, w, h) in enumerate(faces):\n # TODO: Change output string\n save_image(resize_img(img[y:y+h, x:x+w], width, height), 'out' + str(i) + '.jpg')", "def make_histogram(outpath, plotdata_y, bins=None, color='red',\n xlabel='', ylabel='', x_range=None):\n if bins is None:\n bins = get_optimum_bins(plotdata_y)\n pyplot.hist(plotdata_y, bins=bins, color=color, range=x_range)\n pyplot.grid(True, which='major', linestyle='-')\n pyplot.grid(True, which='minor')\n pyplot.xlabel(xlabel, fontsize=20)\n pyplot.ylabel(ylabel, fontsize=20)\n pyplot.tick_params(axis='both', which='major', labelsize=16)\n pyplot.tick_params(axis='both', which='minor', labelsize=8)\n pyplot.tight_layout()\n pyplot.savefig(outpath)\n pyplot.close()\n return outpath" ]
[ "0.706048", "0.7014267", "0.6871516", "0.6740837", "0.67147917", "0.6555688", "0.6508386", "0.6440228", "0.6383919", "0.6303205", "0.62309504", "0.615724", "0.6102644", "0.6074065", "0.6046084", "0.6015698", "0.59425527", "0.5941457", "0.5939888", "0.5929775", "0.5921403", "0.5918678", "0.5914912", "0.59127146", "0.5901517", "0.590039", "0.5814757", "0.57944185", "0.5770281", "0.5762026" ]
0.75264275
0
Clean up noise samples and compute Silhouette score. Since DBSCAN assigns a -1 label to those samples that are not attached to any cluster, we should dismiss those ones from the point of view of validation.
def measure_silhouette(X, labels, metric, with_noise=True): if get_number_clusters(labels) < 2: return -1 if -1 in labels: if with_noise: return silhouette_score(X, labels, metric=metric) else: idx2keep = [] for i, x in enumerate(labels): if x != -1: idx2keep.append(i) X = np.array([X[i] for i in idx2keep]) labels = labels[idx2keep] return silhouette_score(X, labels, metric=metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def silhouette_samples(X, labels, metric='euclidean', **kwds):\n distances = pairwise_distances(X, metric=metric, **kwds)\n n = labels.shape[0]\n A = np.array([_intra_cluster_distance(distances[i], labels, i)\n for i in range(n)])\n B = np.array([_nearest_cluster_distance(distances[i], labels, i)\n for i in range(n)])\n sil_samples = (B - A) / np.maximum(A, B)\n return sil_samples", "def test_integrated_kmeans_silhouette(self):\n # NOTE see #182: cannot use occupancy dataset because of memory usage\n\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n try:\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(KMeans(random_state=0), ax=ax)\n visualizer.fit(X)\n visualizer.poof()\n\n self.assert_images_similar(visualizer)\n except Exception as e:\n self.fail(\"error during silhouette: {}\".format(e))", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def test_integrated_kmeans_silhouette(self):\n # NOTE see #182: cannot use occupancy dataset because of memory usage\n\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(KMeans(random_state=0), ax=ax)\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def compare_silhoutte_scores(dfi, samples, range_n_clusters, cluster_dim='features'):\n df = dfi.fillna(0).copy()\n X = df[samples].values\n if cluster_dim == 'samples':\n X = X.T\n \n\n for n_clusters in range_n_clusters:\n # Create a subplot with 1 row and 2 columns\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.set_size_inches(18, 7)\n\n # The 1st subplot is the silhouette plot\n # The silhouette coefficient can range from -1, 1 but in this example all\n # lie within [-0.1, 1]\n ax1.set_xlim([-0.1, 1])\n # The (n_clusters+1)*10 is for inserting blank space between silhouette\n # plots of individual clusters, to demarcate them clearly.\n ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])\n\n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n clusterer = KMeans(n_clusters=n_clusters, random_state=10)\n cluster_labels = clusterer.fit_predict(X)\n\n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n silhouette_avg = silhouette_score(X, cluster_labels)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n\n y_lower = 10\n for i in range(n_clusters):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = \\\n sample_silhouette_values[cluster_labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n color = cm.spectral(float(i) / n_clusters)\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster 
numbers at the middle\n ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n ax1.set_title(\"The silhouette plot for the various clusters.\")\n ax1.set_xlabel(\"The silhouette coefficient values\")\n ax1.set_ylabel(\"Cluster label\")\n\n # The vertical line for average silhouette score of all the values\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\n ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n\n # 2nd Plot showing the actual clusters formed\n colors = cm.spectral(cluster_labels.astype(float) / n_clusters)\n ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\n # Labeling the clusters\n centers = clusterer.cluster_centers_\n # Draw white circles at cluster centers\n ax2.scatter(centers[:, 0], centers[:, 1], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\n ax2.set_title(\"The visualization of the clustered data.\")\n ax2.set_xlabel(\"Feature space for the 1st feature\")\n ax2.set_ylabel(\"Feature space for the 2nd feature\")\n\n plt.suptitle((\"Silhouette analysis for KMeans clustering on sample data \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\n plt.show()", "def silhouette(data, x_scaled, K, random_s=42):\n #Random seed fixed\n np.random.seed(random_s)\n #Init silouhette list\n silouhette = []\n #init index list\n index_ = []\n for i in K:\n print(\"Step: {} running...\".format(i))\n index_.append(i)\n #Clustering\n kmeans = KMeans(n_clusters=i, random_state=random_s).fit(x_scaled)\n #Compute silhouette coef\n silhouette_avg = silhouette_score(data, kmeans.labels_)\n silouhette.append(silhouette_avg)\n print(\"Silhouette score for step\", i, \"=\", silhouette_avg)\n #Creation of a df\n silouhette_coef = pd.DataFrame(silouhette, index=index_, columns=['coef'])\n silouhette_coef = silouhette_coef.sort_values(by='coef', ascending=False).reset_index()\n #Plot coef in function of the clusters\n ax = plt.figure().gca()\n ax.scatter(silouhette_coef['index'], silouhette_coef['coef'])\n plt.xlabel('Number of Clusters')\n plt.ylabel('Value')\n plt.title('Silhouette coefficient')\n ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))\n plt.show()\n return silouhette_coef", "def silhouette_score(X, labels, metric='euclidean', sample_size=None,\n random_state=None, **kwds):\n n_labels = len(np.unique(labels))\n n_samples = X.shape[0]\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. 
Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)\n\n if sample_size is not None:\n random_state = check_random_state(random_state)\n indices = random_state.permutation(X.shape[0])[:sample_size]\n if metric == \"precomputed\":\n X, labels = X[indices].T[indices].T, labels[indices]\n else:\n X, labels = X[indices], labels[indices]\n return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))", "def test_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0),\n ax=ax,\n colors=[\"red\", \"green\", \"blue\", \"indigo\", \"cyan\", \"lavender\"],\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def evaluateClusters( features, labels):\r\n\r\n\treturn silhouette_score( features, labels)", "def problem2(dataset_path):\n\n # to achieve this, we use Silhouette Index\n km = KMeans(init=\"k-mean++\", csv_path=dataset_path, n_init=5)\n\n dfs = []\n cs = []\n for i in range(2, 9):\n cs.append(i)\n km.n_clusters = i\n dfs.append(km.fit_predict_from_csv())\n\n iv = InternalValidator(dfs, cluster_nums=cs)\n iv.make_silhouette_table()\n iv.show_silhouette_plot()\n\n iv.make_cvnn_table()\n iv.show_cvnn_plot()", "def test_integrated_mini_batch_kmeans_silhouette(self):\n # NOTE see #182: cannot use occupancy dataset because of memory usage\n\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n try:\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(MiniBatchKMeans(random_state=0), ax=ax)\n visualizer.fit(X)\n visualizer.poof()\n\n self.assert_images_similar(visualizer)\n except Exception as e:\n self.fail(\"error during silhouette: {}\".format(e))", "def compute_silhouette(self):\n silhouette_dic = {}\n for cur_cluster in self.cluster_lst:\n for cur_sample in cur_cluster.get_samples():\n silhouette_dic[cur_sample.get_s_id()] = self.calculate_silhouette(cur_cluster, cur_sample)\n return silhouette_dic", "def test_negative_silhouette_score(self):\n raise NotImplementedError(\"no negative silhouette example available\")", "def compute_silhouette(self, matrix):\n sil_dict = {}\n for clus in self.clusters:\n for sample in clus.samples: # iterate over every sample in the dataset\n sum_dist = 0\n for other in clus.samples: # iterate over every other sample in the same cluster\n if sample.s_id > other.s_id:\n sum_dist += matrix[(sample.s_id, other.s_id)]\n elif sample.s_id < other.s_id:\n sum_dist += matrix[(other.s_id, sample.s_id)]\n if len(clus.samples) > 1:\n sil_in = float(sum_dist / (len(clus.samples) - 1))\n else: # when a sample is the only one in the cluster, its silhouette in value is 0\n sil_in = 0\n\n dist_to_clus = []\n for other_clus in self.clusters: # iterate over other clusters\n if other_clus == clus:\n continue\n sum_dist = 0\n for other in other_clus.samples: # iterate over every other sample in other clusters\n if sample.s_id > other.s_id:\n sum_dist += matrix[(sample.s_id, other.s_id)]\n else:\n sum_dist += matrix[(other.s_id, sample.s_id)]\n # save the distance from the sample to every other cluster\n dist_to_clus.append(float(sum_dist / (len(other_clus.samples))))\n sil_out = min(dist_to_clus)\n if sil_in == 0:\n sil_dict[sample.s_id] = 0\n else:\n sil_dict[sample.s_id] = 
float((sil_out - sil_in) / (max(sil_out, sil_in)))\n\n return sil_dict", "def test_clusterer_without_predict(self, model):\n X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])\n try:\n visualizer = SilhouetteVisualizer(model(n_clusters=2))\n visualizer.fit(X)\n visualizer.finalize()\n except AttributeError:\n self.fail(\"could not use fit or fit_predict methods\")", "def get_silhouette(self, factor_df, n_clusters=10):\n # https://stackoverflow.com/questions/51138686/how-to-use-silhouette-score-in-k-means-clustering-from-sklearn-library\n range_n_clusters = list(range(2, 10))\n for n_clusters in range_n_clusters:\n clusterer = KMeans(n_clusters=n_clusters)\n preds = clusterer.fit_predict(factor_df)\n #centers = clusterer.cluster_centers_\n\n score = silhouette_score(factor_df, preds, metric='euclidean')\n print(\"For n_clusters = {}, silhouette score is {})\".format(n_clusters, score))", "def silhouette_scores(self):\r\n kmeans_models = [KMeans(n_clusters=k, random_state=42).fit(self.X) for k in range(1, 10)]\r\n silhouette_scores = [silhouette_score(self.X, model.labels_) for model in kmeans_models[1:]]\r\n print(colored(\"The maximum silhouette score is %0.02f at the cluster number %d\\n\" % (np.max(silhouette_scores),(silhouette_scores.index(np.max(silhouette_scores))+2)),color = 'blue', attrs=['bold']))\r\n plt.figure(figsize=(16, 8))\r\n plt.plot(range(2, 10), silhouette_scores, \"bo-\")\r\n plt.xlabel(\"$k$\", fontsize=14)\r\n plt.ylabel(\"Silhouette score\", fontsize=14)\r\n plt.show()", "def compute_summery_silhouette(self):\n silhouette_cluster_dic = {}\n for cluster in self.cluster_lst:\n silhouette_cluster_dic.setdefault(cluster.get_c_id(), self.calculate_cluster_silhouette(cluster))\n silhouette_cluster_dic[0] = self.calculate_sample_silhouette()\n return silhouette_cluster_dic", "def test_integrated_mini_batch_kmeans_silhouette(self):\n # NOTE see #182: cannot use occupancy dataset because of memory usage\n\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(MiniBatchKMeans(random_state=0), ax=ax)\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, 
predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" 
classes. \" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)", "def evaluation(x_selected, y, state_of_art=False):\n # check_number_of_clusters(X_selected)\n # n_clusters = best_number_of_clusters\n\n y_true = np.asarray(y)\n\n if state_of_art:\n avg_sil, model = get_results(x_selected, [max(y_true)])\n else:\n avg_sil, model = get_results(x_selected)\n\n # using GPU\n #avg_sil, model = get_results_from_GPU(x_selected)\n\n \"\"\" VALID DISTANCES METRICS\n 'euclidean', 'l2', 'l1', 'manhattan', 'cityblock',\n 'braycurtis', 'canberra', 'chebyshev', 'correlation',\n 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener',\n 'sokalsneath', 'sqeuclidean', 'yule', \"wminkowski\"\n \n max([\n metrics.silhouette_score(x_selected, model.labels_, metric='euclidean'),\n metrics.silhouette_score(x_selected, model.labels_, metric='cosine'),\n\n metrics.silhouette_score(x_selected, model.labels_, metric='manhattan'),\n metrics.silhouette_score(x_selected, model.labels_, metric='braycurtis'),\n metrics.silhouette_score(x_selected, model.labels_, metric='canberra'),\n metrics.silhouette_score(x_selected, model.labels_, metric='chebyshev'),\n metrics.silhouette_score(x_selected, model.labels_, metric='hamming'),\n metrics.silhouette_score(x_selected, model.labels_, metric='jaccard'),\n\n metrics.silhouette_score(x_selected, model.labels_, metric='russellrao'),\n metrics.silhouette_score(x_selected, model.labels_, metric='sokalsneath'),\n metrics.silhouette_score(x_selected, model.labels_, metric='yule'),\n\n metrics.silhouette_score(x_selected, model.labels_, metric='kulsinski'),\n metrics.silhouette_score(x_selected, model.labels_, metric='cityblock')\n ])\"\"\"\n\n y_predict = model.labels_\n\n for i in range(0, len(y_predict)):\n y_predict[i] = y_predict[i] + 1\n\n # inercia = k_means.inertia_\n inertia = model.inertia_\n\n # calculate NMI\n nmi = v_measure_score(y_true, y_predict)\n # nmi = 0\n\n # calculate corrected rand\n corrected_rand = adjusted_rand_score(y_true, y_predict)\n # corrected_rand = 0\n\n # calculate f-measure\n f_measure = f1_score(y_true, y_predict, average='micro')\n\n # calculate ACC\n\n label1 = np.unique(y_true)\n label2 = np.unique(y_predict)\n if len(label1) == len(label2):\n y_permuted_predict = best_map(y_true, y_predict)\n acc = accuracy_score(y_true, y_permuted_predict)\n else:\n acc = 0\n\n logger.log(\"KMeans model has\"\n \" avg_sil = \" + str(avg_sil) +\n \" and NMI = \" + str(nmi) +\n \" and CR = \" + str(corrected_rand) +\n \" and f-measure = \" + str(f_measure), True)\n\n # calculate cluster.stats\n # dgene = stats.dist(x_selected, method='euclidean')\n\n \"\"\" fpc_result = fpc.cluster_stats(dgene,\n model.labels_,\n G2=False,\n G3=False,\n silhouette=True,\n sepwithnoise=True,\n noisecluster=True,\n wgap=True)\n\n stats_fpc = pandas2ri.ri2py(fpc_result)\"\"\"\n\n # Compute Hopkins statistic for iris dataset\n # nrow = X_selected.shape[0]\n # res = factoextra.get_clust_tendency(X_selected, n=nrow - 1, graph=False)\n # print(\"hopkins test: \"+ str(res[0]))\n # hopkins_test = res\n\n # dunn = stats_fpc[24][0]\n # dunn2 = stats_fpc[25][0]\n # entropy = stats_fpc[26][0]\n # ch = stats_fpc[28][0]\n\n dunn = 0\n dunn2 = 0\n entropy = 0\n ch = 0\n\n # inertia, avg_sil, dunn, 
dunn2, entropy, ch, nmi, acc, corrected_rand, f_measure\n return ClusteringResults(max(y_predict), inertia, avg_sil, dunn, dunn2, entropy, ch, nmi, acc, corrected_rand, f_measure)", "def test_colormap_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0), ax=ax, colormap=\"gnuplot\"\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def test_quick_method(self):\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n model = MiniBatchKMeans(3, random_state=343)\n oz = silhouette_visualizer(model, X, show=False)\n assert isinstance(oz, SilhouetteVisualizer)\n\n self.assert_images_similar(oz)", "def calculate_silhouette(self, cluster, sample):\n cluster_size = len(cluster.get_samples())\n if cluster_size <= 1:\n return 0\n in_xi = self.in_xi(sample, cluster, cluster_size)\n out_xi = self.out_xi(sample, cluster)\n return (out_xi-in_xi) / max(in_xi, out_xi)", "def calculate_cluster_silhouette(self, cluster):\n cluster_size = len(cluster.get_samples())\n return self.sum_silhouette(cluster) / cluster_size", "def compute_silhouette(self, umap_embeddings, labels):\n silhouette = silhouette_score(umap_embeddings, labels)\n silhouette = float(\"{0:.3f}\".format(silhouette))\n return silhouette", "def mask_test_train_count(data, split, rating_threshold): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # create max split\n max_split = int(split*(training_set.nnz))\n\n # find index of values which are not empty and over threshold\n rating_inds = np.nonzero(training_set > rating_threshold)\n \n # create list of index pairs\n rating_pairs = list(zip(rating_inds[0], rating_inds[1]))\n\n # Split ration, based on threshold\n thres_max = len(rating_pairs)\n\n if thres_max > max_split:\n masking_ratio = max_split / thres_max\n else:\n sys.exit('Your threshold for rating is too high, please recalculate and lower down the threshold')\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(masking_ratio*len(rating_pairs)))\n\n # get random samples\n samples = random.sample(rating_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)", "def silhouette_coefficient(cls, cluster_labels, pca_reduced, cluster_size, names: list):\n score = metrics.silhouette_score(pca_reduced, cluster_labels)\n return ClusterMetricScore('Silhouette score', score, cluster_size, cluster_labels, names)", "def non_maximum_suppression(prediction, iou_threshold=0.45, score_threshold=0.25):\n\n # num_classes = len(names)\n max_wh = 4096\n max_det = 300\n max_nms = 30000\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n\n for xi, x in enumerate(prediction):\n x = x[x[..., 4] > score_threshold]\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = x[:, :4]\n\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 
1)[conf.view(-1) > score_threshold]\n\n # Filter by class\n # if classes is not None:\n # x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n # sort by confidence\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n\n # Batched NMS\n c = x[:, 5:6] * max_wh # classes\n # boxes (offset by class), scores\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = nms(boxes, scores, iou_threshold) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n\n return output", "def test_colormap_as_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0), ax=ax, colors=\"cool\"\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n tol = (\n 3.2 if sys.platform == \"win32\" else 0.01\n ) # Fails on AppVeyor with RMS 3.143\n self.assert_images_similar(visualizer, remove_legend=True, tol=tol)" ]
[ "0.6510874", "0.6369514", "0.635986", "0.62310797", "0.6025459", "0.601955", "0.59956837", "0.59785324", "0.59346735", "0.5932225", "0.5925631", "0.59135884", "0.5869151", "0.5866978", "0.58561087", "0.58439296", "0.58116424", "0.577271", "0.5763137", "0.57414037", "0.57391554", "0.57146484", "0.5675102", "0.5642081", "0.5637154", "0.55957437", "0.5590601", "0.55539006", "0.55401653", "0.5499011" ]
0.6825846
0
Returns the sum of all the primes below n. >>> solution(1000) 76127 >>> solution(5000) 1548136 >>> solution(10000) 5736396 >>> solution(7) 10
def solution(n: int = 2000000) -> int: return sum(takewhile(lambda x: x < n, prime_generator()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes", "def nth_prime(n):\n\n upper_bound = 0\n if n >= 7022:\n upper_bound = int(n * log(n) + n * (log(log(n)) - 0.9385))\n elif n >= 6:\n upper_bound = int(n * log(n) + n * log(log(n)))\n else:\n upper_bound = 14\n prime_set = list(primes(upper_bound))\n return prime_set[n - 1]", "def rwh_primes1(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * int((n/2))\n for i in range(3,int(n**0.5)+1,2):\n if sieve[int(i/2)]:\n sieve[int(i*i/2)::i] = [False] * int(((n-i*i-1)/(2*i)+1))\n return [2] + [2*i+1 for i in range(1,int(n/2)) if sieve[int(i)]]", "def ten():\r\n \r\n primes = [2]\r\n n = 3\r\n \r\n while n < 2000000:\r\n if isprime(n) == True:\r\n primes.append(n)\r\n n += 2\r\n #isprime = True\r\n #for prime in primes:\r\n #if n % prime == 0:\r\n #isprime = False\r\n #break\r\n #if isprime == True:\r\n #primes.append(n)\r\n #isprime = True\r\n #n += 2\r\n \r\n #for prime in primes:\r\n #sum += prime\r\n \r\n return sum(primes)", "def get_primes(n):\n\n return list(primes_sieve(n))", "def totalSolutions(n:int):\n\n memo = [0]*6\n memo[5] = 1\n if n == 1:\n return 1\n\n for i in range(n):\n solutions = sum(memo)\n memo.pop(0)\n memo.append(solutions)\n\n return memo.pop()", "def solve(n=4 * 10**6):\r\n target_sum = 0\r\n fib_sequence = fibonacci_generator()\r\n for num in fib_sequence:\r\n if num >= n:\r\n break\r\n\r\n if num % 2 == 0:\r\n target_sum += num\r\n\r\n return target_sum", "def find_n_primes(n):\n primes = [ ]\n\n if n < 2:\n return None;\n\n primes.append(2)\n\n for i in range(3, n + 1, 2):\n is_prime = True\n for p in primes:\n if i % p is 0:\n is_prime = False\n continue\n if is_prime:\n primes.append(i)\n return primes", "def countPrimesOptimized(self, n: int) -> int:\n if n < 2:\n return 0\n\n size = (n - 3) // 2 + 1 # -3 for 0,1,2 and // 2 to ignore evens\n primes = [2]\n is_prime = [True for i in range(size)] # represents if (2i+3) is prime\n\n for 
i in range(size):\n if is_prime[i]:\n p = 2 * i + 3\n primes.append(p)\n # Sieve from p^2, where p^2 = (2i+3)^2 = (4i^2 + 12i + 9)\n # Index in is_prime is (2i^2 + 6i + 3)\n # because is_prime[i] = 2i + 3.\n for j in range(2 * i**2 + 6 * i + 3, size, p):\n is_prime[j] = False\n\n return len(primes) - 1 if primes[-1] == n else len(primes)", "def solution3(n):\n res = []\n while n > 0:\n m = int(math.sqrt(n))**2\n res.append(m)\n n -= m\n return res", "def count_primes(n):\n i, total = 1, 0\n while i <= n:\n if is_prime(i):\n total += 1\n i += 1\n return total", "def problem10():\n total_sum = 0\n for x in xrange(1, 2000000):\n if is_prime(x):\n total_sum += x\n return total_sum", "def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])", "def sopf(n, primes):\r\n total = 0\r\n for p in primes:\r\n if n % p == 0:\r\n total += p\r\n while n // p == 0:\r\n n //= p\r\n return total", "def get_integers(n: int) -> int:\n l = [int(sqrt(n))]\n val = l[0] * l[0]\n index = 0\n while val != n:\n val = sum([x*x for x in l])\n if val > n:\n l[index] -= 1\n elif val < n:\n index += 1\n l.append(l[index - 1])\n\n return len(l)", "def solution1(n):\n res = []\n while n > 0:\n m = int(math.floor(math.sqrt(n))**2)\n res.append(m)\n n -= m\n return res", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sum_of_proper_divisors_sieve(n):\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve", "def rwh_primes2(n):\n # flake8: noqa\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + 
(n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sumTo(n):\n \n the_sum = 0 #current sum\n a_number = 1 #where we are\n while a_number <= n:\n the_sum += a_number\n a_number += 1\n return the_sum", "def solution(n):\n total = sum(\n [\n i\n for i in range(1, n)\n if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i\n ]\n )\n return total", "def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result", "def primeGen(n):\n primes = [2, 3, 5, 7, 11]\n if n in xrange(1, len(primes) + 1):\n return primes[:n]\n else:\n banlist = []\n count = 6\n while count <= n:\n Next = (primes[-2] + primes[-1]) - primes[-3]\n if not is_prime(Next):\n count -= 1\n banlist.append(Next)\n count += 1\n primes.append(Next)\n filterout(banlist, primes)\n return primes", "def prime_pi(n):\n if n < 2:\n return 0\n\n primes = sieve(n)\n return len(primes)" ]
[ "0.7467311", "0.7343437", "0.7302786", "0.71355885", "0.71004784", "0.7057291", "0.703228", "0.69721013", "0.6956273", "0.69070196", "0.69021696", "0.6899741", "0.689087", "0.68867993", "0.6867779", "0.68633753", "0.68523663", "0.68466914", "0.6844139", "0.68319136", "0.67879874", "0.67816216", "0.6771907", "0.6767459", "0.675867", "0.6731229", "0.6712279", "0.67038065", "0.66829276", "0.66742545" ]
0.80684763
0
ESTIMATEGAUSSIAN This function estimates the parameters of a Gaussian distribution using the data in X [mu sigma2] = estimateGaussian(X), The input X is the dataset with each n-dimensional data point in one row The output is an n-dimensional vector mu, the mean of the data set and the variances sigma^2, an n x 1 vector
def estimateGaussian(X): mu = X.mean(0, keepdims=True).T sigma2 = X.var(0, keepdims=True).T return mu, sigma2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2", "def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def optimalGaussian(mu, sigma):\r\n \r\n optimal = gaussian(mu, sigma, mu-3.5*sigma, mu+3.5*sigma) #Calculate values of optimal gaussian\r\n def simulationTestGaussian2(params):\r\n \"\"\"\r\n Compare a gaussian with optimal gaussian\r\n \"\"\"\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error\r\n return simulationTestGaussian2", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def Gaussiankernel(size, sigma=1): \n size = int(size) // 2\n # create x grid and y grid\n x, y = np.mgrid[-size:size+1, -size:size+1] \n # gaussian distribution formula\n normal = 1 / np.sqrt(2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n \n return g/g.sum()", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def mean_sigma(h):\n h.Fit(\"gaus\", \"q\")\n result_fit = h.GetFunction(\"gaus\")\n mean = result_fit.GetParameter(1)\n sigma = result_fit.GetParameter(2)\n return mean, sigma", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], 
x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma", "def multivariateGaussian(X, mu, Sigma2):\n k = mu.shape[0]\n\n if Sigma2.shape[1] == 1 or Sigma2.shape[0] == 1:\n Sigma2 = np.diag(Sigma2[:, 0])\n\n X = (X-mu.T).copy()\n p = (2*np.pi)**(-k/2)*np.linalg.det(Sigma2)**-0.5\n p = p*np.exp(-0.5*(X.dot(np.linalg.pinv(Sigma2))*X).sum(1, keepdims=True))\n return p", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def simulationTestGaussian2(params):\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error", "def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)", "def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):\n\tif not N1:\n\t\tN1 = N0\n\n\td1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]\n\tif d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):\n\t\traise ValueError('data_gauss: dimensions should agree')\n\n\tX0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))\n\tX0 += np.ones((N0,1)) * mu0\n\tY0 = -np.ones(N0)\n\n\tX1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))\n\tX1 += np.ones((N1,1)) * mu1\n\tY1 = np.ones(N1)\n\n\tX = np.row_stack((X0,X1))\n\tY = np.concatenate((Y0,Y1))\n\n\treturn X,Y", "def multivariateGaussian(X, mu, sigma2):\n\tk = len(mu)\n\n\tif sigma2.ndim == 1:\n\t\t# convert sigma2 to a diagonal matrix\n\t\tsigma2 = np.diag(sigma2)\n\n\t# vectorized version of Multivariate Gaussian Distribution\n\tX = X - mu\n\t# p is a vector contains all probabilities of each examples\n\tp = (2 * np.pi) ** (- k / 2.0) * np.linalg.det(sigma2) ** (-0.5) * \\\n\t np.exp(-0.5 * np.sum(X.dot(np.linalg.pinv(sigma2)) * X, axis=1))\n\n\treturn p", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def param_gauss(xdata_, *params_):\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)", "def gaussian(x, peak_x=.0, sigma=1.0, name=''):\n x = x.astype(np.float)\n variables = {'function': gaussian, 'peak_x': peak_x, 'sigma': sigma}\n y = np.exp((-1 * (x - peak_x)**2) / (2 * sigma**2))\n return packer(x, y, variables, name=name)", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def gaussum(xdata,*params):\n\tamp = num.zeros(0)\n\tcen = num.zeros(0)\n\tstdv = num.zeros(0)\n\n\tfor i in range(0, len(params), 3): #This section is just 
unpacking the parameter array into amps, cens, and stdvs\n\t\tx = params[i]\n\t\tamp = num.append(amp,x)\n\t\ty = params[i+1]\n\t\tcen = num.append(cen,y)\n\t\tz = params[i+2]\n\t\tstdv = num.append(stdv,z)\n\tglobal storage #You may not need storage to be global so think about taking this part out. storage stores the data\n\tstorage = [[0 for x in range(1)] for x in range(len(params)/3)] #from each iteration of the gaussian equation into\n\tfor i in range(len(params)/3):#individual rows. So row one will be the gaussian solutions to the first peak and so on\n\t\tstorage[i] = gaus(xdata,amp[i],cen[i],stdv[i])\n\tstorage = num.asarray(storage)\n\treturn sum(storage)" ]
[ "0.7859824", "0.7347176", "0.7337883", "0.6956588", "0.6940591", "0.6928924", "0.68244404", "0.68032956", "0.662365", "0.6578702", "0.6561422", "0.65113556", "0.6508217", "0.64527017", "0.6452224", "0.64339375", "0.6422379", "0.64214444", "0.64155674", "0.64134204", "0.6396536", "0.6389613", "0.63708436", "0.63694406", "0.63621455", "0.6322861", "0.6301306", "0.6295678", "0.62795675", "0.62781996" ]
0.7678695
1
MULTIVARIATEGAUSSIAN Computes the probability density function of the multivariate gaussian distribution. p = MULTIVARIATEGAUSSIAN(X, mu, Sigma2) Computes the probability density function of the examples X under the multivariate gaussian distribution with parameters mu and Sigma2. If Sigma2 is a matrix, it is treated as the covariance matrix. If Sigma2 is a vector, it is treated as the \sigma^2 values of the variances in each dimension (a diagonal covariance matrix)
def multivariateGaussian(X, mu, Sigma2): k = mu.shape[0] if Sigma2.shape[1] == 1 or Sigma2.shape[0] == 1: Sigma2 = np.diag(Sigma2[:, 0]) X = (X-mu.T).copy() p = (2*np.pi)**(-k/2)*np.linalg.det(Sigma2)**-0.5 p = p*np.exp(-0.5*(X.dot(np.linalg.pinv(Sigma2))*X).sum(1, keepdims=True)) return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multivariateGaussian(X, mu, sigma2):\n\tk = len(mu)\n\n\tif sigma2.ndim == 1:\n\t\t# convert sigma2 to a diagonal matrix\n\t\tsigma2 = np.diag(sigma2)\n\n\t# vectorized version of Multivariate Gaussian Distribution\n\tX = X - mu\n\t# p is a vector contains all probabilities of each examples\n\tp = (2 * np.pi) ** (- k / 2.0) * np.linalg.det(sigma2) ** (-0.5) * \\\n\t np.exp(-0.5 * np.sum(X.dot(np.linalg.pinv(sigma2)) * X, axis=1))\n\n\treturn p", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def multivariate_gaussian(pos, mu, Sigma):\r\n\r\n n = mu.shape[0]\r\n Sigma_det = np.linalg.det(Sigma)\r\n Sigma_inv = np.linalg.inv(Sigma)\r\n N = np.sqrt((2*np.pi)**n * Sigma_det)\r\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\r\n # way across all the input variables.\r\n fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)\r\n\r\n return np.exp(-fac / 2) / N", "def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2", "def doubleGaussianCen(p, x, mu1, mu2):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n param1 = [p[0], p[1], mu1, 0]\n if len(p)==4:\n param2 = [p[2], p[3], mu2, 0]\n elif len(p)==5:\n param2 = [p[2], p[3], mu2, p[4]]\n\n return gaussian(param1, x) + gaussian(param2, x)", "def product_of_gaussians(mus, sigmas_squared):\n sigmas_squared = torch.clamp(sigmas_squared, min=1e-07)\n sigma_squared = 1.0 / torch.sum(torch.reciprocal(sigmas_squared), dim=0)\n mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=0)\n return mu, sigma_squared", "def vectorized_gaussian_variable_mutation(population: np.array, std: float = 1.0) -> np.array:\n return population + stats.norm.rvs(size=np.product(population.shape), scale=std).reshape(population.shape)", "def mix_gaussian(mu, sigma_list, weights, num_sample):\n\t\"\"\"\n\tinputs:\n\t-------\n\tmu mean list, numpy array\n\tsigma_list sigma list\n\tweights weights corresponding to each components\n\tnum_sample the number of samples\n\t\n\treturns:\n\t--------\n\tsamples\n\tprobability density function (pdf) of mixture Gaussian distribution\n\t\"\"\"\n\tdim = mu.shape[1]\n\tnum_components = mu.shape[0]\n\tassert (len(weights) == num_components) and (num_components == len(sigma_list))\n\tdata = np.zeros((num_sample, dim))\n\tfor i in range(num_sample):\n\t\tidx_component = np.random.choice(num_components, p=weights)\n\t\tmean = mu[idx_component]\n\t\tcov = sigma_list[idx_component]\n\t\tdata[i, :] = np.random.multivariate_normal(mean, cov)\n\treturn data", "def gaussian_probability(sigma, mu, target):\n target = target.unsqueeze(1).expand_as(sigma)\n ret = ONEOVERSQRT2PI * torch.exp(-0.5 * ((target - mu) / sigma)**2) / sigma\n # ret = integrate.quad(pdf_func, target, target+)\n # print('ret', 
ret.size())\n # print('2dret', torch.prod(ret, 2).size())\n # print(not torch.isnan(ret).any())\n if (torch.isnan(ret).any()):\n print('sigma', sigma)\n print('mu', mu)\n print('ret', ret)\n input()\n return torch.prod(ret, 2)", "def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):\n\tif not N1:\n\t\tN1 = N0\n\n\td1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]\n\tif d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):\n\t\traise ValueError('data_gauss: dimensions should agree')\n\n\tX0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))\n\tX0 += np.ones((N0,1)) * mu0\n\tY0 = -np.ones(N0)\n\n\tX1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))\n\tX1 += np.ones((N1,1)) * mu1\n\tY1 = np.ones(N1)\n\n\tX = np.row_stack((X0,X1))\n\tY = np.concatenate((Y0,Y1))\n\n\treturn X,Y", "def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1):\n kernel = np.zeros((size, size), dtype=np.float32)\n \n size = int(size) // 2\n X = np.arange(-size,size+1)\n Y = np.arange(-size,size+1)\n \n for x in X:\n for y in Y:\n Gx = np.exp(-((x-mu1)**2)/(2*(sigma1**2)))\n Gy = np.exp(-((y-mu2)**2)/(2*(sigma2**2)))\n Gx = math.exp(-(math.pow(x-mu1,2))/(2*math.pow(sigma1,2)))\n Gy = math.exp(-(math.pow(y-mu2,2))/(2*math.pow(sigma2,2)))\n kernel[x+size,y+size] = Gx*Gy\n return kernel", "def productGaussian(mu1, C1, mu2, C2):\n Cn = C1 + mat(.0001*identity(2))\n K = Cn*linalg.inv(Cn+C2)\n mu = mu1 + K*(mu2-mu1)\n C = Cn - K*Cn\n #denom = linalg.inv(C1+C2)\n #mu = denom*(C1*mu2+C2*mu1)\n #C = C1*denom*C2\n return mu,C", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))", "def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2", "def wass_gaussians(mu1, mu2, Sigma1, Sigma2):\n d = mu1.shape[0]\n if d == 1:\n w2 = (mu1 - mu2)**2 + (np.sqrt(Sigma1) - np.sqrt(Sigma2))**2\n else:\n prodSigmas = Sigma2**(1/2)*Sigma1*Sigma2**(1/2)\n w2 = np.linalg.norm(mu1 - mu2)**2 + np.trace(Sigma1 + Sigma2 - 2*(prodSigmas)**(1/2))\n return np.sqrt(w2)", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def gaussian_dense(matrix, two_sigma_square):\n\n return np.exp(- matrix / two_sigma_square)", "def optimalGaussian(mu, sigma):\r\n \r\n optimal = gaussian(mu, sigma, mu-3.5*sigma, mu+3.5*sigma) #Calculate values of optimal gaussian\r\n def simulationTestGaussian2(params):\r\n \"\"\"\r\n Compare a gaussian with 
optimal gaussian\r\n \"\"\"\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error\r\n return simulationTestGaussian2", "def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def addGaussian(ax, ismulti):\n shape = (96, 288) #ax.shape[:2]\n intensity_noise = np.random.uniform(low=0, high=0.05)\n if ismulti:\n ax[:,:,0] = ax[:,:,0]*(1+ intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1]))\n else:\n ax[:,:,0] = ax[:,:,0] + intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1])\n return ax", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B" ]
[ "0.7436147", "0.70535463", "0.6552553", "0.6514739", "0.6439178", "0.6319203", "0.60270005", "0.5780886", "0.57091594", "0.57025003", "0.56856954", "0.56552154", "0.5488051", "0.5474055", "0.54697037", "0.5448944", "0.5433295", "0.5429303", "0.5429244", "0.54279035", "0.5417467", "0.53725326", "0.53678334", "0.53641033", "0.53623414", "0.532283", "0.53217894", "0.53217894", "0.53126013", "0.53109294" ]
0.7175663
1
VISUALIZEFIT Visualize the dataset and its estimated distribution. VISUALIZEFIT(X, p, mu, sigma2) This visualization shows you the probability density function of the Gaussian distribution. Each example has a location (x1, x2) that depends on its feature values.
def visualizeFit(X, mu, sigma2): X1 = np.arange(0, 35.1, .5) X2 = np.arange(0, 35.1, .5) X1, X2 = np.meshgrid(X1, X2) Z = multivariateGaussian(np.c_[X1.flatten(), X2.flatten()], mu, sigma2) Z = Z.reshape(X1.shape) plt.figure() plt.plot(X[:, 0], X[:, 1], 'bx') # Do not plot if there are infinities if np.isinf(Z).sum() == 0: plt.contour(X1, X2, Z, 10**np.arange(-20., 0, 3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_fit(self):\n self.fft_fit_plotter.plot(self.ax)\n plt.draw()", "def plot_data(self, plot_input=True, plot_fitted=True,plotfile=None, show=None):\n if not self.fitted:\n raise RuntimeError(\"Please run fit() before attempting to plot the results\")\n\n fitted_data = self.data_summary(printout=False)\n fitted_mean = fitted_data['mean'].to_numpy().reshape((self.npoints,self.ndim))\n print(fitted_mean.shape)\n fitted_sigma = fitted_data['sd'].to_numpy().reshape((self.npoints,self.ndim))\n if self.ndim==np.int(2) and isinstance(self.ndim, int):\n blue, _, red, *_ = sns.color_palette()\n f, ax = plt.subplots(1, 1, figsize=(5, 4))#, gridspec_kw=dict(width_ratios=[4, 3]))\n\n sns.scatterplot(x=self.data[:,0], y=self.data[:,1])\n if plot_input:\n ax.errorbar(x=self.data[:,0], y=self.data[:,1],\n xerr=self.sigma[:,0], yerr=self.sigma[:,1],fmt='o',label='input data')\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,0], y=fitted_mean[:,1],\n xerr=fitted_sigma[:,0], yerr=fitted_sigma[:,1],fmt='o',label='inferred data')\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(0, 1)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(0, 1)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n\n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n rect_post = plt.Rectangle((0, 0), 1, 1, fc=blue, alpha=0.5)\n ax.legend(\n [rect_post],\n [\"Estimated 95% density region\"],\n loc=2,\n )\n #plt.show()\n\n elif self.ndim > 2 and isinstance(int, self.ndim) and np.isfinite(self.ndim):\n #raise NotImplementedError(\"This routine doesn't support plotting correlations in more than 2 dimensions yet!\")\n rows = self.ndim - 1\n cols = self.ndim - 1\n fig = plt.figure()\n gs = fig.add_gridSpec(rows, cols,left=0.1, right=0.9, bottom=0.1, top=0.9,\n wspace=0.05, hspace=0.05)\n for i in range(self.ndim - 1):\n for j in range(i+1,self.ndim - 1):\n ax = fig.add_subplot(gs[i,j])\n #plot the data points\n sns.scatterplot(self.data[:,i], self.data[:,j], ax=ax)\n if plot_input:\n ax.errorbar(x=self.data[:,i], y=self.data[:,j],\n xerr=self.sigma[:,i], yerr=self.sigma[:,j])\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,i], y=fitted_mean[:,j],\n xerr=fitted_sigma[:,i], yerr=fitted_sigma[:,j])\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(i, j)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(i, j)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n \n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n \n else:\n raise ValueError(\"Ndim is either less than 2 or is not an integer!\")\n \n if isinstance(plotfile, str):\n plt.save(plotfile)\n elif not show:\n raise TypeError(\"plotfile must be a string\")\n if show:\n plt.show()\n elif plotfile is not None:\n plt.close()", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample 
dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n grid()", "def plot_filtergaussian_distribution(dataframe, ellipse=None, confidence=0.975, *args, **kwargs):\n\n fig = pyplot.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax = dataframe.plot(kind='scatter', x='coul', y='vdw',\n title='Multivariate Gaussian distribution outlier detection of delta VdW and Coul energies',\n ax=ax, **kwargs)\n\n # If outliers, plot them.\n if dataframe.outliers['filter_mask'].sum() > 0:\n outlier_dataframe = dataframe.outliers\n outlier_dataframe.plot(kind='scatter', x='coul', y='vdw',\n color='red', label='Outlier %1.1f%%' % (confidence * 100),\n ax=ax, **kwargs)\n\n # Print outlier labels\n if kwargs.get('show_labels', True):\n xoff, yoff = _get_label_offset(dataframe[['coul', 'vdw']])\n for i, point in outlier_dataframe.iterrows():\n ax.text(point['coul'] + xoff, point['vdw'] + yoff,\n \"{0:.0f}-{1:.0f}\".format(point['case'], point['poses']), fontsize=8)\n\n # Plot training compounds if any\n trainset = dataframe.trainset\n if not trainset.empty:\n trainset.plot(kind='scatter', x='coul', y='vdw', color='green', label='Training compounds', ax=ax, **kwargs)\n\n ax.legend(frameon=False, loc='best', fontsize=8)\n ax.set_xlabel(r'$\\Delta$$E_{ele}$ (kJ/mol)', fontsize=10)\n ax.set_ylabel(r'$\\Delta$$E_{VdW}$ (kJ/mol)', fontsize=10)\n\n # Plot the ellipse\n if ellipse:\n ax.add_artist(ellipse)\n\n return fig", "def GaussianFit(data, title=\"\"):\n y, binEdges = np.histogram(data, 50)\n x = (binEdges[:-1] + binEdges[1:]) / 2\n x_width = (x[-1] - x[0]) / len(x)\n y_err = np.sqrt(y) # items in a bin should follow the Poisson distribution\n\n # calculate optimal fit parameters and covariance matrix using least squares method\n popt, cov = curve_fit(Gaussian, x, y, [np.mean(data), np.std(data), 10])\n\n # plot data\n plt.bar(x, y, x_width, yerr=y_err, color=\"blue\", edgecolor=\"black\", capsize=3, ecolor=\"black\")\n \n text1 = \"Mean (GeV): \" + str( round_to(popt[0], cov[0, 0]) ) + \" $\\pm$ \" + str( round_to(cov[0, 0], cov[0, 0]) )\n\n text2 = \"Standard deviation (GeV): \" + str( round_to(popt[1], cov[1, 1]) ) + \" $\\pm$ \" + str( round_to(cov[1, 1], cov[1, 1]) )\n\n text = '\\n'.join((text1, text2))\n\n # plot gaussian fit\n x_int = np.linspace(x[0], x[-1], 10*len(x)) # interpolate data\n y_int = Gaussian(x_int, *popt)\n plt.plot(x_int, y_int, label=\"Gaussian fit\", color=\"red\")\n\n\n plt.annotate(text, xy=(0.025, 0.8), xycoords='axes fraction')\n\n # plot options\n plt.legend()\n plt.xlabel(\"Energy (GeV)\")\n plt.ylabel(\"Number of events (bin width=\" + str(round(x_width, 2)) + \" GeV)\")\n plt.title(title)\n #plt.title(\"Beam momentum 100GeV, magnetic field \" + str(geometry.B) + \"T.\")\n \n # return some results, mean, standard deviation, amplitude\n return [popt[0], cov[0, 0]], [popt[1], cov[1, 1]], [popt[2], cov[2, 2]]", "def fit_transform_show(self, X, y=None, outpath=None, **kwargs):\n Xp = self.fit_transform(X, y, **kwargs)\n self.show(outpath, **kwargs)\n return Xp", "def vis_points(data,f1,f2):\n if np.isnan(data).any():\n return\n \n plt.scatter(data[:,f1], data[:,f2], alpha=0.2, c='b')\n plt.xlim(lims)\n plt.ylim(lims)", "def method_silverman(data, tofit):\n gaus_kde = stats.gaussian_kde(data.T)\n return gaus_kde(tofit.T)", "def vis_g(f1,f2):\n data = generator(fixed_noise).data.numpy()\n 
if np.isnan(data).any():\n return\n \n plt.scatter(data[:,f1], data[:,f2], alpha=0.2, c='b')\n plt.xlim(lims)\n plt.ylim(lims)", "def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()", "def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn", "def feature_vis(data, x, y = 'price', categorical = False, kde = True):\n\n print(data[x].value_counts().sort_index())\n \n fig, axs = plt.subplots(ncols=2, figsize= (12,6))\n \n sns.regplot(data=data, x=x, y=y, ax=axs[0])\n sns.histplot(data=data, x=x, discrete=categorical, kde=kde, ax=axs[1])\n \n fig.suptitle(f'{x.title()} vs. {y.title()}', fontsize=16)\n plt.tight_layout();\n \n return", "def visualize(self):\n\n check_is_fitted(self, \"sm_\")\n\n fig = plt.figure(figsize=(6, 4))\n inner = gridspec.GridSpec(2, 1, hspace=0.1, height_ratios=[6, 1])\n ax1_main = plt.Subplot(fig, inner[0]) \n xgrid = np.linspace(self.xmin, self.xmax, 100).reshape([-1, 1])\n ygrid = self.decision_function(xgrid)\n ax1_main.plot(xgrid, ygrid)\n ax1_main.set_xticklabels([])\n ax1_main.set_title(\"Shape Function\", fontsize=12)\n fig.add_subplot(ax1_main)\n \n ax1_density = plt.Subplot(fig, inner[1]) \n xint = ((np.array(self.bins_[1:]) + np.array(self.bins_[:-1])) / 2).reshape([-1, 1]).reshape([-1])\n ax1_density.bar(xint, self.density_, width=xint[1] - xint[0])\n ax1_main.get_shared_x_axes().join(ax1_main, ax1_density)\n ax1_density.set_yticklabels([])\n ax1_density.autoscale()\n fig.add_subplot(ax1_density)\n plt.show()", "def plotFittingResults(self):\n _listFitQ = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitQ()]\n _listFitValues = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitValues()]\n _listExpQ = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataQ()]\n _listExpValues = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataValues()]\n\n #_listExpStdDev = None\n #if self.getDataInput().getExperimentalDataStdDev():\n # _listExpStdDev = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataStdDev()]\n #if _listExpStdDev:\n # pylab.errorbar(_listExpQ, _listExpValues, yerr=_listExpStdDev, linestyle='None', marker='o', markersize=1, label=\"Experimental Data\")\n # pylab.gca().set_yscale(\"log\", nonposy='clip')\n #else: \n # pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label=\"Experimental Data\")\n\n pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label=\"Experimental Data\")\n pylab.semilogy(_listFitQ, _listFitValues, label=\"Fitting 
curve\")\n pylab.xlabel('q')\n pylab.ylabel('I(q)')\n pylab.suptitle(\"RMax : %3.2f. Fit quality : %1.3f\" % (self.getDataInput().getRMax().getValue(), self.getDataOutput().getFitQuality().getValue()))\n pylab.legend()\n pylab.savefig(os.path.join(self.getWorkingDirectory(), \"gnomFittingResults.png\"))\n pylab.clf()", "def simplePlots() -> None:\r\n \r\n # Univariate data -------------------------\r\n \r\n # Make sure that always the same random numbers are generated\r\n np.random.seed(1234)\r\n \r\n # Generate data that are normally distributed\r\n x = np.random.randn(500)\r\n \r\n # Other graphics settings\r\n # Set \" context='poster' \" for printouts, and \"set_fonts(32)\"\r\n sns.set(context='notebook', style='ticks', palette='muted')\r\n \r\n # Set the fonts the way I like them\r\n set_fonts(16)\r\n \r\n # Scatter plot\r\n plt.plot(x, '.', markersize=7)\r\n plt.xlim([0, len(x)])\r\n \r\n # Save and show the data, in a systematic format\r\n printout('scatterPlot.jpg', xlabel='Datapoints', ylabel='Values', title='Scatter')\r\n \r\n # Histogram\r\n plt.hist(x)\r\n printout('histogram_plain.jpg', xlabel='Data Values',\r\n ylabel='Frequency', title='Histogram, default settings')\r\n \r\n plt.hist(x, 25, density=True)\r\n printout('density_histogram.jpg', xlabel='Data Values', ylabel='Probability',\r\n title='Density Histogram, 25 bins')\r\n \r\n # Boxplot\r\n # The ox consists of the first, second (middle) and third quartile\r\n set_fonts(18)\r\n plt.boxplot(x, sym='*')\r\n printout('boxplot.jpg', xlabel='Values', title='Boxplot')\r\n \r\n plt.boxplot(x, sym='*', vert=False)\r\n plt.title('Boxplot, horizontal')\r\n plt.xlabel('Values')\r\n plt.show()\r\n \r\n # Errorbars\r\n x = np.arange(5)\r\n y = x**2\r\n errorBar = x/2\r\n plt.errorbar(x,y, yerr=errorBar, fmt='o', capsize=5, capthick=3)\r\n plt.xlim([-0.2, 4.2])\r\n plt.ylim([-0.2, 19])\r\n printout('Errorbars.jpg', xlabel='Data Values', ylabel='Measurements', title='Errorbars')\r\n\r\n # SD for two groups\r\n weight = {'USA':89, 'Austria':74}\r\n weight_SD_male = 12\r\n plt.errorbar([1,2], weight.values(), yerr=weight_SD_male * np.r_[1,1],\r\n capsize=5, LineStyle='', marker='o')\r\n plt.xlim([0.5, 2.5])\r\n plt.xticks([1,2], weight.keys())\r\n plt.ylabel('Weight [kg]')\r\n plt.title('Adult male, mean +/- SD')\r\n\r\n show_data('SD_groups.jpg', out_dir='.')\r\n \r\n # Barplot\r\n # The font-size is set such that the legend does not overlap with the data\r\n np.random.seed(1234)\r\n set_fonts(16)\r\n \r\n df = pd.DataFrame(np.random.rand(7, 3), columns=['one', 'two', 'three'])\r\n df.plot(kind='bar', grid=False, color=sns.color_palette('muted'))\r\n \r\n show_data('barplot.jpg')\r\n\r\n # Bivariate Plots\r\n df2 = pd.DataFrame(np.random.rand(50, 3), columns=['a', 'b', 'c'])\r\n df2.plot(kind='scatter', x='a', y='b', s=df2['c']*500);\r\n plt.axhline(0, ls='--', color='#999999')\r\n plt.axvline(0, ls='--', color='#999999')\r\n printout('bivariate.jpg')\r\n \r\n sns.set_style('ticks')\r\n\r\n # Pieplot\r\n txtLabels = 'Cats', 'Dogs', 'Frogs', 'Others'\r\n fractions = [45, 30, 15, 10]\r\n offsets =(0, 0.05, 0, 0)\r\n \r\n plt.pie(fractions, explode=offsets, labels=txtLabels,\r\n autopct='%1.1f%%', shadow=True, startangle=90,\r\n colors=sns.color_palette('muted') )\r\n plt.axis('equal')\r\n printout('piePlot.jpg', title=' ')", "def display_fit(x,y,p,func,fig=None):\n if fig is None:\n fig = plots.tac_figure('x','y','fitting')\n fig.plot(x,np.log(y),label='data')\n \n \n fig.plot(x,np.log(func(p,x)),'--x',label=func.__name__ + '('+\n 
','.join(['%.1e'%k for k in p])+ ')')\n \n return fig", "def VisualizeDistribution(dataset, distribution, title, filename):\n # create the output directory if it doesn't exist\n if not os.path.exists('distributions'):\n os.mkdir('distributions')\n if not os.path.exists('distributions/{}'.format(dataset)):\n os.mkdir('distributions/{}'.format(dataset))\n\n # determine the appropriate units for this distribution\n max_duration = max(distribution)\n if max_duration > 10**8:\n units = 'seconds'\n for iv in range(len(distribution)):\n distribution[iv] = distribution[iv] / 10**9\n else:\n units = 'microseconds'\n\n # plot the figure\n plt.figure(figsize=(6, 4))\n\n # write the labels for this set of functions\n plt.title(title, pad=20, fontsize=14)\n plt.ylabel('Time ({})'.format(units), fontsize=12)\n plt.xlabel('No. Appearances: {}'.format(len(distribution)), fontsize=12)\n\n # plot the distribution\n plt.boxplot(distribution)\n\n plt.tight_layout()\n\n output_filename = 'distributions/{}/{}'.format(dataset, filename)\n plt.savefig(output_filename)\n\n # clear and close this figure\n plt.clf()\n plt.close()", "def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) > 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5", "def visualize(\n self,\n fig: plt.Figure,\n X: np.ndarray,\n energy: Callable[[np.ndarray], np.ndarray] = None,\n ):\n fig.clear()\n ax = fig.subplots(1, 1)\n if X.shape[1] == 1:\n ax.hist(X.reshape([-1]), density=True, label=\"Data\")\n x_min = X.min()\n x_max = X.max()\n xs = np.linspace(x_min, x_max, 100)\n if hasattr(self, \"logpdf\"):\n ys_ = np.exp(self.logpdf(xs.reshape([-1, 1])))\n ys = ys_.reshape(-1)\n ax.plot(xs, ys, label=\"Actual\")\n if energy is not None:\n ys_ = np.exp(-energy(xs.reshape([-1, 1])))\n ys = ys_.reshape(-1)\n Z = ys_.mean() * (x_max - x_min)\n ax.plot(xs, ys / Z, label=\"Energy\", color=\"red\")\n ax.legend()\n elif X.shape[1] == 2:\n ax.scatter(X[:, 0], X[:, 1], 
label=\"Data\")\n x_min, x_max = X[:, 0].min(), X[:, 0].max()\n y_min, y_max = X[:, 1].min(), X[:, 1].max()\n x_support = np.linspace(x_min, x_max, 100)\n y_support = np.linspace(y_min, y_max, 100)\n xx, yy = np.meshgrid(x_support, y_support)\n XY = np.hstack([xx.reshape([-1, 1]), yy.reshape([-1, 1])])\n if hasattr(self, \"logpdf\"):\n z_ = np.exp(self.logpdf(XY))\n z = z_.reshape(xx.shape)\n ax.contour(xx, yy, z, 10)\n if energy is not None:\n z_ = np.exp(-energy(XY))\n z = z_.reshape(xx.shape)\n ax.contour(xx, yy, z, 10, cmap=\"Reds\")\n ax.legend()\n else:\n from sklearn.manifold import TSNE\n\n tsne = TSNE(n_components=2)\n emb = tsne.fit_transform(X)\n ax.scatter(emb[:, 0], emb[:, 1])", "def show_data(sim_attr_generator):\n#TODO description\n if Args.data_to_show == 'dprime':\n show_dprime(sim_attr_generator)", "def show_cleaned_vis(data, x, y = 'price', categorical = False, kde = True):\n\n ### Filter outliers first\n \n idx_out = find_outliers_IQR(data[x])\n \n df_cleaned = data[~idx_out].copy()\n\n ### Plot Data\n \n df_cleaned.value_counts().sort_index()\n \n fig, axs = plt.subplots(ncols=2, figsize= (12,6))\n \n sns.regplot(data=df_cleaned, x=x, y=y, ax=axs[0],line_kws={\"color\": \"red\"})\n sns.histplot(data=df_cleaned, x=x, discrete=categorical, kde=kde, ax=axs[1])\n \n fig.suptitle(f'{x.title()} vs. {y.title()}', fontsize=16)\n plt.tight_layout();\n \n return #df_cleaned", "def part2():\n\tX, Xval, yval = loadDataSet('ex8data2.mat')\n\tmu, sigma2 = estimateGaussian(X)\n\tp = multivariateGaussian(X, mu, sigma2)\n\tpval = multivariateGaussian(Xval, mu, sigma2)\n\tepsilon, F1 = selectThreshold(yval, pval)\n\n\tprint('Best epsilon found using cross-validation: %e\\n' % (epsilon))\n\tprint('Best F1 on Cross Validation Set: %f\\n' % (F1))\n\tprint('# Outliers found: %d\\n' % (np.sum(p < epsilon)))\n\tprint('(you should see a value epsilon of about 1.38e-18)\\n\\n')", "def plot_heldout_prediction(input_val,\n y_val,\n mu_val,\n sigma_val,\n fname=None,\n n=1,\n title=\"\"):\n fig = figure.Figure(figsize=(9, 3 * n))\n canvas = backend_agg.FigureCanvasAgg(fig)\n for i in range(n):\n ax = fig.add_subplot(n, i + 1, 1)\n ax.plot(input_val, y_val, label='True data')\n ax.plot(input_val, mu_val, label='Predictive mean')\n lower = mu_val - 1.96 * sigma_val\n upper = mu_val + 1.96 * sigma_val\n ax.fill_between(\n input_val, lower, upper, label='95% confidence interval')\n\n plt.legend()\n fig.suptitle(title)\n fig.tight_layout()\n\n if fname is not None:\n canvas.print_figure(fname, format=\"png\")\n print(\"saved {}\".format(fname))", "def _2d_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.bounded_2d_kde import Bounded_2d_kde\n\n # get bounds\n lows = []\n highs = []\n methods = []\n for param in self.parameters[0:2]:\n if param in DEFAULT_BOUNDS:\n lows.append(\n DEFAULT_BOUNDS[param][\"low\"]\n if \"low\" in DEFAULT_BOUNDS[param]\n else None\n )\n highs.append(\n DEFAULT_BOUNDS[param][\"high\"]\n if \"high\" in DEFAULT_BOUNDS[param]\n else None\n )\n methods.append(\n DEFAULT_BOUNDS[param][\"method\"]\n if \"method\" in DEFAULT_BOUNDS[param]\n else \"Reflection\"\n )\n\n if self.plottype == \"triangle\":\n from pesummary.core.plots.publication import triangle_plot as plotfunc\n elif self.plottype == \"reverse_triangle\":\n from pesummary.core.plots.publication import (\n reverse_triangle_plot as plotfunc,\n )\n else:\n # contour plot\n from pesummary.core.plots.publication import (\n comparison_twod_contour_plot as plotfunc,\n )\n\n # set KDE information\n kwargs.update(\n 
{\n \"kde\": Bounded_2d_kde,\n \"kde_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n }\n )\n\n # default to not showing data points\n if \"plot_datapoints\" not in kwargs:\n kwargs[\"plot_datapoints\"] = False\n\n if \"triangle\" in self.plottype:\n from pesummary.core.plots.bounded_1d_kde import bounded_1d_kde\n\n # set KDE informaiton\n kwargs.update(\n {\n \"kde_2d\": Bounded_2d_kde,\n \"kde_2d_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n \"kde\": bounded_1d_kde,\n }\n )\n\n kwargs[\"kde_kwargs\"] = {\n \"x_axis\": {\"xlow\": lows[0], \"xhigh\": highs[0], \"method\": methods[0]},\n \"y_axis\": {\"xlow\": lows[1], \"xhigh\": highs[1], \"method\": methods[1]},\n }\n\n args = [\n [samps[self.parameters[0]].values for samps in self._samples.values()],\n [samps[self.parameters[1]].values for samps in self._samples.values()],\n ]\n\n if \"xlabel\" not in kwargs:\n kwargs[\"xlabel\"] = self.latex_labels[self.parameters[0]]\n if \"ylabel\" not in kwargs:\n kwargs[\"ylabel\"] = self.latex_labels[self.parameters[1]]\n\n if \"labels\" not in kwargs and len(self.results) > 1:\n kwargs[\"labels\"] = list(self._samples.keys())\n\n # set injection parameter values\n if self.injection_parameters is not None:\n if (\n self.injection_parameters[self.parameters[0]] is not None\n and self.injection_parameters[self.parameters[1]] is not None\n ):\n kwargname = \"truths\" if self.plottype == \"corner\" else \"truth\"\n kwargs[kwargname] = [\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]],\n self.injection_parameters[self.parameters[1]]\n - self.parameter_offsets[self.parameters[1]],\n ]\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **kwargs)\n\n return fig", "def summary_plot(dataset, dof=None, p=0.05, ax=None, **kwargs):\n if ax is None:\n ax = plt.gca()\n\n scores = []\n for traj in dataset:\n scores += traj.meta['chi2scores'].tolist()\n scores = np.array(scores)\n\n preferences = {\n 'bins' : 'auto',\n 'density' : True,\n 'histtype' : 'step'\n }\n for key in preferences.keys():\n if not key in kwargs.keys():\n kwargs[key] = preferences[key]\n\n ax.hist(scores[~np.isnan(scores)], **kwargs)\n\n if dof is not None:\n xplot = np.linspace(0, np.nanmax(scores), 1000)\n ax.plot(xplot, np.insert(scipy.stats.chi2.pdf(xplot[1:], dof), 0, 0), color='red', label='expected chi2')\n\n if p is not None:\n thres = scipy.stats.chi2.ppf(p, dof)\n ax.axvline(x=thres, color='magenta')\n thres = scipy.stats.chi2.isf(p, dof)\n ax.axvline(x=thres, color='magenta')", "def show(self):\n if self._tree is None:\n raise RuntimeError(\"Estimator not fitted, call `fit` first\")\n\n import tree_plotter\n tree_plotter.createPlot(self._tree)", "def plot_gmm_preds(x, z, with_supervision, plot_id):\n plt.figure(figsize=(12, 8))\n plt.title('{} GMM Predictions'.format('Semi-supervised' if with_supervision else 'Unsupervised'))\n plt.xlabel('x_1')\n plt.ylabel('x_2')\n\n for x_1, x_2, z_ in zip(x[:, 0], x[:, 1], z):\n color = 'gray' if z_ < 0 else PLOT_COLORS[int(z_)]\n alpha = 0.25 if z_ < 0 else 0.75\n plt.scatter(x_1, x_2, marker='.', c=color, alpha=alpha)\n\n file_name = 'pred{}_{}.pdf'.format('_ss' if with_supervision else '', plot_id)\n save_path = os.path.join('.', file_name)\n plt.savefig(save_path)", "def plot_gmm_preds(x, z, with_supervision, plot_id):\n plt.figure(figsize=(12, 8))\n plt.title('{} GMM Predictions'.format('Semi-supervised' if 
with_supervision else 'Unsupervised'))\n plt.xlabel('x_1')\n plt.ylabel('x_2')\n\n for x_1, x_2, z_ in zip(x[:, 0], x[:, 1], z):\n color = 'gray' if z_ < 0 else PLOT_COLORS[int(z_)]\n alpha = 0.25 if z_ < 0 else 0.75\n plt.scatter(x_1, x_2, marker='.', c=color, alpha=alpha)\n\n file_name = 'pred{}_{}.pdf'.format('_ss' if with_supervision else '', plot_id)\n save_path = os.path.join('.', file_name)\n plt.savefig(save_path)", "def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()", "def vis_survival_stats(data, outcomes, feature):\n pass" ]
[ "0.5769036", "0.56943357", "0.5680741", "0.5669008", "0.55950904", "0.5514858", "0.54367095", "0.5405027", "0.5348903", "0.53205526", "0.5303339", "0.5293988", "0.52836484", "0.52553016", "0.52499604", "0.5218861", "0.51786095", "0.51578337", "0.51521975", "0.5151396", "0.5145401", "0.51325864", "0.5113006", "0.51119804", "0.5102774", "0.50962925", "0.5095676", "0.5095676", "0.50841856", "0.5057405" ]
0.68592554
0
SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting outliers [bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best threshold to use for selecting outliers based on the results from a validation set (pval) and the ground truth (yval).
def selectThreshold(yval, pval): bestEpsilon = 0 bestF1 = 0 F1 = 0 stepsize = (pval.max()-pval.min())/1000 for epsilon in np.arange(pval.min(), pval.max()+stepsize/2, stepsize): predictions = (pval < epsilon) tp = ((predictions == 1) & (yval == 1)).sum() fp = ((predictions == 1) & (yval == 0)).sum() fn = ((predictions == 0) & (yval == 1)).sum() prec = tp/(tp+fp) rec = tp/(tp+fn) F1 = 2*prec*rec/(prec+rec) if F1 > bestF1: bestF1 = F1 bestEpsilon = epsilon return bestEpsilon, bestF1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selectThreshold(yval, pval):\n\tbestEpsilon = 0\n\tbestF1 = 0\n\tstepsize = (np.max(pval) - np.min(pval)) / 1000\n\n\tfor epsilon in np.arange(np.min(pval), np.max(pval), stepsize):\n\t\tpredictions = (pval < epsilon) + 0\n\t\ttp = np.sum((yval == 1) & (predictions == 1))\n\t\tfp = np.sum((yval == 0) & (predictions == 1))\n\t\tfn = np.sum((yval == 1) & (predictions == 0))\n\t\tif tp + fp == 0:\n\t\t\tcontinue\n\t\tprec = float(tp) / (tp + fp) # tips: cast int to float, or you will get 0\n\t\trec = float(tp) / (tp + fn)\n\t\tF1 = 2.0 * prec * rec / (prec + rec)\n\t\tif F1 > bestF1:\n\t\t\tbestF1 = F1\n\t\t\tbestEpsilon = epsilon\n\treturn bestEpsilon, bestF1", "def find_best_threshold(y, y_hat, step_size, score_func, maximize=True):\n best_thres, best_score = 0.0, 0.0 if maximize else 1.0\n for thres in np.arange(0, 1, step_size):\n score = score_for_threshold(y, y_hat, score_func, thres)\n if (maximize and (score > best_score)) or (not maximize and (score < best_score)):\n best_score = score\n best_thres = thres\n\n return best_thres, best_score", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! {} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def threshold_selection(prevalence, CostFP_minus_CostTN, CostFN_minus_CostTP, y, y_hat):\n fpr, tpr, thresholds = roc_curve(y, y_hat)\n m = ((1 - prevalence) / prevalence) * ((CostFP_minus_CostTN) / (CostFN_minus_CostTP))\n fm_thresholds = []\n for i in range(len(fpr)):\n fm = tpr[i] - (m * fpr[i])\n fm_thresholds.append((thresholds[i], fm))\n fm_thresholds = sorted(fm_thresholds, key=lambda fm_value: fm_value[1], reverse=True)\n return fm_thresholds[0][0]", "def gp_optimize_threshold(gp_model, X_val, y_val, X_scaler, y_scaler, optimize_for=\"profits\"): \n y_hat, conf = gp_model.predict(X_val)\n regressed_payment = y_scaler.inverse_transform(y_hat).reshape(-1)\n loan_amt = X_scaler.inverse_transform(X_val)[:,0]\n\n # This ratio is a guage of how likely a person will pay back.\n # It is compared with a threshold to determine whether or not to loan.\n payment_to_loan_ratio = regressed_payment / loan_amt\n\n # Sort in descending order\n sorted_ind = np.argsort(-payment_to_loan_ratio)\n sorted_payment_to_loan_ratio = payment_to_loan_ratio[sorted_ind]\n X_sorted, y_sorted = X_val[sorted_ind,:], y_val[sorted_ind]\n\n threshold, highest_opt_val = 0, 0\n for i, thresh in enumerate(sorted_payment_to_loan_ratio): \n X_loanee = X_sorted[:i+1,:]\n y_loanee = y_sorted[:i+1]\n \n loan_amt_loanee = np.sum(X_scaler.inverse_transform(X_loanee)[:,0])\n payments_loanee = np.sum(y_loanee)\n\n # Optimize for different values\n if optimize_for == \"profits\":\n opt_val = payments_loanee - loan_amt_loanee\n elif optimize_for == \"profit_percentage\":\n opt_val = (payments_loanee - loan_amt_loanee) / loan_amt_loanee\n else:\n raise Exception(\"Illegal optimize_for value: %s\" % optimize_for)\n\n # Keep track of highest value (that is being optimized for)\n if opt_val > highest_opt_val:\n threshold = thresh\n highest_opt_val = opt_val\n return threshold", "def _find_threshold(self, feature, y_train, num_class):\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n 
for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. (0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative", "def find_best_feature_threshold(self, X, Y):\n nan_direction = 0\n best_gain = - np.inf\n best_feature, best_threshold = None, None\n rsts = None\n\n # for each feature, find its best_threshold and best_gain, finally select the largest gain\n # implement in parallel\n cols = list(X.columns)\n data = pd.concat([X, Y], axis=1)\n\n func = partial(self.find_best_threshold, data)\n if self.num_thread == -1:\n pool = Pool()\n rsts = pool.map(func, cols)\n pool.close()\n\n else:\n pool = Pool(self.num_thread)\n rsts = pool.map(func, cols)\n pool.close()\n\n for rst in rsts:\n if rst[2] > best_gain:\n best_gain = rst[2]\n best_threshold = rst[1]\n best_feature = rst[0]\n\n return best_feature, best_threshold, best_gain", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def _find_best_threshold(self, num_of_steps=20, verbose=False):\n xmin = self.x.min()\n xmax = self.x.max()\n step = (xmax - xmin)/num_of_steps\n \n lower_th = None\n lower_IR = 1\n\n # for each potential threshold\n for threshold in np.arange(xmin+step, xmax, step):\n IR = self._compute_isometric_ratio(threshold)\n \n if IR < lower_IR:\n lower_IR = IR\n lower_th = threshold\n \n self.threshold = lower_th\n if verbose:\n 
print(f'\\tThreshold:\\t\\t{lower_th}\\n\\tIsometric Ratio:\\t{lower_IR}')", "def part2():\n\tX, Xval, yval = loadDataSet('ex8data2.mat')\n\tmu, sigma2 = estimateGaussian(X)\n\tp = multivariateGaussian(X, mu, sigma2)\n\tpval = multivariateGaussian(Xval, mu, sigma2)\n\tepsilon, F1 = selectThreshold(yval, pval)\n\n\tprint('Best epsilon found using cross-validation: %e\\n' % (epsilon))\n\tprint('Best F1 on Cross Validation Set: %f\\n' % (F1))\n\tprint('# Outliers found: %d\\n' % (np.sum(p < epsilon)))\n\tprint('(you should see a value epsilon of about 1.38e-18)\\n\\n')", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def get_optimal_threshold(y_true, y_proba, sig2incl_ratio, sample_weight=None):\n significances, thresholds = signal_significance(\n y_true, y_proba, sig2incl_ratio, sample_weight=sample_weight\n )\n return thresholds[np.nanargmax(significances)]", "def get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold", "def _findThreshold(self, loudnesses, annotations, measure):\n\n # Sort loudnesses and respective annotations\n sortedLoudnesses, sortedAnnotations = zip(\n *sorted(zip(loudnesses, annotations)))\n\n # Preparation\n scores = []\n loudnessesBelow = []\n loudnessesAbove = list(loudnesses)\n estimations = [True] * len(loudnesses)\n\n # Try out all reasonable thresholds\n for i in range(len(loudnesses) - 1):\n estimations[i] = False\n loudnessesBelow.append(loudnessesAbove.pop(0))\n scores.append(\n self._score(measure, estimations, sortedAnnotations,\n loudnessesBelow, loudnessesAbove))\n\n # Find optimal threshold\n idx = np.argmax(scores)\n threshold = (sortedLoudnesses[idx] + sortedLoudnesses[idx + 1]) * 0.5\n bestScore = scores[idx]\n\n return threshold, bestScore", "def is_better(self, curr, best, **kwargs):\r\n score_threshold = kwargs.pop('score_threshold', 1e-3)\r\n relative_eps = 1.0 + score_threshold\r\n return curr >= best*relative_eps", "def get_best_thresholds(labels, test_y, outputs, plot=False):\n t_max = [0] * len(labels)\n f_max = [0] * len(labels)\n\n for i, label in 
enumerate(labels):\n ts = []\n fs = []\n\n for t in np.linspace(0.1, 0.99, num=50):\n p, r, f, _ = precision_recall_fscore_support(test_y[:,i], np.where(outputs[:,i]>t, 1, 0), average='micro')\n ts.append(t)\n fs.append(f)\n if f > f_max[i]:\n f_max[i] = f\n t_max[i] = t\n\n if plot:\n print(f'LABEL: {label}')\n print(f'f_max: {f_max[i]}')\n print(f't_max: {t_max[i]}')\n\n plt.scatter(ts, fs)\n plt.show()\n \n return t_max, f_max", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def select_best(self,dataframe: pd.DataFrame):\n \n # create a Dataframe only for categorical variables\n # categorical_df = pd.get_dummies(dataframe[self.cat_feats])\n categorical_df = dataframe[self.cat_feats]\n \n for feats in self.cat_feats:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(dataframe[feats].values)\n categorical_df.loc[:,feats] = lbl.transform(dataframe[feats].values)\n \n # select only Top 5 variables \n selector = SelectKBest(chi2,k=5)\n # give the targetcolumn and the rest of the data to the scalar to fit\n selector.fit(categorical_df,dataframe[self.target_cols])\n # get the indicies of the selected columns\n cols = selector.get_support(indices=True)\n\n # For display purpose Only\n dfscores = pd.DataFrame(selector.scores_)\n dfcolumns = pd.DataFrame(categorical_df.columns)\n\n #concat two dataframes for better visualization \n featureScores = pd.concat([dfcolumns,dfscores],axis=1)\n featureScores.columns = ['Features','Score'] #naming the dataframe columns\n featureScores = featureScores.sort_values(by='Score', ascending=False)\n \n utils.bar_plot(\n x_data= featureScores['Features'],\n y_data=featureScores['Score'],\n title=\"Select_K_Best using CHI2 For Categorical Features\",\n x_title=\"Features\",\n y_title=\"CHI2 Score\",\n output_path= 
os.path.join(self.output_path,\"select_k_best_chi2.html\")\n )\n \n self.cat_feats = featureScores['Features'].values.tolist()[:self.num_best]\n # drop the columns which did not qualify\n for feats in self.dataframe_d_copy.columns:\n if feats not in self.cat_feats:\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feats,axis=1)\n return self.cat_feats", "def bestThreshold3(lda_imlementation, vectorized_coded_revs, column, best_tops,\n coded_reviews_df):\n cat_array = lda_imlementation.transform(vectorized_coded_revs)\n\n topic_array = []\n for i in range(lda_imlementation.n_topics):\n coded_reviews_df['topic_'+str(i)] = cat_array[:, i]\n topic_array += ['topic_'+str(i)]\n\n # Compute ROC curve point\n y_score = sum([coded_reviews_df[top] for top in best_tops])\n precision, recall, thresh = precision_recall_curve(\n coded_reviews_df[column], y_score)\n\n\n prc_thresh = zip(precision, recall, thresh)\n prc_df = pd.DataFrame(prc_thresh, columns=['prec', 'recall', 'threshold'])\n prc_df2 = prc_df[prc_df.recall > .5]\n return prc_df2.threshold.iloc[prc_df2.prec.argmax()]", "def kbest(X, y, select_method, pipeline):\n\n # Fitting the tuned pipeline to the whole dataset and extracting the\n # selected features\n pipe = pipeline.fit(X=X, y=y)\n if select_method is 'enet':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .estimator_\n .coef_[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n elif select_method is 'f-test':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .scores_[pipe\n .named_steps['selector']\n .get_support()])\n else:\n raise ValueError(\"\"\"Must specify feature selection technique \n in select method\"\"\")\n \n # Getting feature names\n names = (X\n .columns\n .values[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n names_scores = list(zip(names, coefs))\n kbest_df = (pd\n .DataFrame(data=names_scores,\n columns=['Features',\n 'Coefs'])\n .sort_values(by='Coefs',\n ascending=False))\n\n # Filtering out zeroed coefficients from the elastic net that were not\n # removed in SelectFromModel\n if select_method is 'enet':\n kbest_df = kbest_df.loc[(kbest_df['Coefs'] != 0.000000)\n | kbest_df['Coefs'] != -0.000000]\n else:\n pass\n\n # Getting the tuned parameters\n optimal_params = pipeline.best_params_\n params_df = pd.DataFrame.from_dict(data=optimal_params,\n orient='index',\n columns=['Parameters'])\n best_inner_cv_test_score = pipeline.best_score_\n\n return kbest_df, params_df, best_inner_cv_test_score", "def bestThreshold2(lda_imlementation, vectorized_coded_revs, column, best_tops,\n coded_reviews_df):\n cat_array = lda_imlementation.transform(vectorized_coded_revs)\n\n topic_array = []\n for i in range(lda_imlementation.n_topics):\n coded_reviews_df['topic_'+str(i)] = cat_array[:, i]\n topic_array += ['topic_'+str(i)]\n\n # Compute ROC curve point\n y_score = sum([coded_reviews_df[top] for top in best_tops])\n precision, recall, thresh = precision_recall_curve(\n coded_reviews_df[column], y_score)\n\n\n prc_thresh = zip(precision, recall, thresh)\n prc_df = pd.DataFrame(prc_thresh, columns=['prec', 'recall', 'threshold'])\n prc_df['dist'] = ((1-prc_df.prec)**2 + (1-prc_df.recall)**2)**.5\n return prc_df.threshold.iloc[prc_df.dist.argmin()]", "def decision_threshold(x, y):\n \n model = DecisionTreeClassifier(max_depth=1, criterion='entropy')\n model.fit(x,y)\n print (\"-- Uncertainty Threshold: \", model.tree_.threshold[0])\n return model.tree_.threshold[0]", "def 
threshold_and_take_max_before_error(input_signal, target_signal, error_measure, thresh, default_min_value=-1):\n if thresh == None:\n thresh = (max(target_signal) + min(target_signal)) / 2.\n\n # check if default_min_value is coherent with the threshold\n if default_min_value >= thresh:\n raise Exception, 'the default value applied after the max is taken is equal or superior to the threshold.'\n\n input_signal_max = keep_max_for_each_time_step_with_default(input_signal, default_min_value=default_min_value)\n return error_measure(input_signal_max>thresh, target_signal>thresh)", "def selectFeatures(k_features=5, *args):\n X, y = args\n skb = SelectKBest(k=k_features)\n return skb.fit_transform(X, y)", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n grid()", "def find_best_threshold(eye_frame):\n average_iris_size = 0.48\n trials = {}\n\n for threshold in range(5, 100, 5):\n iris_frame = Pupil.image_processing(eye_frame, threshold)\n trials[threshold] = Calibration.iris_size(iris_frame)\n\n best_threshold, iris_size = min(trials.items(), key=(lambda p: abs(p[1] - average_iris_size)))\n return best_threshold", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n 
return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])" ]
[ "0.7637404", "0.6577474", "0.6432618", "0.63751316", "0.6364954", "0.61214995", "0.60790485", "0.6060021", "0.59685284", "0.59358245", "0.5918413", "0.58912176", "0.5870182", "0.58629036", "0.5786894", "0.5751271", "0.57167", "0.5685529", "0.56417114", "0.5627823", "0.56117946", "0.5611769", "0.55746514", "0.55448335", "0.55287135", "0.5502312", "0.5481259", "0.5453066", "0.5442718", "0.541766" ]
0.7630243
1
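Several of the negative snippets above follow the same recipe: sweep a grid of candidate cut-offs and keep the one that maximizes an F-score. A minimal sketch of that pattern, assuming scikit-learn is available (the function name, the grid, and the use of f1_score are illustrative and not taken from the dataset):

import numpy as np
from sklearn.metrics import f1_score

def best_f1_threshold(y_true, y_proba, grid=np.linspace(0.1, 0.9, 81)):
    # Score every candidate threshold and return the best (threshold, F1) pair.
    scores = [f1_score(y_true, (y_proba >= t).astype(int)) for t in grid]
    best = int(np.argmax(scores))
    return float(grid[best]), float(scores[best])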
COFICOSTFUNC Collaborative filtering cost function [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ... num_features, lambda) returns the cost and gradient for the collaborative filtering problem.
def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd):
    X = np.reshape(params[:num_movies*num_features], (num_movies, num_features))
    Theta = np.reshape(params[num_movies*num_features:], (num_users, num_features))

    # J=sum((X*Theta'-Y)^2) where R[i,j]==1
    h = X.dot(Theta.T)-Y
    M = h**2
    J = (M*R).sum()/2
    reg = lbd/2*((X**2).sum()+(Theta**2).sum())
    J = J+reg

    X_grad = (h*R).dot(Theta)+lbd*X
    Theta_grad = (h*R).T.dot(X)+lbd*Theta
    grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]
    return J, grad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, reg_lambda, returnCostOnly=False,\n returnGradOnly=False):\n\n # Unfold the U and W matrices from params\n X = params[0:num_movies * num_features].reshape((num_movies, num_features))\n Theta = params[num_movies * num_features:].reshape((num_users, num_features))\n\n errors = (X.dot(Theta.T) - Y) * R\n J = 1 / 2 * np.sum(np.sum(errors ** 2))\n\n penalty = (reg_lambda / 2) * (np.sum(np.sum(Theta ** 2)) + np.sum(np.sum(X ** 2)))\n J = J + penalty\n\n X_grad = errors.dot(Theta) + reg_lambda * X\n Theta_grad = errors.T.dot(X) + reg_lambda * Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n\n if returnGradOnly:\n return grad.flatten()\n if returnCostOnly:\n return J\n\n return J, grad", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def cost_function(param, Y, R, n_features):\r\n # theta (user, feature), (943, 10): user preference\r\n # X (movie, feature), (1682, 10): movie features\r\n n_movie, n_user = Y.shape\r\n X, theta = deserialize(param, n_movie, n_user, n_features)\r\n\r\n inner = np.multiply(X @ theta.T - Y, R)\r\n\r\n return np.power(inner, 2).sum() / 2", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def checkCostFunction(lbd=0):\n # Create small problem\n X_t = np.random.rand(4, 3)\n Theta_t = np.random.rand(5, 3)\n\n # Zap out most entries\n Y = X_t.dot(Theta_t.T)\n Y[np.random.rand(Y.shape[0], Y.shape[1]) > .5] = 0\n R = np.zeros(Y.shape)\n R[Y == 0] = 1\n\n # Run Gradient Checking\n X = np.random.randn(X_t.shape[0], X_t.shape[1])\n Theta = np.random.randn(Theta_t.shape[0], Theta_t.shape[1])\n num_users = Y.shape[1]\n num_movies = Y.shape[0]\n num_features = Theta_t.shape[1]\n\n def Jfunc(t):\n return cofiCostFunc(t, Y, R, num_users, num_movies, num_features, lbd)\n\n numgrad = computeNumericalGradient(Jfunc, np.r_[X.flatten(), Theta.flatten()])\n\n cost, grad = cofiCostFunc(np.r_[X.flatten(), Theta.flatten()], Y, R, num_users, num_movies, num_features, lbd)\n\n print(np.c_[numgrad, grad])\n print('The above two columns you get should be very similar.')\n print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n')\n\n diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)\n print('If your cost function implementation is correct, then')\n print('the relative difference will be small (less than 1e-9).')\n print('Relative Difference: %g\\n' % diff)", "def compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):\n h = compute_probabilities(X, theta, temp_parameter)\n\n cost = 0\n for i in range(X.shape[0]):\n for j in range(theta.shape[0]):\n if Y[i] == j:\n cost += np.log(h[j,i])\n\n cost = -cost / X.shape[0]\n\n theta = 
np.power(theta, 2)\n\n cost += lambda_factor / 2 * theta.sum()\n\n return cost", "def nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(\n (hidden_layer_size, input_layer_size + 1))\n Theta2 = nn_params[hidden_layer_size *\n (input_layer_size + 1):].reshape((num_labels, hidden_layer_size + 1))\n\n # Setup some useful variables\n m = X.shape[0]\n\n # Add ones to the X data matrix\n X = np.insert(X, 0, 1, axis=1)\n\n # Perform forward propagation for layer 2\n z2 = np.matmul(X, Theta1.transpose())\n a2 = sigmoid(z2)\n a2 = np.insert(a2, 0, 1, axis=1)\n z3 = np.matmul(a2, Theta2.transpose())\n a3 = sigmoid(z3)\n\n # turn Y into a matrix with a new column for each category and marked with 1\n y_one_hot = np.zeros_like(a3)\n for i in range(m):\n y_one_hot[i, y[i] - 1] = 1\n\n # Calculate the cost of our forward prop\n ones = np.ones_like(a3)\n A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \\\n np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))\n J = -1 / m * A.trace()\n J += lambda_ / (2 * m) * \\\n (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))\n\n # Perform backward propagation to calculate deltas & gradients\n delta3 = a3 - y_one_hot\n delta2 = np.matmul(delta3, Theta2[:, 1:]) * sigmoidGradient(z2)\n Theta2_grad = np.matmul(a2.transpose(), delta3).transpose()\n Theta1_grad = np.matmul(X.transpose(), delta2).transpose()\n\n Theta1_grad[:, 1:] += lambda_ * Theta1[:, 1:]\n Theta2_grad[:, 1:] += lambda_ * Theta2[:, 1:]\n Theta1_grad /= m\n Theta2_grad /= m\n grad = np.concatenate([Theta1_grad.reshape(-1), Theta2_grad.reshape(-1)])\n return J, grad", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def cost_function (model, X, y, lambda_reg=0.):\n\n m = len (y)\n pred = model.predict (X)\n cost = 1. / (2. * m) * ((pred - y)**2).sum () + \\\n lambda_reg / (2. * m) * (model.coef_**2).sum ()\n return (cost)", "def lrCostFunction(theta, X, y, lambda_):\n if X.ndim == 1:\n X = X.reshape(1, -1)\n\n if y.dtype == bool:\n y = y.astype(int)\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n #\n # Hint: The computation of the cost function and gradients can be\n # efficiently vectorized. For example, consider the computation\n #\n # sigmoid(X * theta)\n #\n # Each row of the resulting matrix will contain the value of the\n # prediction for that example. 
You can make use of this to vectorize\n # the cost function and gradient computations.\n #\n\n z = X @ theta\n h = sigmoid(z)\n\n theta_ = np.r_[0, theta[1:]]\n\n J = (-y @ np.log(h) - (1 - y) @ np.log(1 - h)) / m\n J += lambda_ * sum(theta_**2) / (2 * m)\n\n grad = (h - y) @ X / m\n grad += lambda_ * theta_ / m\n\n # =============================================================\n\n return J, grad", "def costFunctionReg(theta, X, y, Lambda):\n # Initialize some useful values\n m = len(y) # number of training examples\n j = costFunction(theta, X, y)\n j += (Lambda/(2*m))*np.sum(theta[1:]**2)\n return j", "def RatingsGradientDescent(params, Y, R, num_users, num_movies, num_features, lbd, alpha, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd)\n params = params-alpha*grad\n if i % 100 == 99:\n print('Step %i, cost=%f' % (i+1, J_history[i]))\n return params, J_history", "def crf_obj(x, train_data, c):\n print(\"Evaluating grad\")\n global iteration\n iteration +=1\n print(iteration)\n # x is a vector as required by the solver.\n logCrf = log_crf_wrapper(x,train_data, 128, 26, from_file=False)\n model = CRFModel(128, 26)\n model.load_X(x,from_file=False)\n W = model._W # column format\n T = model._T # column format\n # Compute the objective value of CRF\n f = (-c *logCrf) + (0.5 * np.sum(W*W)) + (0.5 * np.sum(T*T)) # objective log-likelihood + regularizer\n reg = np.concatenate([W.T.reshape(-1), T.T.reshape(-1)])\n g = grad_crf_wrapper(x, train_data, 128, 26, from_file=False)\n g = -c * g + reg\n return [f, g]", "def closure(model, optimizer, source_grad, source_clean_grad, source_gnorm): # noqa: D401\n input_indcs, source_indcs = self._index_mapping(model, inputs, sources)\n\n feature_model, last_layer = bypass_last_layer(model)\n new_inputs = torch.zeros_like(inputs)\n new_sources = torch.zeros_like(sources)\n for i in range(len(input_indcs)):\n new_inputs[i] = inputs[input_indcs[i]]\n new_sources[i] = sources[source_indcs[i]]\n\n outputs = feature_model(new_inputs)\n prediction = (last_layer(outputs).data.argmax(dim=1) == labels).sum()\n outputs_sources = feature_model(new_sources)\n prediction = (last_layer(outputs).data.argmax(dim=1) == labels).sum()\n feature_loss = (outputs - outputs_sources).pow(2).mean(dim=1).sum()\n feature_loss.backward(retain_graph=self.retain)\n return feature_loss.detach().cpu(), prediction.detach().cpu()", "def lrCostFunction(theta,X,y, lambda_reg):\n m = np.size(y)\n grad = np.zeros(np.size((theta)))\n J_base, grad = costFunction(theta, X, y)\n \n\n reg_cost = (lambda_reg / (2.0 * m)) * np.sum(theta[1:] ** 2)\n \n reg_gradient = (lambda_reg / m) * theta\n reg_gradient[0] = 0\n cost = J_base + reg_cost\n return cost, grad + reg_gradient", "def cost_function(params, count):\n circuit = models.Circuit(nqubits)\n for l in range(layers):\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(0, nqubits - 1, 2):\n circuit.add(gates.CZ(q, q + 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(1, nqubits - 2, 2):\n circuit.add(gates.CZ(q, q + 1))\n circuit.add(gates.CZ(0, nqubits - 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n\n cost = 0\n circuit.set_parameters(\n params\n ) # this will change all thetas to the appropriate values\n for i in range(len(ising_groundstates)):\n final_state = circuit(np.copy(ising_groundstates[i]))\n cost += 
np.real(encoder.expectation(final_state.state()))\n\n if count[0] % 50 == 0:\n print(count[0], cost / len(ising_groundstates))\n count[0] += 1\n\n return cost / len(ising_groundstates)", "def cost_function(X, y, theta, _lambda, num_labels, n_hidden_layers=1):\n m, n = X.shape\n intercept = ones((m, 1), dtype=float64)\n X = append(intercept, X, axis=1)\n\n _h = h(X, theta, n_hidden_layers) # model hypothesis\n\n J = 0\n for c in range(num_labels):\n _J = dot(1 - (y == c).T, log(1 - _h[:, c]))\n _J = _J + dot((y == c).T, log(_h[:, c]))\n J = J - (1 / m) * sum(_J)\n\n theta_squared_term = 0\n for j in range(len(theta)):\n theta_squared_term += sum(power(theta[j][:, 1:], 2))\n\n J = J + (_lambda / (2 * m)) * theta_squared_term\n\n return J", "def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n\n ### YOUR CODE HERE\n predicted = np.zeros(inputVectors.shape[1])\n for cw in contextWords:\n predicted += inputVectors[tokens[cw]]\n predicted /= C\n \n cost, sum_gradIn, gradOut = word2vecCostAndGradient(predicted, tokens[currentWord], outputVectors, dataset)\n # distribute sum_gradIn to each of context words\n for cw in contextWords: \n gradIn[tokens[cw]] += sum_gradIn / C\n ### END YOUR CODE\n return cost, gradIn, gradOut", "def cnn_model_fn(features, labels, mode):\n\t# Input Layer\n\t# Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\t# Our images are 400x400 pixels, and have one color channel (greyscale)\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 400, 400, 1])\n\n\t# Convolutional Layer #1\n\tconv1 = tf.layers.conv2d(\n\t\tinputs=input_layer,\n\t\tfilters=32,\n\t\tkernel_size=[20, 20],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t\n\n\n\t# Pooling Layer #1\n\t# First max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 400, 400, 32]\n\t# Output Tensor Shape: [batch_size, 200, 200, 32]\n\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n\t# Convolutional Layer #2\n\t# Computes 64 features using a 5x5 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 200, 200, 32]\n\t# Output Tensor Shape: [batch_size, 200, 200, 64]\n\tconv2 = tf.layers.conv2d(\n\t\tinputs=pool1,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #2\n\t# Second max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 200, 200, 64]\n\t# Output Tensor Shape: [batch_size, 100, 100, 64]\n\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n\n\t# Convolutional Layer #3\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 100, 100, 64]\n\t# Output Tensor Shape: [batch_size, 100, 100, 64]\n\tconv3 = tf.layers.conv2d(\n\t\tinputs=pool2,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #3\n\t# Second max pooling layer with a 4x4 filter and stride of 4\n\t# Input Tensor Shape: [batch_size, 100, 100, 64]\n\t# Output Tensor Shape: [batch_size, 50, 50, 64]\n\tpool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)\n\n\n\t# Convolutional Layer #4\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is 
added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 50, 50, 64]\n\t# Output Tensor Shape: [batch_size, 50, 50, 128]\n\tconv4 = tf.layers.conv2d(\n\t\tinputs=pool3,\n\t\tfilters=128,\n\t\tkernel_size=[5, 5],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\n\t# Convolutional Layer #4\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 50, 50, 128]\n\t# Output Tensor Shape: [batch_size, 50, 50, 64]\n\tconv5 = tf.layers.conv2d(\n\t\tinputs=conv4,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #4\n\t# Second max pooling layer with a 4x4 filter and stride of 4\n\t# Input Tensor Shape: [batch_size, 50, 50, 64]\n\t# Output Tensor Shape: [batch_size, 25, 25, 64]\n\tpool4 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[2, 2], strides=2)\n\n\n\t# Flatten tensor into a batch of vectors\n\t# Input Tensor Shape: [batch_size, 25, 25, 128]\n\t# Output Tensor Shape: [batch_size, 25 * 25 * 128]\n\tpool4_flat = tf.reshape(pool4, [-1, 25 * 25 * 64])\n\n\t# Dense Layer\n\t# Densely connected layer with 1024 neurons\n\t# Input Tensor Shape: [batch_size, 25 * 25 * 96]\n\t# Output Tensor Shape: [batch_size, 1024]\n\tdense1 = tf.layers.dense(inputs=pool4_flat, units=1024, activation=tf.nn.leaky_relu)\n\n\t# Dense Layer\n\t# Densely connected layer with 512 neurons\n\t# Input Tensor Shape: [batch_size, 1024]\n\t# Output Tensor Shape: [batch_size, 512]\n\tdense2 = tf.layers.dense(inputs=dense1, units=512, activation=tf.nn.leaky_relu)\n\n\t# Dense Layer\n\t# Densely connected layer with 512 neurons\n\t# Input Tensor Shape: [batch_size, 512]\n\t# Output Tensor Shape: [batch_size, 256]\n\tdense3 = tf.layers.dense(inputs=dense2, units=256, activation=tf.nn.leaky_relu)\n\n\t# Add dropout operation; 0.5 probability that element will be kept\n\tdropout = tf.layers.dropout(\n\t\tinputs=dense3, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\t# Logits layer\n\t# Input Tensor Shape: [batch_size, 512]\n\t# Output Tensor Shape: [batch_size, 6]\n\tlogits = tf.layers.dense(inputs=dropout, units=NUM_CLASSES)\n\n\t# Avoid NaN loss error by perturbing logits\n\tepsilon = tf.constant(1e-8)\n\tlogits = logits + epsilon \n\n\t\n\n\n\tpredictions = {\n\t\t# Generate predictions (for PREDICT and EVAL mode)\n\t\t\"classes\": tf.argmax(input=logits, axis=1),\n\t\t# Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n\t\t# `logging_hook`.\n\t\t\"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n\t}\n\n\tif mode == tf.estimator.ModeKeys.PREDICT:\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\t# Calculate Loss (for both TRAIN and EVAL modes)\n\tonehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=NUM_CLASSES)\n\tloss = tf.losses.softmax_cross_entropy(\n\t\tonehot_labels=onehot_labels, logits=logits)\n\n\t# Configure the Training Op (for TRAIN mode)\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.04)\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate=0.000006)\n\t\ttrain_op = optimizer.minimize(\n\t\t\tloss=loss,\n\t\t\tglobal_step=tf.train.get_global_step())\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\t# Add evaluation metrics (for EVAL mode)\n\teval_metric_ops = {\n\t\t\"accuracy\": tf.metrics.accuracy(\n\t\t\tlabels=labels, predictions=predictions[\"classes\"])}\n\treturn tf.estimator.EstimatorSpec(\n\t\tmode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def crps_cost_function(y_true, y_pred, theano=False):\n\n # Split input\n mu = y_pred[:, 0]\n sigma = y_pred[:, 1]\n # Ugly workaround for different tensor allocation in keras and theano\n if not theano:\n y_true = y_true[:, 0] # Need to also get rid of axis 1 to match!\n\n # To stop sigma from becoming negative we first have to convert it the the variance and then take the square root again. \n var = K.square(sigma)\n # The following three variables are just for convenience\n loc = (y_true - mu) / K.sqrt(var)\n phi = 1.0 / np.sqrt(2.0 * np.pi) * K.exp(-K.square(loc) / 2.0)\n Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))\n # First we will compute the crps for each input/target pair\n crps = K.sqrt(var) * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))\n # Then we take the mean. 
The cost is now a scalar\n \n return K.mean(crps)", "def cnn_model_fn(features, labels, mode):\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # The frames are 90x90 pixels, and have one grayscale color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 90, 90, 3])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 90, 90, 1]\n # Output Tensor Shape: [batch_size, 86, 86, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[3, 3],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 86, 86, 32]\n # Output Tensor Shape: [batch_size, 43, 43, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 90, 90, 1]\n # Output Tensor Shape: [batch_size, 86, 86, 32]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 86, 86, 32]\n # Output Tensor Shape: [batch_size, 43, 43, 32]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 90, 90, 1]\n # Output Tensor Shape: [batch_size, 86, 86, 32]\n conv3 = tf.layers.conv2d(\n inputs=pool2,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 86, 86, 32]\n # Output Tensor Shape: [batch_size, 43, 43, 32]\n pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 19, 19, 64]\n # Output Tensor Shape: [batch_size, 19 * 19 * 64]\n pool3_flat = tf.reshape(pool3, [-1, 11 * 11 * 32])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 19 * 19 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 5]\n logits = tf.layers.dense(inputs=dropout, units=5)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n error = tf.reduce_mean(loss, name=\"loss_tensor\")\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(labels=labels, predictions=predictions[\"classes\"]),\n \"precision\": tf.metrics.precision(labels=labels, predictions=predictions[\"classes\"]),\n \"confusion_matrix\": eval_confusion_matrix(labels=labels, predictions=predictions[\"classes\"]),\n \"recall\": tf.metrics.recall(labels=labels, predictions=predictions[\"classes\"])\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def cnn_model_fn(features, labels, mode):\n\t# Input Layer\n\t# Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\t# Modified MNIST images are 64x64 pixels, and have one color channel\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 64, 64, 1])\n\n\t# Convolutional Layer #1\n\t# Computes 32 features using a 5x5 filter with ReLU activation.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 64, 64, 1]\n\t# Output Tensor Shape: [batch_size, 64, 64, 32]\n\tconv1 = tf.layers.conv2d(\n\t\t\tinputs=input_layer,\n\t\t\tfilters=32,\n\t\t\tkernel_size=[5, 5],\n\t\t\tpadding=\"same\",\n\t\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #1\n\t# First max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 64, 64, 32]\n\t# Output Tensor Shape: [batch_size, 32, 32, 32]\n\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n\t# Convolutional Layer #2\n\t# Computes 64 features using a 5x5 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 32, 32, 32]\n\t# Output Tensor Shape: [batch_size, 32, 32, 64]\n\tconv2 = tf.layers.conv2d(\n\t\t\tinputs=pool1,\n\t\t\tfilters=64,\n\t\t\tkernel_size=[5, 5],\n\t\t\tpadding=\"same\",\n\t\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #2\n\t# Second max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 32, 32, 64]\n\t# Output Tensor Shape: [batch_size, 16, 16, 64]\n\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n\t# Flatten tensor into a batch of vectors\n\t# Input Tensor Shape: [batch_size, 16, 16, 64]\n\t# Output Tensor Shape: [batch_size, 16 * 16 * 64]\n\tpool2_flat = tf.reshape(pool2, [-1, 16 * 16 * 64])\n\n\t# Dense Layer\n\t# Densely connected layer with 1024 neurons\n\t# Input Tensor Shape: [batch_size, 16 * 16 * 64]\n\t# Output Tensor Shape: [batch_size, 1024]\n\tdense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n\t# Add dropout operation; 0.6 probability that element will be kept\n\tdropout = tf.layers.dropout(\n\t\t\tinputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\t# Logits layer\n\t# Input Tensor Shape: [batch_size, 1024]\n\t# Output Tensor Shape: [batch_size, 40]\n\tlogits = tf.layers.dense(inputs=dropout, units=40)\n\n\tpredictions = {\n\t\t\t# Generate predictions (for PREDICT and EVAL mode)\n\t\t\t\"classes\": tf.argmax(input=logits, axis=1),\n\t\t\t# Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n\t\t\t# `logging_hook`.\n\t\t\t\"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n\t}\n\tif mode == tf.estimator.ModeKeys.PREDICT:\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\t# Calculate Loss (for both TRAIN and EVAL modes)\n\tonehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=40)\n\tloss = tf.losses.softmax_cross_entropy(\n\t\t\tonehot_labels=onehot_labels, logits=logits)\n\n\t# Configure the Training Op (for TRAIN mode)\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n\t\ttrain_op = optimizer.minimize(\n\t\t\t\tloss=loss,\n\t\t\t\tglobal_step=tf.train.get_global_step())\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\t# Add evaluation metrics (for EVAL mode)\n\teval_metric_ops = {\n\t\t\t\"accuracy\": tf.metrics.accuracy(\n\t\t\t\t\tlabels=labels, predictions=predictions[\"classes\"])}\n\treturn tf.estimator.EstimatorSpec(\n\t\t\tmode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def objective6(X, Y):\n filter = ConstArrayExpr(numpy.load(kernel_blur_large))\n return conv2d(X, filter)", "def cnn(scaled_images, **kwargs):\n activ = tf.nn.relu\n # layer_1 = activ(conv(scaled_images, 'c1', n_filters=32,\n # filter_size=8, stride=4, init_scale=np.sqrt(2), **kwargs))\n layer_1 = activ(conv(scaled_images, 'c1', n_filters=64,\n filter_size=3, stride=2, init_scale=np.sqrt(2), **kwargs))\n layer_2 = activ(conv(layer_1, 'c2', n_filters=64,\n filter_size=3, stride=1, init_scale=np.sqrt(2), **kwargs))\n layer_3 = activ(conv(layer_2, 'c3', n_filters=64,\n filter_size=3, stride=1, init_scale=np.sqrt(2), **kwargs))\n layer_3 = conv_to_fc(layer_3)\n fc1 = activ(linear(layer_3, 'fc1', n_hidden=128, init_scale=np.sqrt(2)))\n fc2 = activ(linear(fc1, 'fc2', n_hidden=128, init_scale=np.sqrt(2)))\n return fc2", "def costFunction(R, W):\n costFunc = 0\n for i in range(0, len(R)):\n for j in range(i, len(R)):\n costFunc += costBetweenNodes(R, W, i, j)\n return costFunc", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n\tdataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\tcost = 0.0 \n\tgradIn = np.zeros(inputVectors.shape)\n\tgradOut = np.zeros(outputVectors.shape)\n\n\tpredicted_indices = [tokens[word] for word in contextWords]\n\tpredicted_vectors = inputVectors[predicted_indices]\n\tpredicted = np.sum(predicted_vectors, axis=0)\n\ttarget = tokens[currentWord]\n\tcost, gradIn_predicted, gradOut = \\\n\t\tword2vecCostAndGradient(predicted, target, outputVectors, dataset)\n\tfor i in predicted_indices:\n\t\tgradIn[i] += gradIn_predicted\n\n\treturn cost, gradIn, gradOut", "def cifar10_model_fn(features, labels, mode, params):\n features = tf.reshape(features, [-1, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])\n\n learning_rate_fn = resnet_run_loop.learning_rate_with_decay(\n batch_size=params['batch_size'], batch_denom=128,\n num_images=_NUM_IMAGES['train'], boundary_epochs=[10, 20, 30],\n decay_rates=[1, 0.1, 0.01, 0.001])\n\n # We use a weight decay of 0.0002, which performs better\n # than the 0.0001 that was originally suggested.\n weight_decay = 2e-4\n\n # Empirical testing showed that including batch_normalization variables\n # in the calculation of 
regularized loss helped validation accuracy\n # for the CIFAR-10 dataset, perhaps because the regularization prevents\n # overfitting on the small data set. We therefore include all vars when\n # regularizing and computing loss during training.\n def loss_filter_fn(_):\n return True\n\n return resnet_run_loop.resnet_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n model_class=Model,\n resnet_size=params['resnet_size'],\n weight_decay=weight_decay,\n learning_rate_fn=learning_rate_fn,\n momentum=0.9,\n data_format=params['data_format'],\n resnet_version=params['resnet_version'],\n loss_scale=params['loss_scale'],\n loss_filter_fn=loss_filter_fn,\n dtype=params['dtype'],\n fine_tune=params['fine_tune']\n )", "def getCostFunction(self, evalpts, observations, sigma=None, metric=lambda x: sum(x*x)):\n #XXX: better interface for sigma?\n def _(params):\n ind = 0\n for F, n, ofilt, icheck in zip(self._forwardFactories, self._inputs, \\\n self._outputFilters, self._inputCheckers):\n # check input #XXX: is this worthwile to do?\n my_params = params[ind:ind+n]\n checkQ = icheck(my_params, evalpts)\n if checkQ is not None:\n # some parameters are out of range... returns \"cost\"\n return checkQ\n\n Gm = F(params[ind:ind+n])\n if ind == 0:\n x = ofilt(Gm(evalpts)) \n else:\n x = x + ofilt(Gm(evalpts)) \n ind = ind+n\n if sigma is None:\n x = x - observations\n else:\n x = (x - observations) / sigma\n #return sum(real((conjugate(x)*x)))\n #return sum(x*x) \n return metric(x)\n return _", "def compute_cost(features, values, theta):\r\n \r\n # your code here\r\n error = (values - features.dot(theta))\r\n cost = error.dot(error) \r\n return cost" ]
[ "0.8002528", "0.72502065", "0.6108871", "0.57345927", "0.57172936", "0.5682062", "0.56794614", "0.5547408", "0.5493439", "0.5474869", "0.5458474", "0.54476273", "0.5372544", "0.5371089", "0.5368994", "0.5344341", "0.5344016", "0.5269173", "0.5230837", "0.5183692", "0.5162195", "0.5155345", "0.51521176", "0.51516503", "0.51457244", "0.5143634", "0.51397824", "0.51342076", "0.51160055", "0.51143414" ]
0.7602335
1
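Because cofiCostFunc above returns the regularized cost together with the unrolled gradient, it can be handed directly to a gradient-based optimizer. A hedged usage sketch; the toy matrix sizes, the random Y and R, the value of lbd, and the choice of scipy's L-BFGS-B are illustrative assumptions, and the cofiCostFunc definition from the record is assumed to be in scope:

import numpy as np
from scipy.optimize import minimize

num_movies, num_users, num_features = 5, 4, 3                     # toy sizes, not from the record
Y = np.random.rand(num_movies, num_users)                         # ratings matrix
R = (np.random.rand(num_movies, num_users) > 0.5).astype(float)   # 1 where a rating exists
lbd = 1.5                                                         # regularization strength

# Unroll X and Theta into a single parameter vector, as the cost function expects.
init = np.random.randn(num_movies * num_features + num_users * num_features)

res = minimize(cofiCostFunc, init,
               args=(Y, R, num_users, num_movies, num_features, lbd),
               jac=True, method='L-BFGS-B', options={'maxiter': 100})

# Fold the optimized vector back into the movie-feature and user-preference matrices.
X = res.x[:num_movies * num_features].reshape(num_movies, num_features)
Theta = res.x[num_movies * num_features:].reshape(num_users, num_features)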
COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" and gives us a numerical estimate of the gradient. numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical gradient of the function J around theta. Calling y = J(theta) should return the function value at theta.
def computeNumericalGradient(J, theta):
    numgrad = np.zeros(theta.size)
    perturb = np.zeros(theta.size)
    e = 1e-4
    for p in range(theta.size):
        # Set perturbation vector
        perturb[p] = e
        loss1 = J(theta - perturb)[0]
        loss2 = J(theta + perturb)[0]
        # Compute Numerical Gradient
        numgrad[p] = (loss2 - loss1) / (2*e)
        perturb[p] = 0
    return numgrad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeNumericalGradient(J, theta):\n numgrad = np.zeros_like(theta).reshape(-1)\n perturb = np.zeros_like(theta).reshape(-1)\n e = 1e-4\n for p in range(theta.size):\n # Set perturbation vector\n perturb[p] = e\n loss1, _ = J(theta - perturb.reshape(theta.shape))\n loss2, _ = J(theta + perturb.reshape(theta.shape))\n # Compute Numerical Gradient\n numgrad[p] = (loss2 - loss1) / (2 * e)\n perturb[p] = 0\n\n return numgrad.reshape(theta.shape)", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad", "def gradient_function(theta, X, y):\n\n grad = None\n #######################################################################\n # TODO: #\n # Compute the gradient for a particular choice of theta. #\n # Compute the partial derivatives and set grad to the partial #\n # derivatives of the cost w.r.t. each parameter in theta #\n # #\n #######################################################################\n \n theta = theta[:, np.newaxis]\n \n thetatrans = theta.T\n Xtrans = X.T\n \n MulThetaX = np.dot(thetatrans, Xtrans)\n \n h = sigmoid(MulThetaX)\n \n grad = (y - h) * Xtrans\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return grad", "def computeNumericalGradient(J, W):\n numgrad = np.zeros(W.shape)\n perturb = np.zeros(W.shape)\n epsilon = 1e-4\n for i in range(W.shape[0]):\n for j in range(W.shape[1]):\n perturb[i][j] = epsilon\n loss1, _ = J(W - perturb)\n loss2, _ = J(W + perturb)\n numgrad[i][j] = (loss2 - loss1) / (2 * epsilon)\n perturb[i][j] = 0\n return numgrad", "def gradient(self, theta):\n pass", "def gradient(self, theta):\n pass", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. 
/ (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def eval_numerical_gradient(f,x):\n\n\tgrad = np.zeros(x.shape)\n\th = 0.0001\n\n\t# iterate over all indexes in x\n\tit = np.nditer(x, flag = ['multi_index'], op_flags = ['readwrite'])\n\n\twhile not it.finished:\n\t\tix = it.multi_index\n\t\told_value = x[ix]\n\n\t\tx[ix] = old_value + h\n\t\tfxh_left = f(x)\n\n\t\tx[ix] = old_value - h\n\t\tfxh_right = f(x)\n\n\t\tx[ix] = old_value\n\n\t\t# compute the partial derivative\n\t\tgrad[ix] = (fxh_left - fxh_right) / (2 * h)\n\t\tit.iterate()\n\n\treturn grad", "def gradient(self, theta):\n return np.zeros([theta.shape[0]])", "def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )", "def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #The true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO\n e_i = np.zeros(num_features)\n for k in range(num_features):\n e_i[k] = 1\n approx_grad[k] = (compute_square_loss(X, y, theta+epsilon*e_i)-compute_square_loss(X, y, theta-epsilon*e_i))/(2*epsilon) \n e_i[k] = 0\n\n return np.sqrt(sum((true_gradient-approx_grad)**2)) < tolerance", "def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 != theta.shape[0]):\n return None\n\n x_padded = np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n \n return J, grad", "def numerical_gradient(f, x: np.ndarray):\n h = 1e-4\n grad = 
np.zeros_like(x)\n for i in range(x.size):\n tmp_val = x.flat[i]\n x.flat[i] = tmp_val + h\n fxh1 = f(x)\n\n x.flat[i] = tmp_val - h\n fxh2 = f(x)\n grad.flat[i] = (fxh1 - fxh2) / (2 * h)\n x.flat[i] = tmp_val\n return grad", "def gradcovfunc(self, theta, d):\n sigmaf, l = theta[:2] \n xxl = np.sum((d/l)**2, axis=1)\n dk_dsigmaf = 2 * sigmaf * np.exp(-xxl/2.)\n dk_dl = sigmaf**2/l * xxl * np.exp(-xxl/2.)\n grad = np.array([dk_dsigmaf, dk_dl])\n return grad", "def gradient(self):\n functional = self\n\n class KLCrossEntCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\"\"\"\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)\n\n return KLCrossEntCCGradient()", "def test_CRot_gradient(self, theta, tol):\n dev = qml.device(\"default.qubit\", wires=2)\n a, b, c = np.array([theta, theta ** 3, np.sqrt(2) * theta])\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def circuit(a, b, c):\n qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)\n qml.CRot(a, b, c, wires=[0, 1])\n return qml.expval(qml.PauliX(0))\n\n res = circuit(a, b, c)\n expected = -np.cos(b / 2) * np.cos(0.5 * (a + c))\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n grad = qml.grad(circuit)(a, b, c)\n expected = np.array(\n [\n [\n 0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),\n 0.5 * np.sin(b / 2) * np.cos(0.5 * (a + c)),\n 0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),\n ]\n ]\n )\n assert np.allclose(grad, expected, atol=tol, rtol=0)", "def num_grad(theta, X, y, lambda_, e=1e-3):\n # ... dopolnite (naloga 1, naloga 2)\n return np.array([(cost(theta + eps, X, y, lambda_) - cost(theta - eps, X, y, lambda_)) / (2 * e)\n for eps in np.identity(len(theta)) * e])", "def gradient(self):\n functional = self\n\n class KLCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in points where one or more\n components are larger than or equal to one.\n \"\"\"\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)\n\n return KLCCGradient()", "def compute_gradient(self, grad=None):\n x = self.input_nodes[0].output_value\n if grad is None:\n grad = backend.ones_like(self.output_value)\n if x == float('inf') or x == float('-inf'):\n return grad * float('inf')\n else:\n return grad * 1 / x", "def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. 
each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n x, = self.input_nodes[0].output_value\n return backend.exp(x) * grad", "def gradient(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n dC = self.dC_func(u, order=1)\n P = self.P\n numerator = np.sum((C - P) * dC, axis=0)\n denominator = np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))\n if np.abs(denominator) > 0:\n gradient = numerator/denominator\n else:\n gradient = np.asarray(0)[np.newaxis]\n return gradient" ]
[ "0.7404099", "0.6591238", "0.6295547", "0.62560546", "0.62262666", "0.6171367", "0.6171367", "0.6063131", "0.60142076", "0.59528935", "0.59496725", "0.59108096", "0.59023327", "0.58785945", "0.5851465", "0.58256984", "0.58197623", "0.575046", "0.5730485", "0.57108414", "0.570772", "0.5706205", "0.57023257", "0.57000613", "0.56650436", "0.5659369", "0.5648764", "0.5642127", "0.56397", "0.56252354" ]
0.74444306
0
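computeNumericalGradient above expects a cost function J(theta) that returns a (cost, gradient) pair and central-differences the cost term. A small self-contained sanity check on a quadratic with a known gradient (the test function and the expected tolerance are illustrative assumptions, and the computeNumericalGradient definition from the record is assumed to be in scope):

import numpy as np

def quad_cost(theta):
    # 0.5 * theta' A theta has the analytic gradient A theta.
    A = np.diag([1.0, 2.0, 3.0])
    return 0.5 * theta.dot(A).dot(theta), A.dot(theta)

theta = np.random.randn(3)
numgrad = computeNumericalGradient(quad_cost, theta)
_, grad = quad_cost(theta)

# The relative difference should be tiny (on the order of 1e-9) when the two agree.
diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
print(diff)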
CHECKCOSTFUNCTION Creates a collaborative filtering problem to check your cost function and gradients CHECKCOSTFUNCTION(lambda) Creates a collaborative filtering problem to check your cost function and gradients; it will output the analytical gradients produced by your code and the numerical gradients (computed using computeNumericalGradient). These two gradient computations should result in very similar values.
def checkCostFunction(lbd=0):
    # Create small problem
    X_t = np.random.rand(4, 3)
    Theta_t = np.random.rand(5, 3)

    # Zap out most entries
    Y = X_t.dot(Theta_t.T)
    Y[np.random.rand(Y.shape[0], Y.shape[1]) > .5] = 0
    R = np.zeros(Y.shape)
    R[Y != 0] = 1

    # Run Gradient Checking
    X = np.random.randn(X_t.shape[0], X_t.shape[1])
    Theta = np.random.randn(Theta_t.shape[0], Theta_t.shape[1])
    num_users = Y.shape[1]
    num_movies = Y.shape[0]
    num_features = Theta_t.shape[1]

    def Jfunc(t):
        return cofiCostFunc(t, Y, R, num_users, num_movies, num_features, lbd)

    numgrad = computeNumericalGradient(Jfunc, np.r_[X.flatten(), Theta.flatten()])

    cost, grad = cofiCostFunc(np.r_[X.flatten(), Theta.flatten()], Y, R, num_users, num_movies, num_features, lbd)

    print(np.c_[numgrad, grad])
    print('The above two columns you get should be very similar.')
    print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\n')

    diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)
    print('If your cost function implementation is correct, then')
    print('the relative difference will be small (less than 1e-9).')
    print('Relative Difference: %g\n' % diff)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_function(symbol, forward=None, backward=None, grad_input_vars=None,\n shape=None, dtype=None, in_range=None, values=None,\n exclude_targets=None, only_targets=None,\n additional_params=None,\n numerical_grads=None, numerical_grads_params=None,\n atol=1e-5, rtol=1e-5, quiet=False):\n # validate and preprocess the input params\n if numerical_grads is None and forward is None and backward is None:\n raise ValueError(\"No reference function was passed to check_function. If you only want to \"\n \"check gradients numerically, pass numerical_grads=True explicitly.\")\n\n if numerical_grads is None:\n numerical_grads = 'if_possible'\n\n if numerical_grads not in [False, True, 'if_possible']:\n raise ValueError(\"numerical_grads must be a bool or 'if_possible', not {}\"\n .format(numerical_grads))\n\n if additional_params is None:\n additional_params = {}\n\n input_vars = symbol.list_input_variables()\n input_dict = {x.attr('name'): x for x in input_vars}\n\n if grad_input_vars is None:\n grad_input_vars = sorted(input_vars, key=lambda x: x.attr('name'))\n else:\n grad_input_vars = [input_dict[x] if isinstance(x, str) else x for x in grad_input_vars]\n\n in_range = _dict_var_to_dict_str(in_range)\n values = _dict_var_to_dict_str(values)\n\n out_len = len(symbol.list_output_names())\n\n # Infer the output shapes and dtypes, and preprocess the shape and dtype params\n forward_graph, shape, dtype, out_shapes, out_dtypes = \\\n infer_shapes_dtypes(nnvm.graph.create(symbol), shape=shape, dtype=dtype,\n fallback_dtype='float32')\n\n if not all(out_shapes) or not all(out_dtypes):\n if not quiet:\n print(forward_graph.ir(join_node_attrs=['shape', 'dtype']))\n raise ValueError(\"Could not infer shapes or dtypes for outputs.\\n\"\n \"out_shapes = {}\\nout_dtypes = {}\".format(out_shapes, out_dtypes))\n\n backward_graph = None\n\n # If we want gradients, we have to recreate the graph, but now with gradient computations\n # Note that here we need out_shapes for defining the shape of head grads, so we have to\n # create the graph twice\n if backward is not None or numerical_grads:\n try:\n head_grads_symbols = [nnvm.symbol.Variable(\"head_grads_\" + str(i),\n shape=out_shapes[i],\n dtype=DTYPE_TO_TCODE[out_dtypes[i]])\n for i in range(out_len)]\n grad_symbols = graph_util.gradients([symbol], grad_input_vars,\n grad_ys=head_grads_symbols)\n # Sometimes grads do not depend on head_grads, so head_grads does not appear\n # in the variable list; adding it manually prevents this, making things a bit easier\n backward_graph = \\\n nnvm.graph.create(nnvm.symbol.Group([symbol] + grad_symbols + head_grads_symbols))\n\n backward_graph, shape, dtype, out_shapes, out_dtypes = \\\n infer_shapes_dtypes(backward_graph, shape=shape, dtype=dtype,\n fallback_dtype='float32')\n except nnvm._base.NNVMError as err:\n if backward is None and numerical_grads == \"if_possible\":\n logging.warning(\"Won't check gradients because: %s\", str(err).split('\\n', 1)[0])\n numerical_grads = False\n backward_graph = None\n else:\n raise\n\n main_graph = backward_graph if backward_graph is not None else forward_graph\n\n # Generate random data for inputs (including head_grads)\n\n np_inputs = {}\n\n for x in main_graph.symbol.list_input_variables():\n x_name = x.attr('name')\n x_shape = shape[x_name]\n x_dtype = dtype[x_name]\n\n if values is not None and x_name in values:\n np_inputs[x_name] = values[x_name].astype(x_dtype)\n continue\n\n low = -1.0\n high = 1.0\n if in_range is not None:\n if isinstance(in_range, dict):\n if 
x_name in in_range:\n low = in_range[x_name][0]\n high = in_range[x_name][1]\n else:\n low = in_range[0]\n high = in_range[1]\n\n np_inputs[x_name] = np.random.uniform(size=x_shape, low=low, high=high).astype(x_dtype)\n\n np_inputs_without_head_grads = {k: np_inputs[k] for k in np_inputs\n if not k.startswith('head_grads_')}\n\n nothing_was_done = True\n\n # Compute and compare the results\n for target, ctx in ctx_list():\n if exclude_targets is not None:\n if target in exclude_targets or str(target) in exclude_targets:\n logging.info(\"Skipping target = %s, ctx = %s\", target, ctx)\n continue\n if only_targets is not None:\n if target not in only_targets and str(target) not in only_targets:\n logging.info(\"Skipping target = %s, ctx = %s\", target, ctx)\n continue\n\n logging.info(\"Checking computation on target = %s, ctx = %s\", target, ctx)\n\n debug_stage = None\n\n try:\n nnvm_res = None\n\n debug_stage = \"compiling\"\n main_function = graph_to_function(main_graph, target, ctx)\n\n # nnvm_res contains the output and gradients (if they are needed)\n debug_stage = \"running\"\n nnvm_res = main_function(**np_inputs)\n\n try:\n logging.debug(\"checking to_relay conversion\")\n inputs = np_inputs_without_head_grads.copy()\n func, inputs = to_relay(main_graph, shape, dtype, params=inputs)\n with relay.build_config(opt_level=3):\n graph, lib, params = relay.build(func, target=target)\n m = graph_runtime.create(graph, lib, ctx)\n m.set_input(**inputs)\n m.set_input(**params)\n m.run()\n for i in range(out_len):\n relay_out = m.get_output(i).asnumpy()\n tvm.testing.assert_allclose(nnvm_res[i], relay_out, atol=atol, rtol=rtol)\n except NotImplementedError as err:\n # the NNVM operator is not supported yet\n logging.warning(err)\n\n if backward_graph is not None:\n grad_var_names = [x.attr('name') for x in grad_input_vars]\n nnvm_grads = {x: v for x, v in zip(grad_var_names, nnvm_res[out_len:])}\n\n if forward is not None:\n nothing_was_done = False\n debug_stage = \"checking forward computation\"\n logging.debug(debug_stage)\n\n params = {}\n params.update(np_inputs_without_head_grads)\n params.update(additional_params)\n numpy_res = forward(**params)\n\n if isinstance(numpy_res, tuple):\n numpy_res = list(numpy_res)\n\n if not isinstance(numpy_res, list):\n numpy_res = [numpy_res]\n\n if len(numpy_res) != out_len:\n raise ValueError(\"Forward function returned {} values, but \"\n \"the nnvm graph returns {} values\"\n .format(len(numpy_res), out_len))\n\n for i in range(out_len):\n tvm.testing.assert_allclose(nnvm_res[i], numpy_res[i], atol=atol, rtol=rtol)\n\n if backward is not None:\n nothing_was_done = False\n debug_stage = \"checking gradients\"\n logging.debug(debug_stage)\n\n np_head_grads = [np_inputs[\"head_grads_\" + str(i)] for i in range(out_len)]\n\n if out_len == 1:\n np_head_grads = np_head_grads[0]\n\n params = {'head_grads': np_head_grads}\n params.update(np_inputs_without_head_grads)\n params.update(additional_params)\n numpy_grads = backward(**params)\n\n if not isinstance(numpy_grads, dict):\n if isinstance(numpy_grads, tuple):\n numpy_grads = list(numpy_grads)\n if not isinstance(numpy_grads, list):\n numpy_grads = [numpy_grads]\n numpy_grads = {x: v for x, v in zip(grad_var_names, numpy_grads)}\n if len(numpy_grads) != len(grad_var_names):\n raise ValueError(\"The backward function returns a list of gradients which \"\n \"does not contain gradients for these variables: {}\"\n .format(set(grad_var_names) - set(numpy_grads)))\n\n for x_name in numpy_grads:\n 
tvm.testing.assert_allclose(nnvm_grads[x_name], numpy_grads[x_name],\n atol=atol, rtol=rtol)\n\n if numerical_grads:\n nothing_was_done = False\n debug_stage = \"checking gradients numerically\"\n logging.debug(debug_stage)\n\n forward_function = graph_to_function(forward_graph, target, ctx)\n\n # Since the result may be non-scalar, we have to put another operation on the top,\n # so we just multiple by the randomly generated head_grads and then sum everything.\n # This way we can reuse the gradient values which has been already computed.\n def scalar_function(**kwargs):\n res = forward_function(**kwargs)\n return np.sum([np.dot(np_inputs['head_grads_' + str(i)].ravel(), res[i].ravel())\n for i in range(out_len)])\n\n if numerical_grads_params is None:\n numerical_grads_params = {}\n\n check_numerical_grads(\n scalar_function,\n input_values=np_inputs_without_head_grads,\n grad_values=nnvm_grads,\n **numerical_grads_params)\n\n except:\n if not quiet:\n print(\"\\ncheck_function failed while {}, here is the main graph\"\n .format(debug_stage))\n print(main_graph.ir(join_node_attrs=['shape', 'dtype']))\n if nnvm_res is not None:\n print(\"Generated inputs:\")\n print(np_inputs)\n print()\n raise\n\n if nothing_was_done:\n logging.warning(\"Nothing was done in check_function. Check ctx_list().\")", "def grad_check_sparse(f, net_x, net_y, param, analytic_grad, num_checks=10, h=1e-5):\n param_shape = param.shape.eval()\n #f_x = f(net_x, net_y)\n for i in xrange(num_checks):\n ix = tuple([randrange(m) for m in param_shape])\n param_val = param.get_value()\n oldval = param_val[ix]\n \n param_val[ix] = oldval + h # increment by h\n param.set_value(param_val)\n fxph = f(net_x, net_y) # evaluate f(x + h)\n \n param_val[ix] = oldval - h # increment by h\n param.set_value(param_val)\n fxmh = f(net_x, net_y) # evaluate f(x - h)\n \n param_val[ix] = oldval # reset\n param.set_value(param_val)\n \n grad_numerical = (fxph - fxmh) / (2 * h)\n grad_analytic = analytic_grad[ix]\n \n rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))\n print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))", "def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True):\n tupled_inputs = _as_tuple(inputs)\n\n # Make sure that gradients are saved for all inputs\n any_input_requiring_grad = False\n for inp in tupled_inputs:\n if isinstance(inp, tf.Tensor):\n if _requires_grad(inp):\n if inp.dtype != tf.float64:\n warnings.warn(\n 'At least one of the inputs that requires gradient '\n 'is not of double precision floating point. '\n 'This check will likely fail if all the inputs are '\n 'not of double precision floating point. 
')\n any_input_requiring_grad = True\n # inp.retain_grad()\n if not any_input_requiring_grad:\n raise ValueError(\n 'gradcheck expects at least one input tensor to require gradient, '\n 'but none of the them have requires_grad=True.')\n\n output = _differentiable_outputs(func(*tupled_inputs))\n\n def fail_test(msg):\n if raise_exception:\n raise RuntimeError(msg)\n return False\n\n for i, o in enumerate(output):\n if not _requires_grad(o):\n continue\n\n def fn(input):\n return _as_tuple(func(*input))[i]\n\n analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(tupled_inputs, o)\n numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)\n\n if not correct_grad_sizes:\n return fail_test('Analytical gradient has incorrect size')\n\n for j, (a, n) in enumerate(zip(analytical, numerical)):\n if _numel(a) != 0 or _numel(n) != 0:\n if not allclose(a, n, rtol, atol):\n return fail_test('Jacobian mismatch for output %d with respect to input %d,\\n'\n 'numerical:%s\\nanalytical:%s\\n' % (i, j, n, a))\n\n if not reentrant:\n return fail_test('Backward is not reentrant, i.e., running backward with same '\n 'input and grad_output multiple times gives different values, '\n 'although analytical gradient matches numerical gradient')\n\n # check if the backward multiplies by grad_output\n with tf.GradientTape(persistent=True) as tape:\n output = _differentiable_outputs(func(*tupled_inputs))\n\n if any([_requires_grad(o) for o in output]):\n diff_input_list = list(iter_tensors(tupled_inputs, True))\n grads_input = tape.gradient(output, diff_input_list, [tf.zeros_like(o) for o in output])\n\n if not len(grads_input) == 0:\n raise RuntimeError(\"no Tensors requiring grad found in input\")\n\n # grads_input = torch.autograd.grad(output, diff_input_list, [torch.zeros_like(o) for o in output],\n # allow_unused=True)\n for gi, i in zip(grads_input, diff_input_list):\n if gi is None:\n continue\n if not tf.reduce_all(tf.equal(gi, 0)):\n return fail_test('backward not multiplied by grad_output')\n if gi.dtype != i.dtype:\n return fail_test(\"grad is incorrect type\")\n if gi.shape != i.shape:\n return fail_test('grad is incorrect size')\n\n return True", "def generic_gradient_checker(X, y, theta, objective_func, gradient_func, epsilon=0.01, tolerance=1e-4):\n #TODO", "def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)", "def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = 
np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def return_terminal_cost_func(TerminalCost='Minimize final angle',\n ReturnGradientAndHessian=False):\n if type(TerminalCost)==str:\n assert TerminalCost in ['Minimize final angle from target angle',\n 'Minimize final angular velocity from target angular velocity'],\\\n \"TerminalCost must be either 'Minimize final angle from target angle' (Default), 'Minimize final angular velocity from target angular velocity'.\"\n else:\n assert type(TerminalCost)==list, \"TerminalCost must be a list of cost types.\"\n for el in TerminalCost:\n assert type(el)==str, \"Each element of TerminalCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize final angle from target angle',\n 'Minimize final angular velocity from target angular velocity'],\\\n \"Each element of TerminalCost must be either 'Minimize final angle from target angle' (Default), 'Minimize final angular velocity from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize final angle from target angle\" in TerminalCost:\n result1 = lambda X,U,dt: k4*(1/2)*(X[0,-1]-TargetAngle)**2\n result1_grad = lambda X,U,dt:\\\n np.matrix([[k4*(X[0,-1]-TargetAngle)],[0]])\n result1_hess = lambda X,U,dt: np.matrix([[k4*1,0],[0,0]])\n else:\n result1 = lambda X,U,dt: 0\n result1_grad = lambda X,U,dt:\\\n np.matrix([[0],[0]])\n result1_hess = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize final angular velocity from target angular velocity\" in TerminalCost:\n result2 = lambda X,U,dt: k5*(1/2)*(X[1,-1]-TargetAngularVelocity)**2\n result2_grad = lambda X,U,dt:\\\n np.matrix([[0],[k5*(X[1,-1]-TargetAngularVelocity)]])\n result2_hess = lambda X,U,dt: np.matrix([[0,0],[0,k5*1]])\n else:\n result2 = lambda X,U,dt: 0\n result2_grad = lambda X,U,dt:\\\n np.matrix([[0],[0]])\n result2_hess = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt)\n if ReturnGradientAndHessian:\n result_grad = lambda X,U,dt: result1_grad(X,U,dt) \\\n + result2_grad(X,U,dt)\n result_hess = lambda X,U,dt: result1_hess(X,U,dt) \\\n + result2_hess(X,U,dt)\n return(result,result_grad,result_hess)\n else:\n return(result)", "def test_check_cost():", "def generic_gradient_checker(X, y, theta, objective_func, gradient_func, epsilon=0.01, tolerance=1e-4):\n #TODO\n true_gradient = gradient_func(X, y, theta) #The true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO\n e_i = np.zeros(num_features)\n for k in range(num_features):\n e_i[k] = 1\n approx_grad[k] = (objective_func(X, y, theta+epsilon*e_i)-objective_func(X, y, theta-epsilon*e_i))/(2*epsilon) \n e_i[k] = 0\n\n return np.sqrt(sum((true_gradient-approx_grad)**2)) < tolerance", "def checkNNGradients(Lambda=0):\n\n input_layer_size = 3\n hidden_layer_size = 5\n num_labels = 3\n m = 5\n\n # We generate some 'random' test data\n Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size)\n Theta2 = debugInitializeWeights(num_labels, hidden_layer_size)\n # print(Theta1.shape, Theta2.shape)\n\n # Reusing debugInitializeWeights to generate X\n X = debugInitializeWeights(m, input_layer_size - 1)\n y = np.mod(range(1, m+1), 
num_labels)\n # print(X.shape, y.shape)\n\n # Short hand for cost function\n net = Net(input_layer_size, hidden_layer_size, num_labels, Theta1, Theta2)\n numgrad = net.computeNumericalGradient(X, y, Lambda)\n _, grad = net.costFunction(X, y, Lambda)\n\n for key, _ in grad.items():\n # Visually examine the two gradient computations. The two columns\n # you get should be very similar.\n print(np.column_stack((numgrad[key].T.ravel(), grad[key].T.ravel())))\n\n print('The above two columns you get should be very similar.\\n' \\\n '(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n\\n')\n\n # Evaluate the norm of the difference between two solutions.\n # If you have a correct implementation, and assuming\n # you used EPSILON = 0.0001\n # in computeNumericalGradient.m, then diff below should be less than 1e-9\n diff = np.linalg.norm(numgrad[key].T.ravel()-grad[key].T.ravel())\n diff /= np.linalg.norm(numgrad[key].T.ravel()+grad[key].T.ravel())\n\n print('If your backpropagation implementation is correct, then\\n ' \\\n 'the relative difference will be small (less than 1e-9). \\n' \\\n '\\nRelative Difference: %g\\n' % diff)", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def test_gradient(gradient, thetas, activations_neural, classification_matrix, lambda_value=1, step=1E-4, tolerance=1E-4):\n \n dimensional_error(thetas[-1].shape, gradient[-1].shape)\n\n last_thetas = thetas[-1]\n \n last_thetas_plus_step = thetas[-1] + step\n last_thetas_minus_step = thetas[-1] - step\n\n num_grad_total = pd.DataFrame()\n\n for i in range( gradient[-1].shape[0] ):\n\n\n last_thetas_plus = pd.concat( [last_thetas[0:i], last_thetas_plus_step[i:i+1] , last_thetas[i+1:]] , axis=0 )\n\n last_thetas_minus = pd.concat( [last_thetas[0:i], last_thetas_minus_step[i:i+1], last_thetas[i+1:]] , axis=0 )\n\n last_activation_plus = activation_values(activations_neural[-2], last_thetas_plus ).to_numpy()\n last_activation_minus = activation_values(activations_neural[-2], last_thetas_minus).to_numpy()\n\n cost_plus = cost_function_sigmoid([last_activation_plus] , classification_matrix, [last_thetas_plus] , lambda_value)\n cost_minus = cost_function_sigmoid([last_activation_minus], classification_matrix, [last_thetas_minus], lambda_value)\n\n num_grad = (cost_plus - cost_minus)/(2*step) # it's a column DataFrame\n num_grad_total = pd.concat([num_grad_total, num_grad], axis=1)\n\n num_grad_total = num_grad_total.T\n\n dimensional_error(num_grad_total.shape, gradient[-1].shape)\n\n num_grad_total.index = gradient[-1].index\n num_grad_total.columns = gradient[-1].columns\n\n _ = ( np.abs( gradient[-1].to_numpy() - num_grad_total.to_numpy() ) <= tolerance )\n\n return _, num_grad_total", "def compute_cost(AL,Y,cost_function_name):\n cost_functions = {\n \"cost_func_1\": cf.cost_function_1\n } \n\n activ_func = 
cost_functions.get(cost_function_name,lambda : \"Invalid Cost Function Name !\")\n\n cost,dAL = activ_func(AL,Y)\n\n return cost, dAL", "def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):\n\n for i in range(num_checks):\n ix = tuple([randrange(m) for m in x.shape])\n\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x + h)\n x[ix] = oldval - h # increment by h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # reset\n\n grad_numerical = (fxph - fxmh) / (2 * h)\n grad_analytic = analytic_grad[ix]\n rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))\n print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n #print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta -',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. 
Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True", "def check_model_gradient(model, X, y,\n delta=1e-5, tol=1e-4,\n check_inputs=False):\n\n if not isinstance(X, list):\n X = [X]\n\n if check_inputs:\n for input_key in range(len(X)):\n print(f'Checking gradient for model input #{input_key}')\n\n def helper_func(x):\n this_X = [CP.cp.copy(tX) for tX in X]\n this_X[input_key] += x\n loss = model.compute_loss_and_gradients(this_X, y)\n out_loss = loss['output_losses']\n reg_loss = loss['regularization_loss']\n loss = np.sum(out_loss) + reg_loss\n input_grads = model.input_grads[input_key]\n if isinstance(input_grads, list):\n input_grads = input_grads[0]\n return loss, input_grads\n\n zero_X = CP.cp.zeros_like(X[input_key])\n\n if not check_gradient(helper_func, zero_X, delta, tol):\n return False\n\n params = model.params()\n\n for param_key in params:\n print(f'Checking gradient for {param_key}')\n param = params[param_key]\n initial_w = param.value\n\n def helper_func(w):\n param.value = w\n loss = model.compute_loss_and_gradients(X, y)\n out_loss = loss['output_losses']\n reg_loss = loss['regularization_loss']\n loss = np.sum(out_loss) + reg_loss\n grad = param.grad\n return loss, grad\n\n if not check_gradient(helper_func, initial_w, delta, tol):\n return False\n\n return True", "def cross_validation(lambd_values = [0.1], maxfun_values = [200]):\n \n n_lambd, n_maxfun = len(lambd_values), len(maxfun_values)\n \n # Creation of the DataFrame where the results are to be stored\n df_results = pd.DataFrame(index = range(n_lambd * n_maxfun))\n df_results['Maxfun'], df_results['Lambd'] = list(maxfun_values) * n_lambd, list(lambd_values) * n_maxfun\n df_results['Hidden layers'] = num_of_hidden_layers\n nodes_avg = np.mean(layers[1:-1])\n df_results['Nodes per hidden layer (avg)'] = nodes_avg\n accuracy_col = []\n \n for lambd in lambd_values:\n \n for maxfun in maxfun_values:\n \n start = time() # start of the timer\n \n res = opt.fmin_l_bfgs_b(costFunction, nn_weights, fprime = backwards, args = (layers, images_validation, labels_training, num_labels, lambd), maxfun = maxfun, factr = 1., disp = True)\n Theta = roll_params(res[0], layers)\n \n # input('\\nProgram paused. Press enter to continue!!!')\n \n # print(\"\\nTesting Neural Network... 
\\n\")\n \n pred = predict(Theta, images_test)\n end = time() # end of the timer\n accuracy = np.mean(labels_test == pred) * 100\n print('\\nLambda =', lambd)\n print('Maxfun =', maxfun)\n time_complexity = end - start\n print('Time:', time_complexity, 'seconds')\n print('Accuracy =', accuracy, '%')\n \n # Modification of the 'Accuracy' column\n accuracy_col.append(accuracy)\n \n # Accuracy values stored into the dataframe\n df_results['Accuracy'] = accuracy_col\n \n return df_results", "def check_cost_operator(C, obj_f, offset=0):\n m_diag = cost_operator_to_vec(C, offset=offset)\n m_diag = np.real(get_adjusted_state(m_diag))\n for k, v in state_to_ampl_counts(m_diag, eps=-1).items():\n x = np.array([int(_k) for _k in k])\n assert(np.isclose(obj_f(x), v))", "def gradient_check(op, *args, **kwargs):\n\n if( not 'id_list' in kwargs.keys() ):\n kwargs.update({\"id_list\":[0]})\n\n id_list = kwargs.get(\"id_list\", [0])\n\n for i in id_list:\n\n if(not isinstance(args[i], Variable)):\n raise Exception(\"input {:g} is not a variable\".format(i))\n\n if(isinstance(args[i], Variable) and not args[i].requires_grad):\n raise Exception(\"input {:g} doesn't require gradient\".format(i))\n\n nelems = args[i].numel()\n\n \"\"\" numerical gradient \"\"\"\n\n wrapper, p = numdiff_wrapper(op, args, kwargs, i)\n jacobian_numerical = numdiff_unified(wrapper, p)\n\n \"\"\" analytic gradient \"\"\"\n\n jacobian_analytic = []\n\n if(len(kwargs.keys()) > 1):\n \"\"\"function has dictionary inputs\"\"\"\n f = op(*args, **kwargs)\n else:\n f = op(*args)\n\n output_nelems = f.data.numel()\n\n for k in range(output_nelems):\n\n output_grad = torch.zeros(f.data.size())\n output_grad.view(output_nelems, 1)[k] = 1\n\n f.backward(output_grad, retain_variables=True)\n\n jacobian_analytic.append( np.copy( args[i].grad.data.view( nelems ).numpy() ) )\n\n for params_i in args:\n if(isinstance(params_i, torch.autograd.Variable) and params_i.requires_grad):\n params_i.grad.data.zero_()\n\n jacobian_analytic = np.asarray(jacobian_analytic)\n\n \"\"\"\n compare jacobian_analytic with jacobian_numerical\n \"\"\"\n\n if( np.allclose(jacobian_analytic, jacobian_numerical) ):\n\n print \"gradient is correct\"\n\n else:\n\n rel_error = np.linalg.norm( jacobian_analytic - jacobian_numerical ) / \\\n np.maximum( np.linalg.norm( jacobian_analytic ), np.linalg.norm( jacobian_numerical) )\n\n print 'analytic jacobian :'\n print jacobian_analytic\n\n print 'numerical jacobian :'\n print jacobian_numerical\n\n print 'jacobian difference :'\n print jacobian_analytic - jacobian_numerical\n\n print 'relative error:'\n print rel_error", "def safe_verify_grad(func, data):\r\n # 'data' is a one-element list.\r\n data_tensor, = data\r\n # Flatten it into a 1D vector.\r\n data_vector = data_tensor.flatten()\r\n # Compute pairwise absolute differences.\r\n diff = numpy.abs(data_vector.reshape((-1, 1)) - data_vector)\r\n # Alter the diagonal to avoid a zero minimum.\r\n for i in xrange(len(diff)):\r\n diff[i, i] = 1\r\n # Find an appropriate epsilon.\r\n eps = builtin_min(numeric_grad.type_eps[config.floatX],\r\n diff.min() / 2)\r\n # Run gradient verification.\r\n utt.verify_grad(func, data, eps=eps)", "def check_numerical_gradient():\r\n\r\n # Evaluate the function and gradient at x = [4, 10]\r\n x = np.array([4, 10], dtype=np.float64)\r\n value, grad = simple_quadratic_function(x)\r\n\r\n # Use your code to numerically compute the gradient of simple_quadratic_function at x.\r\n func = lambda x : simple_quadratic_function(x)[0] \r\n numgrad = 
compute_numerical_gradient(func, x)\r\n\r\n # Visually examine the two gradient computations. The two columns\r\n # you get should be very similar.\r\n n_grad = grad.size\r\n for i in range(n_grad):\r\n print(\"{0:20.12f} {1:20.12f}\".format(numgrad[i], grad[i]))\r\n print('The above two columns you get should be very similar.\\n(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n')\r\n\r\n # Evaluate the norm of the difference between two solutions. \r\n # If you have a correct implementation, and assuming you used EPSILON = 0.0001 \r\n # in computeNumericalGradient.m, then diff below should be 2.1452e-12 \r\n diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)\r\n print(\"Norm of difference = \", diff) \r\n print('Norm of the difference between numerical and analytical gradient (should be < 1e-9)\\n\\n')", "def gradient_supplied(fun, x0, jac, info):\n result = OptimizerResult()\n result.x = x0\n result.fun = 0\n info[\"has_gradient\"] = jac is not None\n\n return result", "def test_deriv_cost_function():\n # Setup scenario\n b_lower = 0.35\n b = 0.6\n v = [1.0, 2.0, 3.0]\n\n # Calculate expected result\n n = len(v)\n tmp = np.ones(n) * (b-b_lower)\n ps = [x**(k-1) for k,x in zip(range(n), tmp)]\n bs = np.array([i * x for i,x in zip(range(n), ps)])\n alphas = np.array(v)\n expected = np.dot(alphas, bs)\n\n # Calculate actual result\n actual = deriv_cost_function(b_lower, v, b)\n \n # Compare\n assert expected == actual", "def constraint_check(F):\n def _check(condition, err_msg):\n if isinstance(condition, bool):\n if not condition:\n raise ValueError(err_msg)\n return 1.0\n return F.npx.constraint_check(condition, err_msg)\n return _check", "def test_gradient_convergence(self):\n pass", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n \n e = np.eye(num_features)\n denominator = np.float(2*epsilon)\n numerator = np.array([ compute_square_loss(X_train,y_train,theta+epsilon*e[i]) - compute_square_loss(X_train,y_train,theta-epsilon*e[i]) for i in range(num_features) ] )\n diff = (true_gradient - numerator/denominator)\n \n return (diff.dot(diff) < tolerance)" ]
[ "0.594035", "0.58975935", "0.5887315", "0.58429676", "0.5820648", "0.58180976", "0.56850296", "0.56849337", "0.5643986", "0.5643095", "0.5579458", "0.55547184", "0.55505186", "0.54298055", "0.5423791", "0.53822243", "0.52979624", "0.5283935", "0.52480435", "0.5244632", "0.52386224", "0.52157146", "0.5202262", "0.5201022", "0.5194057", "0.51847184", "0.5181157", "0.51721174", "0.5166192", "0.5156251" ]
0.6569059
0
GETMOVIELIST reads the fixed movie list in movie.txt and returns a cell array of the words.
movieList = GETMOVIELIST() reads the fixed movie list in movie.txt and returns a cell array of the words in movieList.
def loadMovieList():
    # Read the fixed movie list
    fid = open('movie.txt', 'r', encoding='UTF-8')
    ls = fid.readlines()
    fid.close()
    movieList = [i[i.find(' ')+1:-1] for i in ls]
    return movieList
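A minimal usage sketch for the function above, assuming a movie.txt file in the working directory whose lines look like "1 Toy Story (1995)"; the code keeps everything after the first space and drops the trailing newline.

# Hypothetical illustration; the movie.txt contents are an assumption, not given in the source.
movieList = loadMovieList()
print(len(movieList))   # number of movies read from movie.txt
print(movieList[0])     # e.g. 'Toy Story (1995)' once the leading index is stripped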
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getVocabList():\n vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def read_movies(filename):\n movies = []\n # TODO: Read the file correctly and instantiate movie objects to store them\n # inside the movies list.\n return movies", "def getVocabList():\n vocabList = pd.read_csv(os.path.join(folder, 'vocab.txt'),\n delimiter='\\t',\n names=['index', 'vocab'],\n index_col='index')\n return vocabList", "def get_movie_data(files: list) -> list:\n pass", "def get_imdb_list():\n list_file = 'imdb.txt'\n name_column = 26\n f = open(list_file, 'r')\n film_list = []\n pos = 0\n\n for line in f:\n pos += 1\n words = line.split()\n name = line[name_column:-1]\n # could be problematic is there are brackets in the film name\n year = name[name.find('(') + 1:name.find(')')]\n name = name.replace('(' + year + ')', '')\n film = {\n 'pos': pos,\n 'score': Decimal(words[2]),\n 'name': name.strip(),\n 'year': year\n }\n film_list.append(film)\n f.close()\n return film_list", "def loadWords() -> List[str]:\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n\n return wordList", "def _get_movies(self):\n\n return self.data_file['movies']", "def get_bfi_list():\n list_file = 'bfi_sight_and_sound_2012.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split(' ')\n #NOTE: pos is not the position in the pyton list but in the original\n # list so is not always an integer due to joint places\n film = {'pos': words[0], 'name': words[1][:-1]}\n film_list.append(film)\n f.close()\n return film_list", "def loadWords():\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n return wordlist", "def fetch_words(filename):\n data = [] #empty list\n with urlopen(filename) as story:\n for line in story:\n words = line.decode('utf-8').split() #must decode into strings and then separate with spaces\n #print(lists)\n for word in words:\n data.append(word)\n return(data)", "def load_stop_list():\n stop_list = []\n with open(STOP_LIST, \"r\") as f:\n lines = f.readlines()\n stop_list = [word.strip() for word in lines]\n return stop_list", "def get_movies():\n\n # ouverture du fichier de notre liste de films\n with open(DATA_FILE,\"r\") as f:\n movies_list = json.load(f)\n\n # notre liste des instances\n movies = [Movie(m)for m in movies_list] \n return movies", "def load_words():\r\n## print \"Loading word list from file...\"\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r', 0)\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.append(line.strip().lower())\r\n## print \" \", len(wordlist), \"words loaded.\"\r\n return wordlist", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of 
strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def list_words():\n fin = open('words.txt')\n words = []\n for line in fin:\n words.append(line.strip())\n fin.close()\n return words", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n return wordList", "def get_movies():\n tree = get_tree()\n movie_list = [movie.get(\"title\") for movie in tree.getroot().findall(\"movie\")]\n return movie_list", "def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result", "def load_words():\r\n \r\n my_file = open(\"words.txt\")\r\n words = my_file.read()\r\n words_list = words.split(\" \")\r\n return (words_list)\r\n my_file.close()", "def loadWords():\r\n print(\"Loading word list from file...\")\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r')\r\n # line: string\r\n line = inFile.readline()\r\n # wordlist: list of strings\r\n wordList = line.split()\r\n\r\n print(\" \", len(wordList), \"words loaded.\")\r\n return wordList", "def make_word_list():\n word_list = []\n fin = open('words.txt')\n for line in fin:\n word = line.strip()\n word_list.append(word)\n return word_list", "def make_word_list():\n result = []\n for line in open('words.txt'):\n word = line.strip()\n result.append(word)\n return result", "def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist", "def create_medium_list(self):\n word_list = []\n try:\n f = open(self.index, 'r')\n for line in f:\n if line[0] == 'M' and line[1] == \" \" and line[2] != \" \":\n readout = line[2:].upper()\n has_digit = re.search('\\d', readout)\n # this can be added to if there are more characters that cannot be\n # used in the game\n has_wrong = re.search(\"[-,.' '/!?]\", readout)\n if has_digit is None:\n if has_wrong is None:\n word_list.append(readout.strip('\\n'))\n return word_list\n except IOError:\n print(\"Cannot open file\")\n raise (IOError)", "def movie_list(self):\n return self._request_obj(self._urls[\"movie_list\"], key=\"genres\")", "def get_convos():\n # returns array of arrays with line data from movie_conversations.txt\n # ex. 
convos = [['L194', 'L195', 'L196'], ['L198', L'199']]\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist" ]
[ "0.6709577", "0.648282", "0.62326944", "0.6198604", "0.61928093", "0.60708266", "0.6070315", "0.6035897", "0.59378856", "0.5916645", "0.59033954", "0.58966136", "0.5863689", "0.58624583", "0.58624583", "0.58624583", "0.585424", "0.5835306", "0.58195555", "0.5803572", "0.5760497", "0.57574624", "0.5752365", "0.5739837", "0.57272017", "0.57197875", "0.57162493", "0.5709675", "0.57073534", "0.57073534" ]
0.81582546
0
NORMALIZERATINGS preprocesses data by subtracting the mean rating for every movie (every row).
[Ynorm, Ymean] = NORMALIZERATINGS(Y, R) normalizes Y so that each movie has a rating of 0 on average, and returns the mean rating in Ymean.
def normalizeRatings(Y, R):
    m, n = Y.shape
    Ymean = np.zeros((m, 1))
    Ynorm = np.zeros(Y.shape)
    for i in range(m):
        idx = np.where(R[i] == 1)[0]
        Ymean[i, 0] = Y[i, idx].mean()
        Ynorm[i, idx] = Y[i, idx]-Ymean[i, 0]
    return Ynorm, Ymean
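A small hedged example of how normalizeRatings behaves, using a toy 3x2 ratings matrix chosen here purely for illustration: rows are movies, columns are users, and R marks which entries were actually rated.

import numpy as np

Y = np.array([[5.0, 3.0],
              [4.0, 0.0],
              [0.0, 1.0]])
R = np.array([[1, 1],
              [1, 0],
              [0, 1]])
Ynorm, Ymean = normalizeRatings(Y, R)
print(Ymean.ravel())   # [4. 4. 1.] -- per-movie mean over rated entries only
print(Ynorm[0])        # [ 1. -1.] -- rated entries of each row now average to zero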
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalizeRatings(Y, R):\n\n m, n = Y.shape\n Ymean = np.zeros(m)\n Ynorm = np.zeros(Y.shape)\n\n for i in range(m):\n idx = R[i] == 1\n y = Y[i, idx]\n\n Ymean[i] = np.mean(y)\n Ynorm[i, idx] = y - Ymean[i]\n\n return Ynorm, Ymean", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize_train_data(train_data, hter=False):\n feats = train_data[:, :-1]\n labels = train_data[:, -1]\n if hter:\n labels_pw = labels\n else:\n labels_pw = labels / feats[:, 1]\n scaler = pp.StandardScaler()\n scaler.fit(feats)\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1), scaler", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def 
normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def standardize(data,weights,mode=None):\n wght = MA.resize(weights,data.shape)\n if mode == 'row': # space\n mean = MA.average(data,weights=wght,axis=1)\n std = MA.sqrt(MA.average((data-mean[:,N.newaxis])**2,weights=wght,\n axis=1))\n norm_data = ((data-mean[:,N.newaxis])/std[:,N.newaxis])\n elif mode == 'col': # time\n mean = MA.average(data,weights=wght,axis=0)\n std = MA.sqrt(MA.average((data-mean)**2,weights=wght,axis=0))\n norm_data = (data - mean) / std\n else: # total space-time\n mean = MA.average(data,weights=wght)\n std = MA.sqrt(MA.average((data-mean)**2,weights=wght))\n norm_data = (data - mean) / std\n\n return norm_data", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def _process(self, data: np.ndarray) -> np.ndarray:\n nominals = unp.nominal_values(data)\n min_y, max_y = np.min(nominals), np.max(nominals)\n\n return (data - min_y) / (max_y - min_y)", "def _transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return (data - mean) / variance", "def normalize_train_data(self, data_vector, clf_type = \"generic\"):\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tself.mean_per_dim_generic = []\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tself.std_per_dim_generic = []\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tself.mean_per_dim_specific = []\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tself.std_per_dim_specific = []\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\t\tper_dim = zip(*data_vector)\n\n\t\tfor i in xrange(len(per_dim)):\n\t\t\n\t\t\tm = np.float64(sum (per_dim[i]) / float (len(per_dim[i])))\n\t\t\ts = np.std(per_dim[i])\n\t\t\tper_dim[i] -= m\n\t\t\tif s>0:\n\t\t\t\tper_dim[i] /= s\n\t\t\n\t\t\tmean_per_dim.append(m)\n\t\t\tstd_per_dim.append(s)\n\t\n\t\tdata_vector = zip(*per_dim)\n\t\tfor i in xrange(len(data_vector)):\n\t\t\tdata_vector[i] = list(data_vector[i])\n\n\t\treturn data_vector", "def standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def normalize_dataset(self):", "def z_score_normalization(data):\n # import data\n\n features = data[:, 0:-1]\n target = data[:, -1]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features))\n print('\\n')\n print('Targets:\\n\\n' + 
str(target))\n\n # Data standarization\n standardized_data = preprocessing.scale(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n print('\\n\\n')\n\n new_data = np.append(standardized_data, target.reshape(target.shape[0], -1), axis=1)\n print('\\nNew array\\n')\n print(new_data)\n\n return new_data", "def normalize(feats_Xy, trace_normalize=True, data=None):\n feats, labels = zip(*feats_Xy)\n if data is None:\n train_f = feats[0]\n m = train_f.mean(axis=0)\n s = np.maximum(train_f.std(axis=0), 1e-8)\n else:\n m = data['train_mean']\n s = data['train_std']\n feats = [(f - m) / s for f in feats]\n if trace_normalize:\n if data is None:\n train_f = feats[0]\n tr = np.maximum(np.sqrt((train_f**2).sum(axis=1)).mean(), 1e-8)\n else:\n tr = data['trace']\n else:\n tr = None\n if trace_normalize:\n feats = [f / tr for f in feats]\n feats_Xy = tuple(zip(feats,labels))\n return feats_Xy + (m, s, tr)", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def standardize(data):\n stddev = data.std()\n #if stddev == 0.:\n # sys.exit(\"data.std() == 0. !\")\n if stddev != 0.:\n data = (data - data.mean()) / (data.std())\n\n return data", "def _get_normalisation_stats(self):\n p_net_datasets = [self.pdf_dataset] + [self.PDE_dataset] + [self.BC_dataset]\n p_net_means, p_net_stds = get_mean_std_from_datasets(p_net_datasets)\n\n D_net_datasets = [self.PDE_dataset]\n D_net_means, D_net_stds = get_mean_std_from_datasets(D_net_datasets)\n\n U_net_datasets = [self.PDE_dataset]\n U_net_means, U_net_stds = get_mean_std_from_datasets(U_net_datasets)\n\n return p_net_means, p_net_stds, D_net_means, D_net_stds, U_net_means, U_net_stds", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X" ]
[ "0.714793", "0.6533668", "0.6453761", "0.6344558", "0.6208749", "0.6180315", "0.61512446", "0.61158943", "0.610423", "0.6082433", "0.60785216", "0.60010606", "0.59637064", "0.5939617", "0.59323406", "0.59188473", "0.5907955", "0.5896943", "0.5888731", "0.5886372", "0.58828217", "0.5865788", "0.58637697", "0.58593965", "0.5859375", "0.58573455", "0.5852058", "0.5850771", "0.58445644", "0.58442074" ]
0.7204803
0
Gradient descent to learn ratings
def RatingsGradientDescent(params, Y, R, num_users, num_movies, num_features, lbd, alpha, num_iters):
    J_history = np.zeros(num_iters)
    for i in range(num_iters):
        J_history[i], grad = cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd)
        params = params-alpha*grad
        if i % 100 == 99:
            print('Step %i, cost=%f' % (i+1, J_history[i]))
    return params, J_history
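A hedged call sketch for the routine above. It assumes cofiCostFunc, which the code references but which is defined elsewhere in the same collaborative-filtering exercise, returns a (cost, gradient) pair for the unrolled parameter vector; the shapes and hyperparameter values below are illustrative assumptions only.

import numpy as np

num_movies, num_users, num_features = 5, 4, 3
Y = np.random.rand(num_movies, num_users) * 5   # toy ratings matrix
R = (Y > 2.5).astype(float)                     # which entries count as rated
init_params = 0.1 * np.random.randn(num_movies * num_features + num_users * num_features)
params, J_history = RatingsGradientDescent(init_params, Y, R, num_users, num_movies,
                                           num_features, lbd=10, alpha=0.001, num_iters=300)
# With a small enough alpha, J_history should trend downward over the iterations.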
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_gradient(self, learning_rate):\n raise NotImplementedError()", "def _ci_grads(preds, dtrain):\n # predictions: np.array with shape of (n, )\n n = preds.shape[0]\n y_hat = preds\n\n # labels: np.array with shape of (n, )\n labels = dtrain.get_label().astype('int')\n E = (labels > 0).astype('int')\n T = np.abs(labels)\n\n # L2 Gradient Computation (Concordance Index Approximation)\n # gradients computation of numerator and denominator in L2\n # initialization\n num, den = .0, .0\n grad_den = np.zeros_like(y_hat)\n hess_den = np.zeros_like(y_hat) # 0\n grad_num = np.zeros_like(y_hat)\n hess_num = np.zeros_like(y_hat)\n\n # firstly, compute gradients of numerator(\\alpha) and denominator(\\beta) in L2\n for k in np.arange(n):\n ## gradients of denominator (\\beta)\n # For set s1 (i.e. \\omega 1 in the paper)\n # s1 = (k, i): E_k = 1 and T_k < T_i\n s1 = E[k] * np.sum(T > T[k])\n # For set s2 (i.e. \\omega 2 in the paper)\n # s2 = (i, k): E_i = 1 and T_i < T_k\n s2 = np.sum((E > 0) * (T < T[k]))\n # For grad_den (i.e. the first-order gradient of denominator)\n grad_den[k] = s2 - s1\n # hess_den[k] = 0\n\n ## gradients of numerator (\\alpha)\n\n # set S1\n # i.e. the first-order and second-order gradients related to set s1\n # s1 = (k, i): E_k = 1 and T_k < T_i\n g_s1, h_s1 = .0, .0\n if E[k] == 1:\n w = y_hat[k] - y_hat[T[k] < T]\n # For den and num\n den += np.sum(-w)\n num += np.sum((w < _GAMMA) * (-w) * (_GAMMA - w)**2)\n\n g_s1 = np.sum((w < _GAMMA) * (_GAMMA - w) * (3*w - _GAMMA))\n\n h_s1 = np.sum((w < _GAMMA) * (4*_GAMMA - 6*w))\n \n # set S2\n # i.e. the first-order and second-order gradients related to set s2\n w = y_hat[(E > 0) * (T < T[k])] - y_hat[k]\n g_s2 = np.sum((w < _GAMMA) * (_GAMMA - w) * (_GAMMA - 3*w))\n h_s2 = np.sum((w < _GAMMA) * (4*_GAMMA - 6*w))\n \n grad_num[k] = g_s2 + g_s1\n hess_num[k] = h_s2 + h_s1\n\n if den == 0:\n grad_f = np.zeros_like(y_hat)\n hess_f = np.zeros_like(y_hat)\n else:\n grad_f = grad_num / den - num * grad_den / (den ** 2)\n hess_f = (den * hess_num - num * hess_den) / (den ** 2) - 2 * grad_den / den * grad_f\n \n return grad_f, hess_f", "def _learn_using_GD(self, y, tx, w, fn, gamma, lambda_, regularization):\n loss, grad = fn(y, tx, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients as I-P\n for ex in self.examples:\n p = sigmoid(self.examples[ex])\n if ex in self.pos:\n gradients[ex] = 1-p\n elif ex in sampled_neg:\n gradients[ex] = 0-p\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value + tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] += 0.01*tree_i_value\n\n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the 
parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def learn(self, D, **kwargs):\n pass", "def __update_weights_grad_desc(self, x_train, y_train):\n\n predictions = self.__compute_prediction(x_train)\n weights_delta = np.dot(x_train.T, y_train - predictions)\n\n m = y_train.shape[0]\n self.__weights += self.__learning_rate / float(m) * weights_delta", "def learn(self):\n Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)\n Yt = self.Y * 2 - 1\n\n w = np.ones(Xt.shape[1]) # avoiding random init, for debugging\n lw = [[] for k in range(len(w))]\n \n for iter in range(self.max_steps):\n P = Yt * np.dot(Xt, w)\n M = np.where(P <= 0)[0] # indices of misclassified datapoints\n\n if len(M) == 0: \n self.logger.debug(\"Found linearly separable hyperplane!\")\n break\n\n if self.is_stochastic:\n # just pick one randomly from M\n M = [M[random.randint(0, len(M)-1)]]\n\n grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)\n\n if self.reg_constant > 0:\n grad += self.reg_constant * w\n \n eta = self.step_size * 10000 / (10000 + iter)\n \n w = w - grad * eta\n \n if iter % 100 == 0:\n for k in range(len(w)):\n lw[k].append(w[k])\n \n if iter % 1000 == 0:\n self.logger.debug(\"Iter %s:\\t %f %f %f\" %(iter, w[0], w[1], w[2]))\n \n self.logger.debug(\"Iterations: %s\" %(iter))\n\n# x_range = range(len(lw[0]))\n# fig = plt.figure()\n# ax1 = fig.add_subplot(111) \n# for j, lwn in enumerate(lw):\n# if j % 3 >= 2: # plot an arbitrary subset of features\n# a = w[j]\n# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))\n# \n# plt.xlabel(\"Iteration\")\n# plt.ylabel(\"Feature weight\")\n# plt.show()\n \n #self.logger.debug(\"%s\" % np.array2string(w, precision=2, separator=','))\n \n self.w = w", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def calculate_recommendations(self, vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # 
alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def __call__(self, y, pred, sample_weight=None):", "def gradient_descent_step(self, x, y, learning_rate):\n # compute derivative of loss wrt Z\n dZ = self.derivative_loss(y, self.predict(x))\n dW = np.dot(dZ, x)\n # subtract average derivative from weights\n self.w -= learning_rate * 1.0/dW.shape[0] * dW\n if self.fit_b:\n self.b -= learning_rate * (1.0/x.shape[0] * np.sum(dZ))", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def _sgd_step(self, regularize, learning_rate):\n ratings = self.ratings.sample(frac=1)\n num_ratings = len(ratings)\n sse = 0\n for row in ratings.itertuples(index=False):\n item, rating, user = row\n prediction = self.predict(user, item)\n err = rating - prediction\n sse += err ** 2\n self.user_bias[user] += learning_rate * (err - regularize*self.user_bias[user])\n self.item_bias[item] += learning_rate * (err - regularize*self.item_bias[item])\n self.U[user, :] += learning_rate * (err*self.I[:, item] - regularize*self.U[user, :])\n self.I[:, item] += learning_rate * 
(err*self.U[user, :] - regularize*self.I[:, item])\n self.global_rmse.append(np.sqrt(sse / num_ratings))", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def learning_by_gradient_descent(y, tx, w, gamma):\n\tgrad = calculate_gradient(y, tx, w)\n\n\tw = w - gamma * grad\n\treturn w", "def learn(self, Xtrain, ytrain):\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xless.T,Xless)/numsamples + (self.params['regwgt'] * np.identity(np.shape(Xless)[1]))), Xless.T),ytrain)/numsamples", "def sgd(self):\n for i, j, r in self.samples:\n # Computer prediction and error\n if (self.type=='bias'):\n prediction = self.get_rating_bias(i, j)\n elif(self.type=='nonbias') :\n prediction = self.get_rating(i, j)\n # print(i, j, r,prediction)\n e = (r - prediction)\n\n # Update biases\n self.b_u[i] =self.b_u[i]+ self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] = self.b_i[j] + self.alpha * (e - self.beta * self.b_i[j])\n\n # Create copy of row of P since we need to update it but use older values for update on Q\n P_i = self.P[i, :][:]\n\n # Update user and item latent feature matrices\n # print(self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :]))\n # print(self.P[i, :])\n self.P[i, :] =self.P[i, :] + self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :])\n # print(self.P[i, :],\"&&&&&&\")\n self.Q[j, :] = self.Q[j, :] + self.alpha * (e * P_i - self.beta * self.Q[j, :])\n # print(self.Q[j, :])", "def learn(self, Xtrain, ytrain):\n # Dividing by numsamples before adding ridge regularization\n # to make the regularization parameter not dependent on numsamples\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n y = ytrain[:, np.newaxis]\n #self.weights = np.dot(np.dot(np.transpose(Xless), np.linalg.inv(np.dot(Xless, np.transpose(Xless))/numsamples) / numsamples), y) / numsamples\n #Solves with respect to w for the equation Xless * w = y: it computes the pseudo inverse, using singular values internally, for the matri Xlessx, avoiding the original singular matrix error.\n self.weights = np.linalg.lstsq(Xless, y)[0]", "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n 
#print(self.learner.param)\r", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def learn(self):\n total_error = 0\n threshold = 0.05\n\n counter = len(self._training_set)*len(self._perceptrons)\n total_error+=self.learning_step()\n\n while total_error/counter > threshold:\n counter += len(self._training_set)*len(self._perceptrons)\n total_error +=self.learning_step()", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def learn(self,k=10):\n \n gradients = {}\n for i in range(k):\n\n #create TILDE(R) tree object\n tree_i = TILDE(typ=\"regression\",score=\"WV\",max_depth=self.max_depth)\n\n #subsample negatives if too many for each tree\n sampled_neg = deepcopy(self.neg)\n if len(self.neg) > 2*len(self.pos):\n sampled_neg = sample(self.neg,2*len(self.pos))\n\n #compute gradients using LMNN loss function\n for ex in self.examples:\n gradient = self.compute_gradient(ex,\n self.pos,\n self.neg,\n self.examples)\n gradients[ex] = gradient\n\n\n #fit tree on gradients\n tree_i.learn(self.data,self.bk,self.target,examples=gradients)\n \n #recompute example values as previous example value - gamma*tree_i value\n for ex in self.examples:\n tree_i_value = tree_i.infer(self.data,ex)\n self.examples[ex] -= 0.01*tree_i_value #learning rate\n \n #add tree to boosted_trees\n self.boosted_trees.append(tree_i)", "def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,\n K=10):\n\n # Sampling of indices is done for you. 
Do not modify this if you\n # wish to match the autograder and receive points!\n indices = [target]\n indices.extend(getNegativeSamples(target, dataset, K))\n\n ### YOUR CODE HERE\n grad = np.zeros_like(outputVectors)\n gradPred = np.zeros_like(predicted)\n cost = 0.0\n probability = 0.0\n for sample_idx in indices:\n similarity = outputVectors[sample_idx].dot(predicted.T)\n probability = sigmoid(similarity) # squash to 0 ~ 1\n if sample_idx == target: # positive sample\n #p = sigmoid(outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(similarity))\n else: # negative sample\n #p = sigmoid(-outputVectors[sample_idx].dot(predicted.T))\n cost += -np.log(sigmoid(-similarity)) # deduction from reference 2.\n \n if sample_idx == target:\n grad[sample_idx, :] += (probability - 1) * predicted\n gradPred += (probability - 1) * outputVectors[sample_idx]\n else:\n grad[sample_idx, :] += probability * predicted\n gradPred += probability * outputVectors[sample_idx]\n '''\n V, D = outputVectors.shape\n one_hot_target = np.zeros(V)\n one_hot_target[target] = 1\n cost = 0\n gradPred = np.zeros_like(predicted)\n grad = np.zeros_like(outputVectors)\n \n for idx in indices:\n context_vector = outputVectors[idx] # embedding vector (1, D)\n cosine_similarity = normalizeRows(predicted).dot(normalizeRows(context_vector).T)\n print('neg sample, consine_similarity={0}'.format(cosine_similarity))\n binary_class = sigmoid(cosine_similarity)\n print('neg sample, binary_class={0}'.format(binary_class))\n \n if idx == target:\n cost += binary_class - 1\n else:\n cost += binary_class\n \n dlogits = sigmoid_grad(cosine_similarity)\n #gradPred += dlogits * normalizeRows(context_vector)\n #grad += np.outer(one_hot_target, dlogits * normalizeRows(predicted))\n gradPred += dlogits\n grad += np.outer(one_hot_target, dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad" ]
[ "0.6332611", "0.63145196", "0.62612844", "0.622522", "0.6213547", "0.6210447", "0.6207228", "0.61697274", "0.6164421", "0.6163743", "0.61341023", "0.6130062", "0.6113881", "0.61076546", "0.61035484", "0.61020714", "0.6100461", "0.60973877", "0.6088086", "0.60739964", "0.6066376", "0.6045143", "0.60448885", "0.6032661", "0.60322887", "0.60083485", "0.5991324", "0.59737456", "0.5971359", "0.59652364" ]
0.66002995
0
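The gradient-descent snippets listed above all perform the same core update, stepping the weights against the gradient of the loss (w = w - gamma * grad). The following is a minimal runnable sketch of that update for least-squares regression; the toy data, shapes, and learning rate are illustrative assumptions, not values taken from any snippet above.

import numpy as np

def gradient_descent(y, X, w, gamma, num_iters=200):
    # plain batch gradient descent for least squares: w <- w - gamma * grad
    n = len(y)
    for _ in range(num_iters):
        err = X @ w - y            # residuals
        grad = X.T @ err / n       # gradient of 0.5 * mean squared error
        w = w - gamma * grad       # the same update the snippets above perform
    return w

# toy usage: fit y = 0.1 + 2*x with an explicit intercept column
X = np.c_[np.ones(50), np.linspace(0.0, 1.0, 50)]
y = 0.1 + 2.0 * X[:, 1]
w = gradient_descent(y, X, np.zeros(2), gamma=1.0)
print(w)  # converges toward [0.1, 2.0]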
Make an empty environment with the outer environment specified
def makeenv(outer=None):
    retval = {'outer': outer}
    return retval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_empty_env():\n return EvalEnvironment(namespaces={})", "def get_empty_env():\n return EvalEnvironment(namespaces={})", "def base_env(*args, **kwargs):\n try:\n # regular gym\n env = gym.make(*args, **kwargs)\n except:\n try:\n # gym retro\n env = retro.make(*args, **kwargs)\n except:\n # gym-super-mario-bros\n env = gym_super_mario_bros.make(*args, **kwargs)\n env.recognized = None\n return env", "def empty_env(self):\n return ()", "def make_environment(seed, task_horizon):\n # Load the gym environment.\n environment = CartPoleEnv()\n environment = gym_wrappers.TimeLimit(environment, task_horizon)\n environment.seed(seed)\n environment = wrappers.GymWrapper(environment)\n environment = wrappers.SinglePrecisionWrapper(environment)\n return environment", "def empty(model, inplace=False):\n\n return Environment.from_defaults(model, max_uptake=0, max_secretion=None, inplace=inplace)", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env", "def _transform_env(self) -> None:\n self.env = None if self.env == {} else self.env", "def make_env(env_name):\n \n env = gym.make(env_name) \n return env", "def make_possibly_parallel_environment(env_name_):\n if num_parallel_environments == 1:\n return env_load_fn(env_name_)\n else:\n return parallel_py_environment.ParallelPyEnvironment(\n [lambda: env_load_fn(env_name_)] * num_parallel_environments)", "def make_environment(\n evaluation: bool = False,\n task: str = 'MountainCarContinuous-v0') -> dm_env.Environment:\n del evaluation\n\n # Load the gym environment.\n environment = gym.make(task)\n\n # Make sure the environment obeys the dm_env.Environment interface.\n environment = wrappers.GymWrapper(environment)\n # Clip the action returned by the agent to the environment spec.\n environment = wrappers.CanonicalSpecWrapper(environment, clip=True)\n environment = wrappers.SinglePrecisionWrapper(environment)\n\n return environment", "def make_env():\n return {\n 'init': init,\n 'step': step,\n 'is_terminal': is_terminal,\n 'state_as_example': state_as_example,\n }", "def make_env(local_env, rank, seed=0):\n\n def _init():\n local_env.seed(seed + rank)\n return local_env\n\n set_random_seed(seed)\n return _init", "def make_env(local_env, rank, seed=0):\n\n def _init():\n local_env.seed(seed + rank)\n return local_env\n\n set_random_seed(seed)\n return _init", "def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:\n env = cls(*args)\n env.shared = True\n return env", "def _make_isolated_env_for_template(self, template: Union[Path, str]) -> Environment:\n if isinstance(template, str):\n # string tempaltes have no associated files, and therefore don't alter the environment. 
They can use the base environment directly\n return self.env\n \n # Deplicate the base env, but replace references to dictionaries in the base env with copies of those dictionaries\n env: Environment = self.env.overlay()\n # globals can be a nested data structure, so it must be deep copied\n env.globals = deepcopy(env.globals)\n # filters and tests can be shallow-copied\n env.filters = env.filters.copy()\n env.tests = env.tests.copy()\n # create a new filesystem loader\n searchpath = env.loader.searchpath.copy() # type: ignore\n env.loader = FileSystemLoader(searchpath=searchpath)\n return env", "def make_wrapped_env(seed=123,\n visualize=False, \n run_logs_dir=\"./run_logs/\", \n dMoments=None,\n step_timeout=10,\n integrator_accuracy = 5e-5,\n ):\n rank = 0 # MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n print(\"Making wrapped env\")\n env = IsolatedEnv(visualize=visualize,\n run_logs_dir=run_logs_dir,\n dMoments=dMoments,\n step_timeout=step_timeout,\n integrator_accuracy=integrator_accuracy\n )\n #print(\"IsolatedEnv: \", type(env))\n #env = ProstheticsEnv(visualize=visualize)\n #env = Monitor(env, os.path.join(logger.get_dir(), str(rank)))\n #print(\"h5pyEnvLogger:\")\n #env = h5pyEnvLogger(env, \"obs-logs\", str(rank))\n #print(\"h5pyEnvLogger:\", type(env))\n #env.seed(seed) # jw\n return env", "def make_as_global(self):\n return setup(env=self)", "def clone(self):\n return Environment(self.local_variables, self.local_types)", "def test_implicitly_created_environment(tmp_home, tmp_root_prefix):\n skip_if_shell_incompat(\"bash\")\n shutil.rmtree(tmp_root_prefix)\n assert helpers.shell(\"init\", \"--shell=bash\")\n assert (Path(tmp_root_prefix) / \"conda-meta\").exists()\n # Check, for example, that \"list\" works.\n assert helpers.umamba_list(\"-p\", tmp_root_prefix)", "def setup(c):\n c.run('nox --envdir .')", "def createEnvironment(self, _):\r\n if self._namespaces:\r\n raise InternalError('The environment can have only one namespace '\r\n 'at a time.')\r\n\r\n environment = Environment(self)\r\n return self._avatar.callRemote('setupNamespace', environment)", "def init_environment(env_name):\n env = gym.make(env_name)\n discrete = False\n if type(env.action_space) is gym.spaces.Discrete:\n discrete = True\n else:\n env = NormalizedActions(env)\n return env, discrete", "def make(\n env='Stack-v0', \n n_parallel=None, \n block=None, \n curriculum=None,\n unwrapped=False,\n as_path=False,\n **kwargs,\n):\n if curriculum:\n return make_curriculum(\n env, \n n_parallel=n_parallel, \n block=block, \n curriculum=curriculum,\n unwrapped=unwrapped,\n as_path=as_path,\n **kwargs,\n )\n else:\n if as_path:\n if isinstance(env, gym.Env):\n spec = env.unwrapped.spec\n elif isinstance(env, str):\n spec = gym.envs.registry.env_specs[env]\n else:\n raise TypeError(\n \"Invalid type {} for argument env.\".format(type(env))\n )\n\n entry_point = spec.entry_point\n if not callable(entry_point):\n entry_point = gym.envs.registration.load(entry_point)\n\n # Get entry point's default parameters\n args = {\n p.name:(p.default if p.default != p.empty else None)\n for p in inspect.signature(entry_point).parameters.values()\n }\n # Update with registered kwargs\n args.update(spec._kwargs)\n # Update with provided kwargs\n args.update(kwargs)\n \n path = []\n # Make a string with arg name and value for each argument\n for k,v in args.items():\n if k != 'seed':\n k = k.split('_')\n # Each name uses at most 4 chars\n if len(k) > 1:\n k = k[0][:1] + k[-1][:3]\n else:\n k = k[0][:4]\n 
path.append(k + str(v))\n # Join all argument strings with commas, removing spaces and quotation marks\n path = ','.join(path).replace(' ', '').replace(\"'\", '').replace('\"','')\n # Add the entry point's name\n return os.path.join(entry_point.__name__, path)\n elif unwrapped:\n if isinstance(env, gym.Env):\n return env\n elif isinstance(env, str):\n return gym.make(env, **kwargs)\n else:\n raise TypeError(\n \"Invalid type {} for argument env.\".format(type(env))\n )\n else:\n if not n_parallel:\n return Env(env, **kwargs)\n else:\n return ParallelEnv(env, n_parallel=n_parallel, block=block, **kwargs)", "def _create_extra_environment(self):\n return {}", "def _env_create(name_or_path, init_file=None, dir=False, with_view=None, keep_relative=False):\n if dir:\n env = ev.Environment(name_or_path, init_file, with_view, keep_relative)\n env.write()\n tty.msg(\"Created environment in %s\" % env.path)\n tty.msg(\"You can activate this environment with:\")\n tty.msg(\" spack env activate %s\" % env.path)\n else:\n env = ev.create(name_or_path, init_file, with_view, keep_relative)\n env.write()\n tty.msg(\"Created environment '%s' in %s\" % (name_or_path, env.path))\n tty.msg(\"You can activate this environment with:\")\n tty.msg(\" spack env activate %s\" % (name_or_path))\n return env", "def make_env_local(stack=True, scale_rew=True, idx=6, frame_wrapper=WarpFrame, reward_type=None):\n from retro_contest.local import make\n\n all_level = train_level + test_level\n\n print(str(idx) + \": start game=\" + all_level[idx][0] + \", state=\" + all_level[idx][1])\n\n env = make(game=all_level[idx][0], state=all_level[idx][1])\n\n return wrap_env(env, stack, scale_rew, frame_wrapper, reward_type)", "def make_env(env_id, rand, seed=0):\n def _init():\n env = gym.make(env_id)\n # env.unwrapped\n env.seed(seed + rand)\n return env\n set_global_seeds(seed)\n return _init", "def _make_env(environment):\n env = []\n for key, value in environment.items():\n env.append({\"name\": key, \"value\": value})\n\n return env" ]
[ "0.76089615", "0.76089615", "0.64476967", "0.6409842", "0.634659", "0.6296848", "0.6287654", "0.6220598", "0.6174979", "0.6111547", "0.60660183", "0.60641164", "0.60411716", "0.603954", "0.603954", "0.5998172", "0.5967356", "0.595362", "0.5953504", "0.58625597", "0.58617914", "0.5848657", "0.5802446", "0.58008885", "0.578586", "0.57742125", "0.57687026", "0.5758804", "0.57552725", "0.5751667" ]
0.7615676
0
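A short usage sketch of the makeenv document above, showing how the 'outer' key chains nested scopes; the binding added here (pi) is an illustrative assumption.

def makeenv(outer=None):
    retval = {'outer': outer}
    return retval

# nested scopes chain through the 'outer' key
global_env = makeenv()                  # outermost scope: 'outer' is None
global_env['pi'] = 3.14159              # bindings live alongside the 'outer' link
local_env = makeenv(outer=global_env)   # inner scope pointing back out

assert global_env['outer'] is None
assert local_env['outer'] is global_env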
Find a symbol in env. If the symbol is not in env or any of its outer environments, return None
def find(sym, env):
    try:
        if sym in env:
            return env[sym]
        else:
            return find(sym, env['outer'])
    except TypeError:
        # once hit here, sym is nowhere to be found
        raise NameError("Undefined atom {0!r}".format(sym))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_symbol(self, op):\n for ii in self.__symbols:\n if ii.get_name() == op:\n return ii\n return None", "def top_level(symbol):\n return (symbol and ('.' not in symbol)) or None", "def checkLookup(self, name):\n if not self.symbols.has_key(name):\n # we don't care\n return None\n # is it one we really care about\n t = self.symbols[name].getType()\n if t == \"typedef\":\n t = self.symbols[name].getAliasType()\n if t == \"general\" or t == \"struct\" or t == \"union\":\n return self.symbols[name]", "def find(self, var):\n if var in self: return self\n elif self.outer: return self.outer.find(var)\n else:\n raise Exception(\"Unresolved symbol: %s\", var)", "def get_symbol_by_address_fuzzy(self, address):\n for sym in self.symbols:\n if address == sym.relative_addr or address in sym.bind_xrefs or address in sym.symbol_stubs:\n return sym\n return None", "def findSymbol(self, exp):\n k = str(exp)\n try:\n return self.currSyms[k]\n except KeyError:\n raise SymbolNotFound('Identifier not found:<%s>' % (k))", "def find_pure_symbol(symbols, clauses):\n for s in symbols:\n found_pos, found_neg = False, False\n for c in clauses:\n if not found_pos and s in disjuncts(c): found_pos = True\n if not found_neg and ~s in disjuncts(c): found_neg = True\n if found_pos != found_neg: return s, found_pos\n return None, None", "def _GetSymbol(atom):\n ks = atom.keys()\n if 'sym' in ks:\n return atom['sym']\n\n for k in ks:\n if k not in PROTECTED_KEYS and isinstance(atom[k], list):\n if len(atom[k]) == 3:\n return k\n\n raise ValueError", "def find_symbol(self) -> str:\n pattern = struct.pack(\"<HBBBBHQ\", self.event_id, self.version, self.channel, self.level, self.opcode, self.task, self.keyword)\n for start, end in find_segment(self._bv, \".rentries\"):\n offset = self._bv.read(start, end - start).find(pattern)\n if offset == -1:\n continue\n\n symbol = self._bv.get_symbol_at(start + offset)\n if symbol is None:\n continue\n \n return symbol.name\n\n return None", "def _detect(env):\n try:\n return env['KCC']\n except KeyError:\n pass\n\n kcc = env.WhereIs('kcc', env['KCC_DIR'])\n if kcc:\n return kcc\n\n raise SCons.Errors.StopError(\n KccNotFound,\n \"Could not find Kalimba C compiler (kcc.exe)\")", "def lookup_variable_value(var, env):\n def env_loop(environment):\n \"\"\"\n calls scan on each frame in the env list\n \"\"\"\n def scan(vars, vals):\n \"\"\"\n scans variables in a frame\n \"\"\"\n if isNull(vars):\n return env_loop(enclosing_env(environment)) # 5-4: env -> environment\n elif isEq(var, car(vars)) == TRUE:\n return car(vals)\n else:\n return scan(cdr(vars), cdr(vals))\n if environment is the_empty_environment:\n raise UnboundLocalError(\"lookup_variable\")\n frame = first_frame(environment)\n return scan(frame_variables(frame), frame_values(frame))\n return env_loop(env)", "def test_lookup_missing(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n ret = env.lookup('foo')\n self.assertIsNone(ret)", "def first_lookup(self, symbol, size=1):\r\n if isinstance(symbol, (TerminalSymbol, NullSymbol)):\r\n return [symbol.gd]\r\n result = []\r\n for production in self.productions:\r\n if production.leftside[0] != symbol:\r\n continue\r\n for right_symbol in production.rightside:\r\n if right_symbol == symbol: #Avoids infinite recursion\r\n break\r\n current_symbol_first = self.first_lookup(right_symbol, size)\r\n result += current_symbol_first\r\n if NullSymbol not in current_symbol_first:\r\n break # This element doesn't 
have Null in its first set so there is no need to continue\r\n if not result:\r\n raise KeyError(\"Symbol doesn't exist in this grammar\")\r\n from pydsl.Grammar.PEG import Choice\r\n return Choice(result)", "def find_env_variable(env_var_to_find):\n try:\n env_var = os.environ[env_var_to_find]\n return env_var\n except KeyError:\n return False", "def symbol(self, **kw):\n if not kw:\n raise ValueError(u\"'symbol' needs keyword arguments\")\n res = self.find_symbols(**kw)\n if len(res)==1:\n return res[0]\n else:\n return res", "def resolve(name, env):\n t = name\n while t in env:\n t = env[t]\n return t", "def check_for_implicit_decl(ident):\n compound = None\n parent = LinkSearch.parent_lut[ident]\n while True:\n if isinstance(parent, AST.Compound):\n compound = parent\n break\n if parent not in LinkSearch.parent_lut:\n break\n parent = LinkSearch.parent_lut[parent]\n\n if compound is not None:\n if compound in LinkSearch.envr_lut:\n comp_envr = LinkSearch.envr_lut[compound]\n if comp_envr.is_localy_defined(ident.name):\n return None\n if compound in LinkSearch.scope_decl_lut:\n for decl in LinkSearch.scope_decl_lut[compound]:\n if decl.name == ident.name:\n return decl\n return None", "def find(self, op):\n # return the environment itself so that invoker can update information\n # in specific environment\n if op in self:\n return self\n if self._outer is None:\n raise LookupError('unbound '+op)\n return self._outer.find(op)", "def exists(_env):\n return True", "def exists(_env):\n return True", "def lookup(scopes, name):\n # type: (Scopes[T], str) -> Optional[T]\n\n for scope in scopes:\n for key, val in scope:\n if key == name:\n return val\n return None", "def isSymbolInContext(self, symbol):\n if ( symbol in list(self.dict.keys()) ):\n return True\n elif self.parent:\n return self.parent.isSymbolInContext(symbol)\n return False", "def findRecursive(epr, sym):\n # Handle iterable\n if isIterable(epr):\n return any([findRecursive(a, sym) for a in epr])\n # Handle an expression\n if \"args\" in dir(epr):\n if str(epr) == str(sym):\n return True\n if sym in epr.args:\n return True\n return any([findRecursive(a, sym) for a in epr.args])\n # Handle other cases\n return str(epr) == str(sym)", "def find_demangled_fxn(tu, fxn, call_graph):\n for f in call_graph['globals'].values():\n if 'demangledName' in f:\n if f['demangledName'] == fxn:\n return f\n for f in call_graph['locals'].values():\n if tu in f:\n if 'demangledName' in f[tu]:\n if f[tu]['demangledName'] == fxn:\n return f[tu]\n return None", "def get_symbol(self, name):\n if not self.ksymtab_initialized:\n self._init_ksymtab()\n for match in re.finditer('{0}\\0'.format(name), self.kernel_image[self.ksymtab_strings_offset:]):\n symbol_str_offset = self.ksymtab_strings_offset + match.start()\n if re.match(r'[0-9a-z_]', self.kernel_image[symbol_str_offset - 1:symbol_str_offset]):\n # Symbol string is a substring of another symbol string,\n # e.g. 
'use_mm' is a substring of 'unuse_mm'.\n continue\n debug.debug(\"Found the physical offset of the symbol string \"\n \"'{0}': {1:#010x}\".format(name, symbol_str_offset))\n symbol_str_vaddr = symbol_str_offset + self.page_offset\n symbol_str_vaddr_little = pack('<L', symbol_str_vaddr)\n # TODO: save ksymtab_offset in the object variable\n ksymtab_offset = max(0, symbol_str_offset - KSYMTAB_MAX_SIZE) >> 2 << 2 # align to x4\n ksymtab_data = self.kernel_image[ksymtab_offset:ksymtab_offset + KSYMTAB_MAX_SIZE]\n for match in re.finditer(symbol_str_vaddr_little.encode('hex'), ksymtab_data.encode('hex')):\n ksymtab_entry_offset = ksymtab_offset + match.start() / 2 - 4\n symbol_vaddr, = unpack('<L', self.kernel_image[ksymtab_entry_offset:ksymtab_entry_offset + 4])\n debug.debug(\"Requested kernel symbol '{0}' found: {1:#010x}\".format(name, symbol_vaddr))\n return symbol_vaddr\n debug.debug(\"Requested kernel symbol '{0}' not found\".format(name))\n return None", "def llvm_found_in_n_scope(self, id, found_first=False):\n return self._parent_node.llvm_found_in_n_scope(id)", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def FindGlobalName(name):\r\n r = Names.FindName(name)\r\n return None if r is None or r[1] != 0 else r[0]", "def resolve(self, key: str) -> Optional[Any]:\n return environ.get(key)", "def getFunctionSymbol(self, symbol):\n if ( symbol in list(self.dict.keys()) and self.dict[symbol][\"type\"] == \"func\"):\n return self.dict[symbol]\n return None" ]
[ "0.6370924", "0.62944776", "0.6078659", "0.60524756", "0.6004943", "0.5947626", "0.5891099", "0.5712541", "0.56552565", "0.56427425", "0.56182706", "0.5615034", "0.5589886", "0.55879825", "0.55729616", "0.55723345", "0.5512622", "0.55066395", "0.545123", "0.545123", "0.5438858", "0.5435743", "0.5425815", "0.5395677", "0.5322702", "0.53153366", "0.52455616", "0.5212544", "0.5205305", "0.51532924" ]
0.7866419
0
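A usage sketch of the find document above. Note that the matched code raises NameError once the outer chain is exhausted (the membership test against a None environment raises TypeError) rather than returning None as the query text suggests. The environment dicts below reuse the {'outer': ...} shape from the previous record; their bindings are illustrative assumptions.

def find(sym, env):
    try:
        if sym in env:
            return env[sym]
        else:
            return find(sym, env['outer'])
    except TypeError:
        # once hit here, sym is nowhere to be found
        raise NameError("Undefined atom {0!r}".format(sym))

global_env = {'outer': None, 'answer': 42}   # outermost scope
local_env = {'outer': global_env}            # inner scope, empty apart from the link

print(find('answer', local_env))   # 42, resolved one level up the outer chain
try:
    find('missing', local_env)
except NameError as exc:
    print(exc)                     # Undefined atom 'missing'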
Converge the orders we currently have in the book with what we want to be in the book. This involves amending any open orders and creating new ones if any have filled completely. We start from the closest orders outward.
def converge_orders(self, buy_orders, sell_orders, order_status): tickLog = self.exchange.get_instrument()['tickLog'] to_amend = [] to_create = [] to_cancel = [] buys_matched = 0 sells_matched = 0 existing_orders = self.exchange.get_orders() # Check all existing orders and match them up with what we want to place. # If there's an open one, we might be able to amend it to fit what we want. for order in existing_orders: if order['ordType'] != 'Limit': continue try: if (order['side'] == 'Buy' and (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 7)): desired_order = buy_orders[buys_matched] buys_matched += 1 elif (order['side'] == 'Sell' and (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 8)): desired_order = sell_orders[sells_matched] sells_matched += 1 elif (order['price'] == buy_orders[buys_matched]['price'] and order_status == 6): to_cancel.append(order) buys_matched += 1 continue elif (order['price'] == sell_orders[sells_matched]['price'] and order_status == 6): to_cancel.append(order) sells_matched += 1 continue else: continue # Found an existing order. Do we need to amend it? if desired_order['orderQty'] != order['leavesQty'] or ( # If price has changed, and the change is more than our RELIST_INTERVAL, amend. desired_order['price'] != order['price'] and abs((desired_order['price'] / order['price']) - 1) > 0): to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'], 'price': desired_order['price'], 'side': order['side']}) # Found an stop existing order. Do we need to amend it? except IndexError: # Will throw if there isn't a desired order to match. In that case, cancel it. if ((order_status == 2 and order['side'] == 'Sell') or (order_status == 1 and self.running_qty > 0) or (order_status == 4 and order['side'] == 'Buy') or (order_status == 3 and self.running_qty < 0) or (order_status == 7 and order['side'] == 'Buy') or (order_status == 8 and order['side'] == 'Sell')): to_cancel.append(order) if (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 5 or order_status == 7): while buys_matched < len(buy_orders): to_create.append(buy_orders[buys_matched]) buys_matched += 1 if (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 5 or order_status == 8): while sells_matched < len(sell_orders): to_create.append(sell_orders[sells_matched]) sells_matched += 1 if len(to_amend) > 0: for amended_order in reversed(to_amend): reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0] logger.info("Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)" % ( amended_order['side'], reference_order['leavesQty'], tickLog, reference_order['price'], (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['price'], tickLog, (amended_order['price'] - reference_order['price']) )) # This can fail if an order has closed in the time we were processing. # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled) # made it not amendable. # If that happens, we need to catch it and re-tick. try: self.exchange.amend_bulk_orders(to_amend) except requests.exceptions.HTTPError as e: errorObj = e.response.json() if errorObj['error']['message'] == 'Invalid ordStatus': logger.warn("Amending failed. 
Waiting for order data to converge and retrying.") sleep(0.5) return self.place_orders() else: logger.error("Unknown error on amend: %s. Exiting" % errorObj) sys.exit(1) if len(to_create) > 0: logger.info("Creating %d orders:" % (len(to_create))) for order in reversed(to_create): logger.info("%4s %d @ %.*f" % (order['side'], order['orderQty'], tickLog, order['price'])) self.exchange.create_bulk_orders(to_create) # Could happen if we exceed a delta limit if len(to_cancel) > 0: logger.info("Canceling %d orders:" % (len(to_cancel))) for order in reversed(to_cancel): logger.info("%4s %d @ %.*f" % (order['side'], order['leavesQty'], tickLog, order['price'])) self.exchange.cancel_bulk_orders(to_cancel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def place_orders(self):\n buy_orders = []\n sell_orders = []\n buy_stop_order = {}\n sell_stop_order = {}\n order_status = 0\n \"\"\"order_status参数说明\n 0: running_qty为0, 维持原样\n 1: self.running_qty > 0, 买卖都变化, 买单按照offset2, 卖单按照offset3\n 2: 买单维持不变, 卖单按照offset3\n 3: self.running_qty < 0, 买卖都变化, 买单按照offset3, 卖单按照offset2\n 4: 卖单维持不变, 买单按照offset3\n 5: 追加指定订单\n 6: 取消指定订单\n 7: self.running_qty > 0, 买单按照offset2, 卖单不变\n 8: self.running_qty < 0, 买单不变, 卖单按照offset2\n \"\"\"\n # Create orders from the outside in. This is intentional - let's say the inner order gets taken;\n # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\n # a new order is created in the inside. If we did it inside-out, all orders would be amended\n # down and a new order would be created at the outside.\n position_grade = self.get_position_grade()\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n print ('position_grade: %s ' % position_grade)\n print ('running_qty: %s ' % self.running_qty)\n print ('ORDER_START_SIZE: %s ' % self.ORDER_START_SIZE)\n schedule.run_pending()\n\n if(self.countdown == True): #设置倒数计时, 60秒后delay_order_check设为True, 可以重新挂非清仓方向的价格\n self.cycleclock = self.cycleclock - 1\n if(self.cycleclock <= 0):\n if(self.check_last_price_upordown() == True):\n self.cycleclock = 5\n else:\n self.countdown = False\n self.delay_order_check = True\n\n if(self.get_ticker()['last'] > STOP_PRICE and self.buy_only_flag == False):\n self.buy_only_flag = True\n if(self.running_qty < 0):\n self.clear_position(buy_orders, sell_orders)\n return self.converge_orders(buy_orders, sell_orders, order_status)\n\n if(self.get_5th_max_MA15_defference(getmessage = 1) > 100):\n self.stop_market_maker_flag = True\n self.cancel_all_orders_flag = True\n self.buy_only_flag = False\n self.sell_only_flag = False\n tg_important_message('上涨差值超过100,暂停交易')\n\n if(self.stop_market_maker_flag == True and self.cancel_all_orders_flag == True):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Cancel all orders\")\n elif(self.stop_market_maker_flag == True and self.clear_position_flag == True):\n if(self.running_qty != 0):\n self.clear_position(buy_orders, sell_orders)\n else:\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. No orders, no positions now\")\n elif(self.stop_market_maker_flag == True):\n if(self.running_qty > 0):\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif(self.running_qty < 0):\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. 
No orders, no positions now\")\n\n elif(self.running_qty == 0 and self.restart_flag == False):\n if(self.check_last_price_upordown() == True):\n self.restart_flag = True\n self.countdown_restart = 5\n return\n self.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n order_status = 0\n if not(self.sell_only_flag == True):\n buy_orders.append(self.prepare_order(-1, order_status))\n if not(self.buy_only_flag == True):\n sell_orders.append(self.prepare_order(1, order_status))\n self.countdown = False\n self.restart_flag = True\n self.countdown_restart = 30\n\n elif(self.running_qty == 0 and self.restart_flag == True):\n self.countdown_restart = self.countdown_restart - 1\n if(self.countdown_restart <= 0):\n self.restart_flag = False\n return\n\n elif(self.running_qty != 0 and self.running_qty != self.last_running_qty): #仓位变动后开始倒计时60秒, 60秒后delay_order_check为True, 可以重新挂非清仓方向的价格\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n self.cycleclock = 60\n self.countdown = True\n self.restart_flag = False\n self.delay_order_check = False\n\n elif(self.running_qty != 0 and self.running_qty == self.last_running_qty and self.delay_order_check == True): #可以重新挂非清仓方向的价格\n i = abs(self.running_qty) // (self.ORDER_START_SIZE//4) + 1\n if(self.running_qty > 0):\n order_status = 7\n if(i <= 3):\n buy_orders.append(self.prepare_order(-i, order_status))\n if(self.running_qty < 0):\n order_status = 8\n if(i <= 3):\n sell_orders.append(self.prepare_order(i, order_status))\n self.cycleclock = 30\n self.countdown = True\n self.delay_order_check = False\n\n else:\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n\n if(self.last_running_qty != self.running_qty):\n self.send_tg_message()\n self.last_running_qty = self.running_qty\n self.reset = False\n buy_orders = list(filter(None.__ne__, buy_orders)) #去除None\n sell_orders = list(filter(None.__ne__, sell_orders)) #去除None\n print('BXBT_MA15: %s' % self.get_BXBT_MA15())\n print(buy_orders)\n print(sell_orders)\n if((self.last_buy_orders == buy_orders and self.last_sell_orders == sell_orders) or (buy_orders == [] and sell_orders == [])):\n print('order no change, return')\n return\n else:\n self.last_buy_orders = buy_orders\n self.last_sell_orders = sell_orders\n self.converge_stop_order(buy_stop_order, sell_stop_order)\n return self.converge_orders(buy_orders, sell_orders, order_status)", "def make_orders(self):\n\n # orders to cancel from all of the strategies\n ocancel = self.get_cancel_orders()\n\n # orders to update from all of the strategies\n oupdate = self.get_update_orders()\n\n # new orders from all of the strategies\n onew = self.get_new_orders()\n \n # do we need to cancel, update, or make new orders?\n tocancel = bool(ocancel[const.BDAQID] or ocancel[const.BFID])\n toupdate = bool(oupdate[const.BDAQID] or oupdate[const.BFID])\n tonew = bool(onew[const.BDAQID] or onew[const.BFID])\n\n if tocancel:\n betlog.betlog.debug('cancelling orders: {0}'.format(ocancel))\n\n if toupdate:\n betlog.betlog.debug('updating orders: {0}'.format(oupdate))\n\n if tonew:\n betlog.betlog.debug('making new orders: {0}'.format(onew))\n\n if (tocancel or toupdate or tonew):\n \n # we could instead do 'monkey 
patching' here so we don't\n # need to check this every tick...\n if self.gconf.PracticeMode:\n # we don't make any real money bets in practice mode\n print 'bets not made since in practice mode'\n return\n\n # call multithreaded make orders so that we make all order\n # requests (cancelling, updating, making new) for BDAQ and\n # BF simultaneously.\n corders, uorders, neworders = multi.\\\n make_orders(ocancel, oupdate, onew)\n\n # save the full order information to the order store (this will\n # handle writing to the DB, etc.)\n self.ostore.add_orders(corders, uorders, neworders)\n\n else:\n \n # we need to set latest cancel, update, new orders to be\n # empty.\n self.ostore.latest = [{const.BDAQID: {}, const.BFID: {}}, \n {const.BDAQID: {}, const.BFID: {}}, \n {const.BDAQID: {}, const.BFID: {}}]", "def update_order():", "def update_order():", "def generate_orders(self, good):\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))", "def _move_closed_order(self, bo):\n return(self._move_order_from_to(bo, 'trades', 'history'))", "def create_orders(self, new_weights):\n cur_weights = self.normalized_holdings()\n vols = ((new_weights - cur_weights) * self.total_wealth())[:-1]\n holdings = self.investor.portfolio\n tickers = sorted(holdings)\n prices = np.array([self.market.price_for(t) for t in tickers])\n\n # identify the correct prices for bid and ask transactions\n bid_asks = [p[(v < 0).astype(int)] for v, p in zip(vols, prices)]\n\n orders = []\n for v, ba, t in zip(vols, bid_asks, tickers):\n amt = np.abs((v / ba).astype(int))\n b_or_a = Bid if v > 0 else Ask\n if v != 0:\n orders.append(b_or_a(price=ba, amount=amt,\n ticker=t, other_party=self.investor))\n return orders", "def manage_orders(self):\r\n for coin, pair_info in self.usdt_pairs.items():\r\n orders = self.kc.get_orders(pair_info[\"symbol\"], status=\"active\")\r\n self.log(coin, orders[\"totalNum\"])\r\n if orders[\"totalNum\"]:\r\n self.log(len(orders[\"items\"]))\r\n for order in orders[\"items\"]:\r\n self.log(order)\r\n\r\n self.log(mp.mpf())\r\n\r\n # ticker = current price action, bid/ask, etc\r\n ticker = self.kc.get_ticker(pair_info[\"symbol\"])\r\n self.log(ticker)\r\n return", "def update_past_orders(self):\n\n #TODO: Implement a method to grab the order history for just one stock\n all_past_orders = self.portfolio.all_past_orders() #This is REALLY inefficient (takes forever)\n\n #Now pre-parse into commonly used categories\n self.past_orders = all_past_orders[all_past_orders['symbol']==self.symbol] #Past orders for only this stock\n self.filled_orders = self.past_orders[self.past_orders['state']=='filled'] #Only orders that were filled (not canceled)\n\n return True", "def 
make_calcurve_orders(self):\n for oi in self.orders:\n self.make_calcurve_order(oi)", "def set_room_order(self, room_orders):\n orders = []\n self.room_orders = ';'.join(['%d-%d' % \\\n (item[0], item[1]) for item in room_orders.items()])", "def _get_book_prices(self):\n for k in self.orders.keys():\n if self.orders[k].type == 'ask':\n self.ask_prices.append(self.orders[k].price)\n self.ask_snapshot[k] = self.orders[k]\n elif self.orders[k].type == 'bid':\n self.bid_prices.append(self.orders[k].price)\n self.bid_snapshot[k] = self.orders[k]\n # Sorting and removing dubbing\n self.ask_prices = list(dict.fromkeys(sorted(self.ask_prices)))\n self.bid_prices = list(dict.fromkeys(sorted(self.bid_prices, reverse=True)))", "def get_new_orders(self):\n\n # note we only get orders from the strategies with UPDATED =\n # True, i.e. only those which got new pricing information this\n # tick. Among other reasons, this is because some strategies\n # (e.g. MMStrategy) need to be fed new prices in order to\n # clear the order dictionary, so if we didn't use _if, we\n # could potentially place these orders many times.\n\n return self.stratgroup.get_orders_to_place_if(UPDATED)", "def __handle_open_orders(self):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.status == Status.confirmed]\n time_zone = TraderBase.get_timezone()\n now = datetime.datetime.now(time_zone)\n for order in orders:\n price = self.db_tool.session.query(Series)\\\n .filter(order.stock_id == Series.stock_id) \\\n .filter(Series.date.between(order.date, now)) \\\n .filter(order.price >= Series.pricehigh)\\\n .order_by(Series.date.asc()).first()\n if price:\n order.status = Status.completed\n order.date = price.date\n self.connect_related_order(order)\n else:\n diff = now - order.date.replace(tzinfo=time_zone)\n hours = diff.total_seconds() / 60\n if hours >= self.expire_in_hours:\n self.logger.info(\"Order is expired because limit {} for {} \"\n \"was not reached during the day\".\n format(order.price, order.stock_id))\n order.status = Status.expired\n portfolio.cash -= order.price_complete", "def sync(self):\n\n new_book = {}\n update_list = [self.book[WAIT_OPEN], self.book[OPEN]]\n\n for status, booklet in self.book.items():\n new_book[status] = {}\n\n for status, booklet in self.book.items():\n for pos_id, position in booklet.items():\n\n position.update()\n new_status = position.status\n\n if status == new_status:\n new_book[status][pos_id] = position\n else:\n new_book[new_status][pos_id] = position\n\n self.book = new_book", "def generate_orderbooks(self):\n logger.DLOG(\"Generating orderbooks...\")\n # Create marketplace in db if not exist\n market_place = self.db_ops.insert_market_place(self.market_place, self.amas_location, self.amas_port, self.commit_orderbook)\n # Create market segment in db if not exist\n market_segment = self.db_ops.insert_market_segment(self.market_place, self.market_segment, self.commit_orderbook) # no creation of new market segment if update\n \n \n if market_place and market_segment:\n # Get instruments to generate orderbooks for\n instruments = self.get_instruments_to_generate_orderbooks(self.stored_query)\n \n if not instruments:\n logger.ELOG(\"no instrument selected in query'%s'\"%(self.stored_query))\n\n for each_instrument in instruments:\n \n orderbook_currency = each_instrument.Currency().Name() \n orderbook_name = self.define_orderbook_name(each_instrument, self.external_id_type)\n if not orderbook_name:\n 
logger.ELOG(\"**Cannot** generate Orderbook, as no ExternalId found to map\")\n continue\n\n # Check orderbook exist for instrument in db\n existing_orderbook = self.db_ops.get_orderbook_from_marketplace(each_instrument, market_place, orderbook_currency)\n if existing_orderbook: \n \n if self.update_orderbook: \n # update existing orderbook in database with new values or/and new leaf (market segment)\n self.db_ops.update_orderbook(existing_orderbook, each_instrument, market_place, market_segment, self.market_capability, self.tick_size_list, \\\n self.round_lot, self.day_count, orderbook_name, self.tiering_level, orderbook_currency) \n \n if self.commit_orderbook:\n #this is for creating the a new leaf, if customer wants an orderbook to be listed in another leaf\n group_map = self.db_ops.get_list_leaf(existing_orderbook, market_segment) \n if group_map and existing_orderbook.GroupMaps().IndexOf(group_map) <0 :\n existing_orderbook.GroupMaps().Add(group_map) \n existing_orderbook.GroupMaps().Commit() \n \n else:\n #This parts doesnt allow an orderbook to exist in in two different market segments on the same market. while for an organisational pupose\n #traders needs to add it on two different segments. but the same orderbook same physicalMarketSegment but another leaf\n # Check if same orderbook name is used for any other instrument orderbook\n #orderbook_name_in_use = self.db_ops.check_orderbook_name_already_in_use(orderbook_name, market_place)\n #if orderbook_name_in_use:\n # logger.LOG(\"**Cannot** create OrderBook. Orderbook ExternalID <%s> is already used for instrument <%s> in MarketPlace <%s>\"%(orderbook_name, orderbook_name_in_use.Instrument().Name(), market_place.Name()))\n # continue\n \n if self.commit_orderbook or (not self.commit_orderbook and not self.update_orderbook):\n logger.DLOG(\"Order book **does not exist** for instrument <%s>, MarketPlace <%s>.Creating it...\"%(each_instrument.Name(), market_place.Name()))\n # Get tick size, round lot and day count from another existing orderbook for same instrument\n tick_size_list, round_lot, day_count = self.get_orderbook_data(each_instrument)\n \n self.db_ops.insert_orderbook(each_instrument, market_place, market_segment, self.market_capability, tick_size_list, \\\n round_lot, day_count, orderbook_name, self.commit_orderbook, self.tiering_level, orderbook_currency)\n \n if self.update_orderbook and not self.commit_orderbook:\n logger.WLOG(\"**Cannot** update orderbook for <%s> as it does not exist in database.\"%each_instrument.Name()) \n \n \n else:\n if not market_place:logger.WLOG(\"Market place doesnt exist\") \n if not market_segment:logger.WLOG(\"Market segment doesnt exist\")", "def prep(self, order):\n update = {}\n for col in list(set(self.numeric + self.non_numeric + self.currencies + self.columns)):\n try:\n if col in self.numeric:\n value = float(order[col])\n else:\n value = order[col]\n update[col] = value\n except:\n update[col] = 0.0\n continue\n update = pd.Series(update).fillna(0)\n update['currency_on_hold'] = order['product_id'][-3:] if order['side'] == 'buy' else order['product_id'][:3]\n update['create_time'] = pd.to_datetime(order['time'])\n update['update_time'] = pd.to_datetime(order['time'])\n update['time'] = update.update_time.to_datetime64().astype('int64')//1e9\n update['status'] = order['type']\n update['order_type'] = 'unknown' if not update['order_type'] else update['order_type']\n return update#pd.Series(update).fillna(0)", "def map_to_orderbook_order(self, raw_orderbook_order: 
HitbtcRawOrderBookOrderModel\n ) -> HitbtcOrderBookOrderModel:\n\n price = Decimal(raw_orderbook_order[\"price\"])\n size = Decimal(raw_orderbook_order[\"size\"])\n order = HitbtcOrderBookOrderModel(price=price, size=size)\n return order", "def order(self, order):\n\n #print(\"Evaluating order: \", order)\n if self.security != order.secid:\n raise (\"Cannot place order for security \"\n \"%s on book[%s]\" % (order.security, self.security))\n\n levels = self.bid\n if order.side == Side.SELL:\n levels = self.offer\n\n new_level = OrderBookLevel(price=order.price, qty=order.qty, order_count=1)\n start_index = levels.bisect_right(new_level)\n levels.insert(start_index, new_level)\n OrderBookUtils.compact(levels, start_index)\n\n # Trim list\n if order.side == Side.SELL:\n # Delete from end of list - highest offers\n size = len(self.offer)\n if size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.offer[-1]\n else:\n # Delete from start of list - lowest bids\n size = len(self.bid)\n if size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.bid[0]\n\n return self.match(order.side)", "def update_packing_records(self, orders_since=None):\n staff = {\n _.email_address: _\n for _ in Staff.objects.filter(email_address__isnull=False)\n }\n orders = (\n self.get_recent_orders(orders_since)\n .filter(linnworks_order__order_guid__isnull=False, packed_by__isnull=True)\n .select_related(\"linnworks_order\")\n )\n wait_time = 60\n for i, order in enumerate(orders):\n try:\n audits = get_order_audit_trail(order.linnworks_order.order_guid)\n for audit in audits:\n if audit.audit_type in (\"ORDER_PROCESSED\", \"SHIPPING_LABEL_\"):\n order.packed_by = staff[audit.updated_by]\n order.save()\n break\n except Exception as e:\n logger.exception(e)\n continue\n finally:\n if i > 0 and i % 149 == 0:\n time.sleep(wait_time)", "def map_to_orderbook(self, raw_orderbook: HitbtcRawOrderBookModel\n ) -> HitbtcOrderBookModel:\n\n ask = list(map(self.map_to_orderbook_order, raw_orderbook[\"ask\"]))\n bid = list(map(self.map_to_orderbook_order, raw_orderbook[\"bid\"]))\n timestamp = raw_orderbook[\"timestamp\"]\n symbol = raw_orderbook[\"symbol\"]\n\n orderbook = HitbtcOrderBookModel(\n ask=ask, bid=bid, timestamp=timestamp, symbol=symbol)\n return orderbook", "def set_working_order(self):\n self.set_values(\n start_phrase='Working Orders',\n end_phrase=None,\n start_with=2,\n end_until=-1,\n prop_keys=self.working_order_keys,\n prop_name='working_order'\n )\n self.remove_working_order_rows()\n self.fillna_dict_with_exists(\n self.working_order,\n 'time_placed',\n ('time_placed', 'spread', 'order', 'tif', 'mark', 'status')\n )\n\n self.working_order = map(self.del_empty_keys, self.working_order)\n self.convert_type(self.working_order, 'time_placed', self.convert_datetime, None)\n self.convert_type(self.working_order, 'quantity', int, 0)\n self.convert_type(self.working_order, 'strike', float, 0.0)\n self.convert_type(self.working_order, 'price', float, 0.0)\n self.convert_type(self.working_order, 'expire_date', str, '')", "def place_orders(context, data):\r\n log.info(\"*********Monthly flags: %s\" % context.flags)\r\n \r\n context.sell = []\r\n context.buy = []\r\n \r\n # Go through flags to determine buy/sell signals\r\n for asset, flags in context.flags.items():\r\n # If up > down and multiple blue flags, add to buy\r\n if flags['UP'] > flags['DOWN'] and flags['UP'] > 1:\r\n context.buy.append(asset)\r\n \r\n # If down > up and multiple down flags, add to sell\r\n elif 
flags['DOWN'] > flags['UP'] and flags['DOWN'] > 1:\r\n context.sell.append(asset)\r\n \r\n # If both SPY and QQQ are buys, rebalance weightings and check components\r\n if sid(8554) in context.buy and sid(19920) in context.buy:\r\n rebalance_weightings(context)\r\n \r\n # Reset down sequence\r\n context.first_down_sequence = set()\r\n \r\n # Reset SPY and QQQ to max weightings\r\n context.target_weights[sid(8554)] = context.max_weights[sid(8554)]\r\n context.target_weights[sid(19920)] = context.max_weights[sid(19920)]\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[sid(8554)] = round(context.target_weights[sid(8554)] * context.portfolio.portfolio_value / context.price[sid(8554)])\r\n context.target_shares[sid(19920)] = round(context.target_weights[sid(19920)] * context.portfolio.portfolio_value / context.price[sid(19920)])\r\n \r\n # If not overweighting:\r\n if not context.overweighting:\r\n context.buy.remove(sid(8554))\r\n context.buy.remove(sid(19920))\r\n \r\n # Check components\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio > 1, add to buy\r\n if asset != sid(8554) and asset != sid(19920) and ratio > 1:\r\n context.buy.append(asset)\r\n \r\n # If SPY is a sell, check UP ratios for components\r\n if sid(8554) in context.sell:\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio < 1, add to sell\r\n if asset != sid(8554) and asset != sid(19920) and ratio < 1:\r\n context.sell.append(asset)\r\n \r\n \r\n \r\n # First month at end August 2017: set all other assets to max weighting, except take UP ratio of JKL to be <1 so sell 20% of weighting\r\n if context.first_iteration:\r\n log.info('First iteration')\r\n \r\n # Initialise weightings\r\n rebalance_weightings(context)\r\n context.first_iteration = False\r\n \r\n for asset, weight in context.max_weights.items(): \r\n # JKL\r\n if asset == sid(26451):\r\n context.sell.append(asset)\r\n\r\n context.target_weights[asset] = weight\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n buy_overweight = []\r\n remaining_cash = context.portfolio.cash\r\n \r\n # Buy components first (before considering overweighting QQQ/SPY)\r\n for asset in sorted(context.buy, reverse=True):\r\n \r\n # This is an up sequence so no subsequent down sequence\r\n if asset in context.first_down_sequence:\r\n context.first_down_sequence.remove(asset) \r\n \r\n # Buy 50% of weighting\r\n log.info('UP flags for %s: Buy 50 percent' % asset)\r\n extra_weight = 0.5 * context.max_weights[asset]\r\n \r\n # Do not exceed max shares by weighting, UNLESS taking from cash from components (overweighting)\r\n if context.target_weights[asset] == context.max_weights[asset] or (context.target_weights[asset] > context.max_weights[asset] and context.overweighting):\r\n buy_overweight.append(asset)\r\n \r\n elif context.target_weights[asset] + extra_weight > context.max_weights[asset]:\r\n context.target_weights[asset] = context.max_weights[asset]\r\n \r\n else:\r\n context.target_weights[asset] += extra_weight\r\n \r\n # Convert weights to number of shares\r\n old_shares = context.target_shares[asset]\r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n remaining_cash -= (context.target_shares[asset] - old_shares) * context.price[asset]\r\n \r\n for asset in buy_overweight:\r\n if remaining_cash > 0:\r\n # If 
first overweight or 2 assets to be overweighted, take 50% of available cash\r\n if context.target_weights[asset] > context.max_weights[asset] or len(buy_overweight) > 1:\r\n log.info('Taking half of cash of value: %f' % (remaining_cash * 0.5))\r\n context.target_weights[asset] += 0.5 * remaining_cash / context.portfolio.portfolio_value\r\n \r\n # If second overweight, take all remaining cash\r\n else:\r\n log.info('Taking remaining of cash of value: %f' % (remaining_cash))\r\n context.target_weights[asset] += remaining_cash / context.portfolio.portfolio_value\r\n \r\n else:\r\n # If no cash, ignore\r\n log.info('UP flags for %s: No change' % asset)\r\n continue\r\n \r\n \r\n # For assets in sell list\r\n for asset in context.sell:\r\n \r\n # If asset already has 0 holdings, ignore\r\n if context.target_weights[asset] == 0:\r\n log.info('DOWN flags for %s: No change' % asset)\r\n continue\r\n \r\n # If first multiple down flags, sell 20% of UP weight\r\n elif asset not in context.first_down_sequence:\r\n log.info('First DOWN flags for %s: Sell 20 percent' % asset)\r\n context.target_weights[asset] -= 0.2 * context.max_weights[asset]\r\n context.first_down_sequence.add(asset)\r\n \r\n # If this is a subsequent down flag sequence, sell 40% of UP weight\r\n else:\r\n log.info('DOWN flags for %s: Sell 40 percent' % asset)\r\n context.target_weights[asset] -= 0.4 * context.max_weights[asset]\r\n \r\n # Ensure no short position\r\n if context.target_weights[asset] < 0:\r\n context.target_weights[asset] = 0\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n print(context.target_weights)", "def processMarketOrders(self):\n try:\n nextRound = self.currentRound+1\n resultsList = []\n master = {}\n self.genMarketStat()\n myMarketStat = self.marketStats[str(self.currentRound)]\n \n # sorted lists of market orders\n master['buyAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'AL', 'min':0})\n master['buyEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'EC', 'min':0})\n master['buyIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'IA', 'min':0})\n master['sellAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'AL', 'max':0})\n master['sellEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'EC', 'max':0})\n master['sellIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'IA', 'max':0})\n \n for res in ['AL', 'EC', 'IA']:\n for sellOrder in master['sell%s' % res]:\n # min sell order gets first chance to sell its product\n if sellOrder.amountUsed == sellOrder.amount:\n pass # seller has sold all he wants with this order\n else:\n i = 0\n for buyOrder in master['buy%s' % res]:\n # determine price, allow for bidding on price\n try:\n nextBuyOrder = master['buy%s' % res][i+1]\n if nextBuyOrder.max < buyOrder.max and (nextBuyOrder.max+1) >= sellOrder.min:\n price = nextBuyOrder.max + 1\n else:\n price = buyOrder.max\n except IndexError:\n price = buyOrder.max\n # max buy order gets first chance to buy sellers product\n resultsList.append(self.processMarketTransaction(buyOrder, sellOrder, price))\n i += 1\n \n # set the average market prices for this round\n if getattr(myMarketStat, 'volSold%s' % res) > 0:\n setattr(myMarketStat, 'avgSold%s' % res, 
(getattr(myMarketStat, 'sumSold%s' % res) / \n getattr(myMarketStat, 'volSold%s' % res)))\n \n # clean up market orders for next round\n for orderID in self.marketOrders.keys():\n myMarketOrder = self.marketOrders[orderID]\n myMarketOrder.cleanUp()\n if myMarketOrder.amount == 0:\n resultsList.append('cancel market Order=%s' % orderID)\n self.cancelMarketOrder(orderID)\n \n return str(resultsList)\n except:\n return 'galaxy->processMarketOrders error'", "def update_book(self):\n while self.lowest_sell is not None and self.highest_buy is not None and self.lowest_sell <= self.highest_buy:\n sell = self.sell_levels[self.lowest_sell].head_order\n buy = self.buy_levels[self.highest_buy].head_order\n self.execute_trade(sell, buy)", "def create_order_2(i):\n # create dummy order 2\n o2 = models.Order()\n o2.inmate = i\n o2.save()\n o2.status = 'SENT'\n o2.date_closed = datetime.datetime.now()\n o2.save()\n # ...with 1 dummy book\n b2 = models.Book()\n b2.title = \"dictionary\"\n b2.order = o2\n b2.full_clean()\n b2.save()\n return o2", "def update_OpenOrders(self, market):\n mid = self.marketid(market)\n o_orders = self.Request.fetch('marketorders',params={'marketid':mid})\n ##check the form of o_orders\n \n print o_orders\n #self.OpenOrders[self.Pairs[mid]] = \n return 0", "def order_book_builder(self, data, timestamp, datetime, symbol):\n if isinstance(data[1], list):\n data = data[1]\n # Price, Count, Amount\n bids = {\n str(level[0]): [str(level[1]), str(level[2])]\n for level in data if level[2] > 0\n }\n asks = {\n str(level[0]): [str(level[1]), str(abs(level[2]))]\n for level in data if level[2] < 0\n }\n self.orderbooks[symbol].update({'bids': bids})\n self.orderbooks[symbol].update({'asks': asks})\n self.orderbooks[symbol].update({'timestamp': timestamp})\n self.orderbooks[symbol].update({'datetime': datetime})\n\n else:\n # Example update message structure [1765.2, 0, 1] where we have [price, count, amount].\n # Update algorithm pseudocode from Bitfinex documentation:\n # 1. - When count > 0 then you have to add or update the price level.\n # 1.1- If amount > 0 then add/update bids.\n # 1.2- If amount < 0 then add/update asks.\n # 2. - When count = 0 then you have to delete the price level.\n # 2.1- If amount = 1 then remove from bids\n # 2.2- If amount = -1 then remove from asks\n data = data[1:]\n data = [str(data[0]), str(data[1]), str(data[2])]\n if int(data[1]) > 0: # 1.\n\n if float(data[2]) > 0: # 1.1\n self.orderbooks[symbol]['bids'].update({data[0]: [data[1], data[2]]})\n\n elif float(data[2]) < 0: # 1.2\n self.orderbooks[symbol]['asks'].update({data[0]: [data[1], str(abs(float(data[2])))]})\n\n elif data[1] == '0': # 2.\n\n if data[2] == '1': # 2.1\n if self.orderbooks[symbol]['bids'].get(data[0]):\n del self.orderbooks[symbol]['bids'][data[0]]\n\n elif data[2] == '-1': # 2.2\n if self.orderbooks[symbol]['asks'].get(data[0]):\n del self.orderbooks[symbol]['asks'][data[0]]", "def map_to_orderbooks(self, raw_orderbooks: HitbtcRawOrderBooks) -> HitbtcOrderBooks:\n\n orderbooks: HitbtcOrderBooks = {}\n for symbol, raw_orderbook in raw_orderbooks.items():\n orderbooks[symbol] = self.map_to_orderbook(raw_orderbook)\n return orderbooks", "def updateOrderbookFull(self, asks, bids):\n self.asks = asks\n self.bids = bids" ]
[ "0.6256506", "0.61004424", "0.60048115", "0.60048115", "0.5979889", "0.590376", "0.5839191", "0.57591915", "0.5754883", "0.57441545", "0.5730442", "0.5697442", "0.5606702", "0.5579016", "0.55562407", "0.55487514", "0.5543467", "0.5517832", "0.5501182", "0.5500108", "0.54459155", "0.54295456", "0.5379534", "0.5366828", "0.5363337", "0.53591394", "0.5351461", "0.53475523", "0.531244", "0.52967894" ]
0.6941159
0
Set up the iCloud Scanner.
def setup_scanner(hass, config, see): from pyicloud import PyiCloudService from pyicloud.exceptions import PyiCloudFailedLoginException from pyicloud.exceptions import PyiCloudNoDevicesException # Get the username and password from the configuration username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) if username is None or password is None: _LOGGER.error('Must specify a username and password') return False try: _LOGGER.info('Logging into iCloud Account') # Attempt the login to iCloud api = PyiCloudService(username, password, verify=True) except PyiCloudFailedLoginException as error: _LOGGER.exception('Error logging into iCloud Service: %s', error) return False def keep_alive(now): """ Keeps authenticating iCloud connection. """ api.authenticate() _LOGGER.info("Authenticate against iCloud") track_utc_time_change(hass, keep_alive, second=0) def update_icloud(now): """ Authenticate against iCloud and scan for devices. """ try: # The session timeouts if we are not using it so we # have to re-authenticate. This will send an email. api.authenticate() # Loop through every device registered with the iCloud account for device in api.devices: status = device.status() location = device.location() # If the device has a location add it. If not do nothing if location: see( dev_id=re.sub(r"(\s|\W|')", '', status['name']), host_name=status['name'], gps=(location['latitude'], location['longitude']), battery=status['batteryLevel']*100, gps_accuracy=location['horizontalAccuracy'] ) else: # No location found for the device so continue continue except PyiCloudNoDevicesException: _LOGGER.info('No iCloud Devices found!') track_utc_time_change( hass, update_icloud, minute=range(0, 60, config.get(CONF_INTERVAL, DEFAULT_INTERVAL)), second=0 ) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_cloud_api(self, args=None):\n pass", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def setup_clouds(auth=None):\n get_operator_cloud(auth)\n get_openstack_cloud(auth)", "def setUp(self):\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()", "def do_start_icloud(self, args):\n\n\n args = self.parse_arguments(args)\n\n plat = get_local_platform_routines()\n user = LocalUserRoutines(plat)\n\n home_dir = user.get_home_dir()\n\n icloud_dir = os.path.join(home_dir, \"iCloudDrive\", \"fiepipe_watch\")\n\n if not os.path.exists(icloud_dir):\n self.perror(\"No such path: \" + icloud_dir)\n return\n\n if not os.path.isdir(icloud_dir):\n self.perror(\"Not a directory: \" + icloud_dir)\n return\n\n routines = self.get_configuration_routines()\n routines.load()\n asset_routines = self.get_asset_shell().get_asset_routines()\n asset_routines.load()\n\n watchfolder_routines = WatcherRoutines(routines.get_configuration(), asset_routines, self.get_feedback_ui())\n self.do_coroutine(watchfolder_routines.start_watching_routine(asset_routines.abs_path, icloud_dir))\n self.do_coroutine(watchfolder_routines.process_queue())", "def setup(args):\n\n start = time.time()\n \n SETUP_SUMMARY_FILE = os.path.splitext(os.path.basename(args.setup.name))[0] + '.cid'\n print SETUP_SUMMARY_FILE\n\n with shell.Step(1):\n print \"Cloud setup validation:\"\n\n # Load cloud configuration\n print \"* Parsing the cloud XML definition file\"\n config = etree.parse(args.setup)\n\n # Validate the configuration file\n print \"* Validating the cloud XML definition against the XML schema\"\n conf.schema('cloud-setup').assertValid(config)\n\n cloud = config.getroot().attrib\n\n # Raise an error if an unmanaged cloud is requested\n print \"* Checking for supported setup type\"\n if 'manager' not in cloud:\n raise NotImplementedError(\"Unmanaged clouds are not yet supported\")\n\n # Instantiate connections\n with shell.Step(2):\n print \"Instantiation of the cloud manager connection:\"\n print \"* Connecting to the VPC manager\"\n c = boto.VPCConnection(args.access_key_id, args.secret_key)\n\n with shell.Step(3):\n print \"Creation and setup of the virtual private cloud:\"\n # Get max vpc size (16) using the cloud subnet IP range\n print \"* Getting or creating the VPC\"\n vpc, created = c.get_or_create(str(cidr.CIDR(cloud['cidr'], 16)))\n subnet_cidr = cidr.CIDR(cloud['cidr'])\n if created:\n print \" └ New VPC created with ID '{0}'\".format(vpc.id)\n print \"* Waiting for VPC creation to complete\",\n vpc = shell.wait(vpc, 'available', interval=0)\n else:\n print \" └ Using existing VPC with ID '{0}'\".format(vpc.id)\n print \"* Checking for valid CIDR block of the existing VPC\"\n vpc_cidr = cidr.CIDR(vpc.cidr_block)\n if subnet_cidr.base not in vpc_cidr:\n raise ValueError(\"The requested subnet CIDR block base \" \\\n \"address ({0}) falls outside the VPC CIDR \" \\\n \"block ({1!s}).\\nAcceptable values are in \" \\\n \"the range {1.base} - {1.last}.\".format(\n subnet_cidr.base, vpc_cidr))\n\n if subnet_cidr.size > vpc_cidr.size:\n raise ValueError(\"The requested subnet CIDR size (/{0.block},\"\\\n \" {0.size} IPs) is too big for the \" \\\n \"existing VPC CIDR size (/{1.block}, {1.size}\"\\\n \" IPs).\".format(subnet_cidr, vpc_cidr))\n\n with shell.Step(4):\n print \"Subnet, gateway, addressing and routing setup:\"\n\n print \"* Getting or creating subnet\"\n subnet, created = vpc.get_or_create_subnet(str(subnet_cidr))\n if created:\n 
print \" └ New subnet created with ID '{0}'\".format(subnet.id)\n else:\n print \" └ Using existing subnet with ID '{0}'\".format(subnet.id)\n\n print \"* Getting or creating internet gateway\"\n gateway, created = vpc.get_or_create_gateway()\n if created:\n print \" └ New gateway created with ID '{0}'\".format(gateway.id)\n else:\n print \" └ Using existing gateway with ID '{0}'\".format(gateway.id)\n\n print \"* Getting public IP address\"\n address, created = c.get_or_create_address()\n if created:\n print \" └ New address created with IP '{0.public_ip}'\".format(\n address\n )\n else:\n print \" └ Using existing address with IP '{0.public_ip}'\".format(\n address\n )\n\n print \"* Setting up routing\"\n print \" └ Getting route table\"\n route_table = c.get_all_route_tables()[0]\n print \" └ Associating route table with subnet\"\n route_table.associate(subnet)\n print \" └ Creating route to internet gateway\"\n route_table.route('0.0.0.0/0', gateway=gateway)\n\n with shell.Step(5):\n print \"Security resources setup:\"\n\n print \"* Creating temporary security group\"\n group = vpc.create_security_group(\n 'pop-' + random_string(16),\n 'Temporary security group for a POP application'\n )\n print \" └ New security group created with ID '{0.id}'\".format(group)\n\n print \"* Authorizing all internal traffic\"\n group.authorize(-1, 0, 65535, src_group=group)\n\n print \"* Authorizing external SSH access\"\n group.authorize('tcp', 22, 22, \"0.0.0.0/0\")\n\n print \"* Creating key pair\"\n key = c.create_key_pair('pop-' + random_string(16))\n print \" └ New key pair created with name '{0.name}'\".format(key)\n\n with shell.Step(6):\n print \"Virtual machines boot process:\"\n\n print \"* Getting needed images\"\n images = c.get_all_images(config.xpath('//setup/machine/@image'))\n images = dict([(image.id, image) for image in images])\n\n print \"* Launching instances\"\n reservations = {}\n for machine in config.xpath('//setup/machine'):\n machine = machine.attrib\n image = images[machine['image']]\n res = image.run(\n key_name=key.name,\n security_groups=[group.id,],\n instance_type=machine.get('type', DEFAULT_MACHINE_TYPE),\n subnet_id=subnet.id,\n private_ip_address=machine['ip'],\n )\n\n print \" └ New reservation (ID: {0}, IP: {1})\".format(\n res.id,\n machine['ip']\n )\n reservations[machine['ip']] = machine, res.instances[0]\n\n print \"* Waiting for machines to boot\"\n for ip, (machine, instance) in reservations.iteritems():\n print \" └ Waiting for machine @ {0} to boot\".format(ip),\n shell.wait(instance, 'running', interval=.5)\n\n print \"* Associating public IP address to POP application manager\"\n address.associate(reservations[cloud['manager']][1])\n\n print \"* Waiting for manager to come online\",\n shell.wait(ConnectionAttempt(address.public_ip, 22), 'connected', interval=.8)\n\n with shell.Step(7):\n print \"Local environment setup:\"\n\n print \"* Saving private key to disk\"\n with open(KEY_FILENAME, 'w') as fh:\n fh.write(key.material)\n os.chmod(KEY_FILENAME, stat.S_IRUSR | stat.S_IWUSR)\n print \" └ Private key written to '{0}'\".format(KEY_FILENAME)\n\n print \"* Generating local fabfile\"\n \n local = os.path.join(os.path.dirname(fabfiles.__file__), 'local.pyt')\n with open(local, 'r') as rfh:\n with open('fabfile.py', 'w') as wfh:\n wfh.write(rfh.read().format(**{\n 'gendate': datetime.today(),\n 'mgraddress': address.public_ip,\n 'remoteuser': USER,\n 'cloudsetup': SETUP_SUMMARY_FILE,\n 'keyfilename': KEY_FILENAME,\n }))\n \n with open('cloud.i.xml', 'w') as 
fh:\n fh.write(xml.format_document(config))\n\n print \"* Saving cloud setup to XML file\"\n cloud.update({\n 'vpc': vpc.id,\n 'subnet': subnet.id,\n 'gateway': gateway.id,\n 'security-group': group.id,\n 'key-pair': key.name,\n 'public-address': address.public_ip,\n 'key-filename': KEY_FILENAME,\n })\n for machine, instance in reservations.itervalues():\n machine['instance-id'] = instance.id\n machine['launch-time'] = instance.launch_time\n\n with open(SETUP_SUMMARY_FILE, 'w') as fh:\n fh.write(xml.format_document(config))\n\n print \"* Removing old public key from known hosts (if present)\"\n\n try:\n with open(KNOWN_HOSTS, 'r') as fh:\n known_hosts = fh.read()\n except:\n print \" └ Could not read {0}\".format(KNOWN_HOSTS)\n else:\n known_hosts, count = re.subn(\n '\\n{0} .*'.format(re.escape(address.public_ip)),\n '',\n known_hosts\n )\n if count:\n try:\n with open(KNOWN_HOSTS, 'w') as fh:\n fh.write(known_hosts)\n except:\n print \" └ Could not write changes back to {0}\".format(\n KNOWN_HOSTS\n )\n else:\n print \" └ Public key for IP {0} removed\".format(\n address.public_ip\n )\n else:\n print \" └ No public key matching IP {0} found\".format(\n address.public_ip\n )\n\n duration = int(time.time() - start)\n duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)\n\n with shell.Wrapper(72):\n print\n print \"Cloud setup completed in {0}; you can manually connect to the \"\\\n \"manager using the following command:\\n\".format(duration)\n\n print shell.hilite(\n \" ssh -i {0} {1}@{2}\".format(KEY_FILENAME, USER, address.public_ip),\n shell.MAGENTA\n )\n\n with shell.Wrapper(72):\n print\n print \"Alternatively, you can use the commands already provided by \" \\\n \"the generated fabfile. To rapidly obtain some help about them,\"\\\n \" execute the following command in the directory where the \" \\\n \"fabfile is located (make sure you have a recent fabric \" \\\n \"installation):\\n\"\n print shell.hilite(\" fab --list\", shell.MAGENTA)", "def __init__(self):\n self.config = config.setup()\n self.log = logging.getLogger(__name__)\n #This block gets interface and interface type from config file\n self._lookupInterfaces()\n #And this one does the same for disks.\n self._lookupDisks()\n self.search_headers = self.config.get('VM', 'search_headers', 'name,uuid')\n self.headers = self.search_headers.split(',')\n def _error_handler(self, err):\n msg = \"Ignoring Libvirt error %s)\" % err\n pass\n # Prevent libvirt errors from reaching the console\n libvirt.registerErrorHandler(_error_handler, None)", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added 
all clouds for group: %s\", self.name)", "def setUpClass(cls):\n rules = [{'id': 9, 'regex': 'sshpass|password|pwd|passwd|pass',\n 'category': 'password', 'description': 'password keywords'}]\n cls.file_scanner = FileScanner(rules)", "def setUp(self) -> None:\n self.eks: EKSClient = boto3.client('eks')\n self.iam: IAMClient = boto3.client('iam')\n self.sts: STSClient = boto3.client('sts')\n self.ec2: EC2Client = boto3.client('ec2')", "def setup(self):\n # TODO : figure out how to make the map interface a singleton class\n\n if not hasattr(self, 'mapInterface'):\n self.mapInterface = MapInterface(settings['FILE_CONFIG']['filename'])", "def setup(self):\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def __init__(self, main):\n InjectionScannerBase.__init__(self, main)", "def setup(self):\n\n self.parser = GingerIt()", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()" ]
[ "0.5866603", "0.5790116", "0.5790116", "0.5620999", "0.55827475", "0.5516062", "0.54825497", "0.53748256", "0.533198", "0.53023446", "0.52686286", "0.52626604", "0.5261257", "0.5261257", "0.5261257", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.52435845", "0.5230558", "0.5224943", "0.5223863" ]
0.68545204
0
Keeps authenticating iCloud connection.
def keep_alive(now): api.authenticate() _LOGGER.info("Authenticate against iCloud")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate(self):\n\n LOGGER.info(f\"Authenticating as {self.user['apple_id']}\")\n\n data = dict(self.user)\n\n # We authenticate every time, so \"remember me\" is not needed\n #data.update({\"extended_login\": False})\n data.update({\"extended_login\": True})\n\n try:\n req = self.session.post(\n self._base_login_url, params=self.params, data=json.dumps(data)\n )\n except PyiCloudAPIResponseException as error:\n msg = \"Invalid email/password combination.\"\n raise PyiCloudFailedLoginException(msg, error)\n\n self.data = req.json()\n self.params.update({\"dsid\": self.data[\"dsInfo\"][\"dsid\"]})\n self._webservices = self.data[\"webservices\"]\n\n if not path.exists(self._cookie_directory):\n mkdir(self._cookie_directory)\n self.session.cookies.save()\n LOGGER.debug(f\"Cookies saved to {self._get_cookiejar_path()}\")\n\n LOGGER.info(\"Authentication completed successfully\")\n LOGGER.debug(self.params)", "def authenticate(self):\n # self.qobject.remove_authenticate_signal.emit()\n # self.qobject.authenticate_signal.emit( )\n #if self.app.sync_thread.status != const.STATUS_SYNC:\n # self.app.sync_thread.force_sync()\n change_auth_token( )\n self.data_changed()", "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def update_icloud(now):\n try:\n # The session timeouts if we are not using it so we\n # have to re-authenticate. This will send an email.\n api.authenticate()\n # Loop through every device registered with the iCloud account\n for device in api.devices:\n status = device.status()\n location = device.location()\n # If the device has a location add it. If not do nothing\n if location:\n see(\n dev_id=re.sub(r\"(\\s|\\W|')\",\n '',\n status['name']),\n host_name=status['name'],\n gps=(location['latitude'], location['longitude']),\n battery=status['batteryLevel']*100,\n gps_accuracy=location['horizontalAccuracy']\n )\n else:\n # No location found for the device so continue\n continue\n except PyiCloudNoDevicesException:\n _LOGGER.info('No iCloud Devices found!')", "def credentials_work(self):\n good = True\n try:\n self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def _stayAlive(self):\n\n if not self._validate_exp():\n self.s.auth = JWTAuth(self.getJWTtoken().tokenValue)", "def authenticate(self):\n self.connection.authenticate()", "def authenticate(self):\n log.info(\n \"Attempting authentication to vCenter instance '%s'.\",\n self.vc_host\n )\n try:\n vc_instance = SmartConnectNoSSL(\n host=self.vc_host,\n port=self.vc_port,\n user=self.vc_user,\n pwd=self.vc_pass,\n )\n atexit.register(Disconnect, vc_instance)\n self.vc_session = vc_instance.RetrieveContent()\n log.info(\n \"Successfully authenticated to vCenter instance '%s'.\",\n self.vc_host\n )\n except (gaierror, vim.fault.InvalidLogin, OSError) as err:\n if isinstance(err, OSError):\n err = \"System unreachable.\"\n err_msg = (\n \"Unable to connect to vCenter instance '{}' on port {}. 
\"\n \"Reason: {}\".format(self.vc_host, self.vc_port, err)\n )\n log.critical(err_msg)\n raise ConnectionError(err_msg)", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def _connect(self):\n cluster = Cluster('http://{}:{}'.format(self.host, self.port))\n authenticator = PasswordAuthenticator('Administrator', self.password)\n cluster.authenticate(authenticator)\n self.client = cluster.open_bucket(self.bucket)", "def setup_scanner(hass, config, see):\n from pyicloud import PyiCloudService\n from pyicloud.exceptions import PyiCloudFailedLoginException\n from pyicloud.exceptions import PyiCloudNoDevicesException\n\n # Get the username and password from the configuration\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n if username is None or password is None:\n _LOGGER.error('Must specify a username and password')\n return False\n\n try:\n _LOGGER.info('Logging into iCloud Account')\n # Attempt the login to iCloud\n api = PyiCloudService(username,\n password,\n verify=True)\n except PyiCloudFailedLoginException as error:\n _LOGGER.exception('Error logging into iCloud Service: %s', error)\n return False\n\n def keep_alive(now):\n \"\"\" Keeps authenticating iCloud connection. \"\"\"\n api.authenticate()\n _LOGGER.info(\"Authenticate against iCloud\")\n\n track_utc_time_change(hass, keep_alive, second=0)\n\n def update_icloud(now):\n \"\"\" Authenticate against iCloud and scan for devices. \"\"\"\n try:\n # The session timeouts if we are not using it so we\n # have to re-authenticate. This will send an email.\n api.authenticate()\n # Loop through every device registered with the iCloud account\n for device in api.devices:\n status = device.status()\n location = device.location()\n # If the device has a location add it. 
If not do nothing\n if location:\n see(\n dev_id=re.sub(r\"(\\s|\\W|')\",\n '',\n status['name']),\n host_name=status['name'],\n gps=(location['latitude'], location['longitude']),\n battery=status['batteryLevel']*100,\n gps_accuracy=location['horizontalAccuracy']\n )\n else:\n # No location found for the device so continue\n continue\n except PyiCloudNoDevicesException:\n _LOGGER.info('No iCloud Devices found!')\n\n track_utc_time_change(\n hass, update_icloud,\n minute=range(0, 60, config.get(CONF_INTERVAL, DEFAULT_INTERVAL)),\n second=0\n )\n\n return True", "def maintainConnection():\n return RoboCaller().call(\"maintainConnection\", \"void\")", "def connect(self):\n try:\n self.session = Session(aws_access_key_id=access_key, aws_secret_access_key=secret_access_key)\n self.s3_resource = self.session.resource('s3')\n self.bucket = self.s3_resource.Bucket(self.bucket_name)\n except Exception as e:\n raise Exception('Some Error occurred while connecting to the cloud storage')\n return", "def authenticate(self):\n # generate the necessary information for the authentication\n timestamp = int(time.time())\n password = hashlib.sha256(self.password).hexdigest()\n authkey = hashlib.sha256(str(timestamp) + password).hexdigest()\n values = {\n 'action' : 'handshake',\n 'auth' : authkey,\n 'timestamp' : timestamp,\n 'user' : self.username,\n 'version' : API_VERSION,\n }\n\n # now send the authentication request to Ampache\n try:\n res = self.__call_api(values)\n for k,v in res.items():\n res[k] = v[0]['child']\n # Save the data returned from the initial authentication\n self.auth_data = res\n except Exception as e: # couldn't auth, try up to AUTH_MAX_RETRY times\n print(e)\n print('[Error] Authentication Failed')\n error = None\n try: # to find the error\n error = dom.getElementsByTagName(\"error\")[0].childNodes[0].data\n print(\"[Error] Authentication Failed :: %s\" % error)\n return error\n except: # no error found.. 
must have failed because data was sent to wrong place\n return False\n # if it made it this far, the auth was successfull, now check to see if the catalog needs updating\n try:\n # convert ISO 8601 to epoch\n update = int(time.mktime(time.strptime( self.auth_data['update'][:-6], \"%Y-%m-%dT%H:%M:%S\" )))\n add = int(time.mktime(time.strptime( self.auth_data['add'][:-6], \"%Y-%m-%dT%H:%M:%S\" )))\n clean = int(time.mktime(time.strptime( self.auth_data['clean'][:-6], \"%Y-%m-%dT%H:%M:%S\" )))\n\n new_time = max([update, add, clean])\n self.last_update_time = new_time\n except Exception as detail:\n print(\"Couldn't get time catalog was updated -- assuming catalog is dirty -- \", detail)\n self.last_update_time = -1\n return self.auth_data", "def authenticate(self):\n try:\n self._token = self._lookup_token()\n except:\n raise HTTPError(\n \"Unable to get short-lived access token for cyberark storage\"\n )", "def auth(self):\n if self.get_saved_token():\n return\n self.oauth2()\n self.save_token()", "def _connect(self):\n try:\n self._si = SmartConnectNoSSL(host=self._host, user=self._username, pwd=self._password)\n except Exception as e:\n self._logger.error(\"Unable to connect to host {0} : {1}\".format(self._host, e))\n self._si = None", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def __init__(self):\n self._url_base = None\n self._keystone = None\n self._auth_token = None\n self._auth_lock = threading.Lock()\n self._failed_auth = False", "def reconnect(self):\r\n if self._stopped.is_set():\r\n self._safe_close()\r\n return\r\n\r\n def safe_close(zh):\r\n try:\r\n zookeeper.close(zh)\r\n except:\r\n # TODO(wickman) When the SystemError bug is fixed in zkpython, narrow this except clause.\r\n pass\r\n\r\n def activate():\r\n self._authenticated.set()\r\n self._live.set()\r\n\r\n def on_authentication(zh, rc):\r\n if self._zh != zh:\r\n safe_close(zh)\r\n return\r\n if rc == zookeeper.OK:\r\n activate()\r\n\r\n def maybe_authenticate():\r\n if self._authenticated.is_set() or not self._credentials:\r\n activate()\r\n return\r\n try:\r\n scheme, credentials = self._credentials\r\n zookeeper.add_auth(self._zh, scheme, credentials, on_authentication)\r\n except zookeeper.ZooKeeperException as e:\r\n self._logger('Failed to authenticate: %s' % e)\r\n\r\n def connection_handler(handle, type, state, path):\r\n if self._zh != handle:\r\n safe_close(handle)\r\n return\r\n if self._stopped.is_set():\r\n return\r\n if self._watch:\r\n self._watch(self, type, state, path)\r\n if state == zookeeper.CONNECTED_STATE:\r\n self._logger('Connection started, setting live.')\r\n maybe_authenticate()\r\n self._clear_completions()\r\n elif state == zookeeper.EXPIRED_SESSION_STATE:\r\n self._logger('Session lost, clearing live state.')\r\n self._session_expirations.increment()\r\n self._live.clear()\r\n self._authenticated.clear()\r\n self._zh = None\r\n self._init_count = 0\r\n self.reconnect()\r\n else:\r\n self._logger('Connection lost, clearing live state.')\r\n self._connection_losses.increment()\r\n self._live.clear()\r\n\r\n # this closure is exposed for testing only -- in order to simulate session events.\r\n self._handler = connection_handler\r\n\r\n timeout_ms = int(self._timeout_secs * 1000)\r\n while True:\r\n self._safe_close()\r\n servers = self.expand_ensemble(self._servers)\r\n self._log('Connecting to ZK hosts at %s' % servers)\r\n self._zh = zookeeper.init(servers, connection_handler, timeout_ms)\r\n self._init_count += 
1\r\n self._live.wait(self._timeout_secs + 1)\r\n if self._live.is_set():\r\n break\r\n elif self._max_reconnects > 0 and self._init_count >= self._max_reconnects:\r\n self._safe_close()\r\n raise ZooKeeper.ConnectionTimeout('Timed out waiting for ZK connection to %s' % servers)\r\n self._log('Successfully connected to ZK at %s' % servers)", "def pycloud(ctx, log_level):\n KeyPairStorage.initialize()\n PyCloudConfig.initialize_state_mgmt()\n\n ctx.obj = {}\n configure_logger(level=log_level)\n return 0", "def _check_authentication(self) -> NoReturn:\n if not self.heartbeat():\n self.authenticate()", "def authenticate_client(self):\n\n success = False\n prefix = self.build_endpoint_prefix()\n endpoint = prefix + \"/oauth/access_token?grant_type=client_credentials\"\n combined = str(self.key_id) + ':' + str(self.secret)\n message_bytes = combined.encode('utf-8')\n base64_bytes = base64.b64encode(message_bytes)\n base64_message = base64_bytes.decode('utf-8')\n headers = {\"Authorization\": str(\"Basic \" + base64_message)}\n max_tries = 5\n for _ in range(max_tries):\n token, scope = self.get_auth_token(endpoint, headers)\n if token == \"BAD\":\n # Add message for IP restrictions\n exc_msg = \"Invalid credentials- can not obtain session token.\"\n raise CloudPassageAuthentication(exc_msg)\n if token is not None:\n self.auth_token = token\n self.auth_scope = scope\n success = True\n break\n else:\n time.sleep(1)\n return success", "def configure_cloud(self):\n \n # if API check fails, log error output - actually probably not since we have interactivity here\n again = True\n while again:\n # print info\n print(\"Welcome to the cloud configuration center.\\n\")\n print(\"Here you can enter your plant ID and activation key to link it to \"\n \"the cloud and enable data uploads.\")\n print(\"The activation details for the plants you own are available on the web \"\n \"application\")\n # get plant ID from user\n entered_id = input(\n \"Please enter the plant ID (enter nothing to cancel): \")\n if entered_id != \"\":\n # get plant key/password from user\n entered_key = input(\n \"Please enter the plant activation key (enter nothing to cancel): \")\n if entered_key != \"\":\n hasher = hashlib.sha256()\n hasher.update(bytes(entered_key, 'utf-8'))\n # Uncomment this line and comment the one after if want hashing\n # hashed_key = hasher.hexdigest()\n hashed_key = entered_key\n # verify entered details\n verified = self.verify_plant(entered_id, hashed_key)\n\n if verified:\n # save details to file if valid, exit cloud center\n json_info = {\"plant_id\": entered_id,\n \"plant_key\": hashed_key}\n with open(CONFIG_FILE_PATH, 'w+') as config_file:\n json.dump(json_info, config_file)\n print(\"Successful cloud link! \\n\")\n again = False\n else:\n # option to try again if verification failed (invalid details)\n print(\"Error: plant ID or activation key is incorrect.\")\n user_choice = input(\"Try again? 
(y/n): \")\n if user_choice.lower() == \"y\":\n again = True\n else:\n again = False\n else:\n # entering nothing exits the cloud center\n again = False\n else:\n # entering nothing exits the cloud center\n again = False", "def init_api():\n global soundcloud\n import json\n \n SECRETS_VERSION = 1\n \n # Load secrets file\n if os.path.exists(config.token_cache):\n with open(config.token_cache, 'r', encoding='utf-8') as f:\n secrets = json.load(f)\n else:\n secrets = {}\n \n # Try to reuse the cached access token\n if secrets\\\n and secrets['version'] == SECRETS_VERSION\\\n and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\\\n and secrets['username'] == config.username:\n \n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n access_token=secrets['access_token']\n )\n return\n \n # Get a new access token\n logging.info('Getting a new access token') \n try:\n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n username=config.username,\n password=config.password\n )\n except HTTPError as e:\n if e.response.status_code == 401:\n logging.critical('Incorrect API key, login or password. Please, edit config.py.')\n sys.exit(1)\n else:\n raise\n \n # Save the token\n secrets = {\n 'version': SECRETS_VERSION,\n 'username': config.username,\n 'access_token': soundcloud.access_token,\n 'access_token_acquired_at': time(),\n 'access_token_expires_in': soundcloud.token.expires_in,\n }\n \n with open(config.token_cache, 'w', encoding='utf-8') as f:\n secrets = json.dump(secrets, f, indent='\\t', ensure_ascii=False)", "def login(self):\n try:\n self._service_instance = connect.SmartConnect(host=self.address,\n user=self.username,\n pwd=self.password,\n port=self.port,\n sslContext=self.sslContext)\n #connectionPoolTimeout=self.timeout)\n except Exception as err:\n raise err", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def m_apiInstance_AuthenticationStatusUpdate(self, sender, e):\r\n if e.Status.IsSuccess:\r\n # Add code here to begin working with the TT API\r\n # lookup an instrument\r\n self.m_req = ttapi.InstrumentLookupSubscription(self.m_apiInstance.Session, ttapi.Dispatcher.Current, ttapi.ProductKey(ttapi.MarketKey.Cme, ttapi.ProductType.Future, \"YM\"), \"Jun17\")\r\n self.m_req.Update += self.m_req_Update\r\n print(\"Connection Success!\")\r\n self.m_req.Start()\r\n else:\r\n print(\"TT Login failed: {0}\".format(e.Status.StatusMessage))\r\n self.Dispose()", "def signedOn(self):\n # create a session to respond to private messages from nicks\n # not in any channel I'm in\n\n self.ircNetwork = u'TODO' # TODO \n\n self.defaultSession = self.store.find(d20session.D20Session,\n d20session.D20Session.name == u'#@@default@@').one()\n self.defaultSession.isDefaultSession = True\n # join my default channel\n self.join(self.factory.channel)", "def _login(self):\n self.logger.debug(\"Logging into \" + \"{}/{}\".format(self._im_api_url, \"j_spring_security_check\"))\n self._im_session.headers.update({'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'})\n 
#self._im_session.mount('https://', TLS1Adapter())\n #self._im_verify_ssl = False\n self.j_username = self._username\n self.j_password = self._password\n requests.packages.urllib3.disable_warnings() # Disable unverified connection warning.\n payload = {'j_username': self.j_username, 'j_password': self.j_password, 'submit':'Login'}\n \n # login to ScaleIO IM\n r = self._im_session.post(\n \"{}/{}\".format(self._im_api_url,\"j_spring_security_check\"),\n verify=self._im_verify_ssl,\n #headers = {'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'},\n data=payload)\n self.logger.debug(\"Login POST response: \" + \"{}\".format(r.text))\n\n self._im_logged_in = True\n \n \"\"\"\n ADD CODE:\n Check if this is IM have existing configuration. If so populate ScaleIO_configurtion_object\n \"\"\"" ]
[ "0.60142636", "0.58762807", "0.5862534", "0.58443606", "0.5819822", "0.5804538", "0.57355726", "0.5642202", "0.558887", "0.5545338", "0.5468961", "0.5466535", "0.54000777", "0.5383877", "0.5376469", "0.5353568", "0.53381795", "0.52868426", "0.522603", "0.52217215", "0.521283", "0.52077293", "0.5202446", "0.5197784", "0.5159528", "0.5156795", "0.5147419", "0.513379", "0.51276624", "0.5123702" ]
0.8251854
0
Perform bad channel detection. The recording is assumed to be filtered. If not, a highpass filter is applied on the fly.
def detect_bad_channels( recording, method="coherence+psd", std_mad_threshold=5, psd_hf_threshold=0.02, dead_channel_threshold=-0.5, noisy_channel_threshold=1.0, outside_channel_threshold=-0.75, n_neighbors=11, nyquist_threshold=0.8, direction="y", chunk_duration_s=0.3, num_random_chunks=10, welch_window_ms=10.0, highpass_filter_cutoff=300, neighborhood_r2_threshold=0.9, neighborhood_r2_radius_um=30.0, seed=None, ): import scipy.stats method_list = ("std", "mad", "coherence+psd", "neighborhood_r2") assert method in method_list, f"{method} is not a valid method. Available methods are {method_list}" # Get random subset of data to estimate from random_chunk_kwargs = dict( num_chunks_per_segment=num_random_chunks, chunk_size=int(chunk_duration_s * recording.sampling_frequency), seed=seed, ) # If recording is not filtered, apply a highpass filter if not recording.is_filtered(): recording_hp = highpass_filter(recording, freq_min=highpass_filter_cutoff) else: recording_hp = recording # Adjust random chunk kwargs based on method if method in ("std", "mad"): random_chunk_kwargs["return_scaled"] = False random_chunk_kwargs["concatenated"] = True elif method == "coherence+psd": random_chunk_kwargs["return_scaled"] = True random_chunk_kwargs["concatenated"] = False elif method == "neighborhood_r2": random_chunk_kwargs["return_scaled"] = False random_chunk_kwargs["concatenated"] = False random_data = get_random_data_chunks(recording_hp, **random_chunk_kwargs) channel_labels = np.zeros(recording.get_num_channels(), dtype="U5") channel_labels[:] = "good" if method in ("std", "mad"): if method == "std": deviations = np.std(random_data, axis=0) else: deviations = scipy.stats.median_abs_deviation(random_data, axis=0) thresh = std_mad_threshold * np.median(deviations) mask = deviations > thresh bad_channel_ids = recording.channel_ids[mask] channel_labels[mask] = "noise" elif method == "coherence+psd": # some checks assert recording.has_scaled(), ( "The 'coherence+psd' method uses thresholds assuming the traces are in uV, " "but the recording does not have scaled traces. If the recording is already scaled, " "you need to set gains and offsets: " ">>> recording.set_channel_gains(1); recording.set_channel_offsets(0)" ) assert 0 < nyquist_threshold < 1, "nyquist_threshold must be between 0 and 1" # If location are not sorted, estimate forward and reverse sorting channel_locations = recording.get_channel_locations() dim = ["x", "y", "z"].index(direction) assert dim < channel_locations.shape[1], f"Direction {direction} is wrong" locs_depth = channel_locations[:, dim] if np.array_equal(np.sort(locs_depth), locs_depth): order_f = None order_r = None else: # sort by x, y to avoid ambiguity order_f, order_r = order_channels_by_depth(recording=recording, dimensions=("x", "y")) # Create empty channel labels and fill with bad-channel detection estimate for each chunk chunk_channel_labels = np.zeros((recording.get_num_channels(), len(random_data)), dtype=np.int8) for i, random_chunk in enumerate(random_data): random_chunk_sorted = random_chunk[order_f] if order_f is not None else random_chunk chunk_channel_labels[:, i] = detect_bad_channels_ibl( raw=random_chunk_sorted, fs=recording.sampling_frequency, psd_hf_threshold=psd_hf_threshold, dead_channel_thr=dead_channel_threshold, noisy_channel_thr=noisy_channel_threshold, outside_channel_thr=outside_channel_threshold, n_neighbors=n_neighbors, nyquist_threshold=nyquist_threshold, welch_window_ms=welch_window_ms, ) # Take the mode of the chunk estimates as final result. 
Convert to binary good / bad channel output. mode_channel_labels, _ = scipy.stats.mode(chunk_channel_labels, axis=1, keepdims=False) if order_r is not None: mode_channel_labels = mode_channel_labels[order_r] (bad_inds,) = np.where(mode_channel_labels != 0) bad_channel_ids = recording.channel_ids[bad_inds] channel_labels[mode_channel_labels == 1] = "dead" channel_labels[mode_channel_labels == 2] = "noise" channel_labels[mode_channel_labels == 3] = "out" if bad_channel_ids.size > recording.get_num_channels() / 3: warnings.warn( "Over 1/3 of channels are detected as bad. In the precense of a high" "number of dead / noisy channels, bad channel detection may fail " "(erroneously label good channels as dead)." ) elif method == "neighborhood_r2": # make neighboring channels structure. this should probably be a function in core. geom = recording.get_channel_locations() num_channels = recording.get_num_channels() chan_distances = np.linalg.norm(geom[:, None, :] - geom[None, :, :], axis=2) np.fill_diagonal(chan_distances, neighborhood_r2_radius_um + 1) neighbors_mask = chan_distances < neighborhood_r2_radius_um if neighbors_mask.sum(axis=1).min() < 1: warnings.warn( f"neighborhood_r2_radius_um={neighborhood_r2_radius_um} led " "to channels with no neighbors for this geometry, which has " f"minimal channel distance {chan_distances.min()}um. These " "channels will not be marked as bad, but you might want to " "check them." ) max_neighbors = neighbors_mask.sum(axis=1).max() channel_index = np.full((num_channels, max_neighbors), num_channels) for c in range(num_channels): my_neighbors = np.flatnonzero(neighbors_mask[c]) channel_index[c, : my_neighbors.size] = my_neighbors # get the correlation of each channel with its neighbors' median inside each chunk # note that we did not concatenate the chunks here correlations = [] for chunk in random_data: chunk = chunk.astype(np.float32, copy=False) chunk = chunk - np.median(chunk, axis=0, keepdims=True) padded_chunk = np.pad(chunk, [(0, 0), (0, 1)], constant_values=np.nan) # channels with no neighbors will get a pure-nan median trace here neighbmeans = np.nanmedian( padded_chunk[:, channel_index], axis=2, ) denom = np.sqrt(np.nanmean(np.square(chunk), axis=0) * np.nanmean(np.square(neighbmeans), axis=0)) denom[denom == 0] = 1 # channels with no neighbors will get a nan here chunk_correlations = np.nanmean(chunk * neighbmeans, axis=0) / denom correlations.append(chunk_correlations) # now take the median over chunks and threshold to finish median_correlations = np.nanmedian(correlations, 0) r2s = median_correlations**2 # channels with no neighbors will have r2==nan, and nan<x==False always bad_channel_mask = r2s < neighborhood_r2_threshold bad_channel_ids = recording.channel_ids[bad_channel_mask] channel_labels[bad_channel_mask] = "noise" return bad_channel_ids, channel_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_bad_channels_ibl(\n raw,\n fs,\n psd_hf_threshold,\n dead_channel_thr=-0.5,\n noisy_channel_thr=1.0,\n outside_channel_thr=-0.75,\n n_neighbors=11,\n nyquist_threshold=0.8,\n welch_window_ms=0.3,\n):\n _, nc = raw.shape\n raw = raw - np.mean(raw, axis=0)[np.newaxis, :]\n nperseg = int(welch_window_ms * fs / 1000)\n import scipy.signal\n\n fscale, psd = scipy.signal.welch(raw, fs=fs, axis=0, window=\"hann\", nperseg=nperseg)\n\n # compute similarities\n ref = np.median(raw, axis=1)\n xcorr = np.sum(raw * ref[:, np.newaxis], axis=0) / np.sum(ref**2)\n\n # compute coherence\n xcorr_neighbors = detrend(xcorr, n_neighbors)\n xcorr_distant = xcorr - detrend(xcorr, n_neighbors) - 1\n\n # make recommendation\n psd_hf = np.mean(psd[fscale > (fs / 2 * nyquist_threshold), :], axis=0)\n\n ichannels = np.zeros(nc, dtype=int)\n idead = np.where(xcorr_neighbors < dead_channel_thr)[0]\n inoisy = np.where(np.logical_or(psd_hf > psd_hf_threshold, xcorr_neighbors > noisy_channel_thr))[0]\n\n ichannels[idead] = 1\n ichannels[inoisy] = 2\n\n # the channels outside of the brains are the contiguous channels below the threshold on the trend coherency\n # the chanels outide need to be at either extremes of the probe\n ioutside = np.where(xcorr_distant < outside_channel_thr)[0]\n if ioutside.size > 0 and (ioutside[-1] == (nc - 1) or ioutside[0] == 0):\n a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])\n ioutside = ioutside[a == np.max(a)]\n ichannels[ioutside] = 3\n\n return ichannels", "def deal_with_bad_channels(self, selection_method, plot=True, threshold_sd_of_mean=40, interpolate=True,\n file_path=None):\n # TODO: (Everyone) Check how well the automatic detection works on your data\n\n if file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n \n if selection_method == \"automatic\":\n if self.epochs is None:\n raise AttributeError('Please create epochs first, as the automatic algorithm needs them to work.')\n else:\n df = self.epochs.to_data_frame()\n\n group = df.groupby('epoch')\n mean = group.mean()\n\n a = mean.std()\n a = a[1:]\n print('standard deviation of mean across epochs:')\n print(np.mean(a), np.std(a))\n print('higher than %s:' % threshold_sd_of_mean)\n print(a[a > threshold_sd_of_mean].index)\n\n for i in a[a > threshold_sd_of_mean].index:\n self.raw.info['bads'].append(i)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n elif selection_method == \"file\":\n bads = pd.read_csv(file_name)\n self.raw.info['bads'] = list(bads['bad_channels'].values)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n elif selection_method != \"manual\":\n ValueError(\"selection_method can be automatic, file, or manual\")\n\n if plot or selection_method == \"manual\":\n self.raw.plot(block=True)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n if file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n 
index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n if interpolate:\n \"Interpolating bad channels...\"\n if len(self.raw.info['bads']) > 0:\n self.raw.interpolate_bads(reset_bads=True)", "def _find_bad_channels(cfg, raw, subject, session, task, run) -> None:\n if not (cfg.find_flat_channels_meg or cfg.find_noisy_channels_meg):\n return\n\n if (cfg.find_flat_channels_meg and\n not cfg.find_noisy_channels_meg):\n msg = 'Finding flat channels.'\n elif (cfg.find_noisy_channels_meg and\n not cfg.find_flat_channels_meg):\n msg = 'Finding noisy channels using Maxwell filtering.'\n else:\n msg = ('Finding flat channels, and noisy channels using '\n 'Maxwell filtering.')\n\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n\n bids_path = BIDSPath(subject=subject,\n session=session,\n task=task,\n run=run,\n acquisition=acq,\n processing=proc, # XXX : what is proc?\n recording=cfg.rec,\n space=cfg.space,\n suffix=cfg.datatype,\n datatype=cfg.datatype,\n root=cfg.deriv_root)\n\n auto_noisy_chs, auto_flat_chs, auto_scores = \\\n mne.preprocessing.find_bad_channels_maxwell(\n raw=raw,\n calibration=cfg.mf_cal_fname,\n cross_talk=cfg.mf_ctc_fname,\n origin=mf_head_origin,\n coord_frame='head',\n return_scores=True\n )\n\n preexisting_bads = raw.info['bads'].copy()\n bads = preexisting_bads.copy()\n\n if find_flat_channels_meg:\n msg = f'Found {len(auto_flat_chs)} flat channels.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n bads.extend(auto_flat_chs)\n if find_noisy_channels_meg:\n msg = f'Found {len(auto_noisy_chs)} noisy channels.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n bads.extend(auto_noisy_chs)\n\n bads = sorted(set(bads))\n raw.info['bads'] = bads\n msg = f'Marked {len(raw.info[\"bads\"])} channels as bad.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n\n if find_noisy_channels_meg:\n auto_scores_fname = bids_path.copy().update(\n suffix='scores', extension='.json', check=False)\n with open(auto_scores_fname, 'w') as f:\n json_tricks.dump(auto_scores, fp=f, allow_nan=True,\n sort_keys=False)\n\n if interactive:\n import matplotlib.pyplot as plt\n plot_auto_scores(auto_scores)\n plt.show()\n\n # Write the bad channels to disk.\n bads_tsv_fname = bids_path.copy().update(suffix='bads',\n extension='.tsv',\n check=False)\n bads_for_tsv = []\n reasons = []\n\n if find_flat_channels_meg:\n bads_for_tsv.extend(auto_flat_chs)\n reasons.extend(['auto-flat'] * len(auto_flat_chs))\n preexisting_bads = set(preexisting_bads) - set(auto_flat_chs)\n\n if find_noisy_channels_meg:\n bads_for_tsv.extend(auto_noisy_chs)\n reasons.extend(['auto-noisy'] * len(auto_noisy_chs))\n preexisting_bads = set(preexisting_bads) - set(auto_noisy_chs)\n\n preexisting_bads = list(preexisting_bads)\n if preexisting_bads:\n bads_for_tsv.extend(preexisting_bads)\n reasons.extend(['pre-existing (before MNE-BIDS-pipeline was run)'] *\n len(preexisting_bads))\n\n tsv_data = pd.DataFrame(dict(name=bads_for_tsv, reason=reasons))\n tsv_data = tsv_data.sort_values(by='name')\n tsv_data.to_csv(bads_tsv_fname, sep='\\t', index=False)", "def detect(self, frame, cur_count):\n self.variables['is_black'] = False\n self.process(frame, cur_count)", "def _pick_bad_channels(event, params):\n # Both bad lists are updated. 
params['info'] used for colors.\n bads = params['raw'].info['bads']\n params['info']['bads'] = _select_bads(event, params, bads)\n _plot_update_raw_proj(params, None)", "def test_brainvision_data_filters():\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_highpass_path,\n montage=montage, eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n\n assert_equal(raw.info['highpass'], 0.1)\n assert_equal(raw.info['lowpass'], 250.)", "def detect_badchannels(raw, picks, ref_meg=\"auto\", significance_level=0.05):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n ch_names = np.array(raw.ch_names)[chinds]\n\n bdinds = sails.utils.detect_artefacts(\n raw.get_data(picks=chinds),\n axis=0,\n reject_mode=\"dim\",\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n s = \"Modality {0} - {1}/{2} channels rejected ({3:02f}%)\"\n pc = (bdinds.sum() / len(bdinds)) * 100\n logger.info(s.format(picks, bdinds.sum(), len(bdinds), pc))\n\n # concatenate newly found bads to existing bads\n if np.any(bdinds):\n raw.info[\"bads\"].extend(list(ch_names[np.where(bdinds)[0]]))\n\n return raw", "def detect(self, frame, cur_count, player):\n if cur_count % self.freq is 0:\n frame = cv.GaussianBlur(frame, (3, 3), 1)\n self.process(frame, cur_count, player)", "def low_pass_filter_anomaly_detection(event, df,\r\n column_name,hl):\r\n\r\n number_of_stdevs_away_from_mean = 3\r\n #60-day rolling average\r\n df[column_name+'_Rolling_Average']=df[column_name].rolling(window=60, center=True).mean()\r\n #60-day standard deviation\r\n df[column_name+'_Rolling_StDev']=df[column_name].rolling(window=60, center=True).std()\r\n #Detect anomalies by determining how far away from the mean (in terms of standard deviation)\r\n #each data point is\r\n df['Filter_Anomaly']=(abs(df[column_name]-df[\r\n column_name+'_Rolling_Average'])>(\r\n number_of_stdevs_away_from_mean*df[\r\n column_name+'_Rolling_StDev']))\r\n #df['Cleaned']=np.where(df['VOL_ACT_Low_Pass_Filter_Anomaly'] == True, datetime(2019,2,2),df['VOL_ACT'])\r\n df['Clear '+column_name]=df[column_name]\r\n for ind in df.index.values:\r\n if (df['Filter_Anomaly'].loc[ind] == True) and not(ind in hl.index.values):\r\n df['Clear '+column_name].loc[ind] = fa.P_clean(df, ind, column_name)\r\n\r\n df = df.drop([column_name+'_Rolling_StDev',column_name+'_Rolling_Average'], axis = 1)\r\n\r\n\r\n print(df)\r\n return df", "def test_wifi_scanner_batch_scan_channel_sanity(self):\n scan_setting = {\"channels\": self.wifi_chs.MIX_CHANNEL_SCAN,\n \"periodInMs\": SCANTIME,\n \"reportEvents\":\n wutils.WifiEnums.REPORT_EVENT_AFTER_BUFFER_FULL}\n self.wifi_scanner_batch_scan(scan_setting)", "def pre_filter_channels(self, channels=None): # pragma: no cover\n pass", "def post_filter_channels(self, channels=None): # pragma: no cover\n # Remove the DC component...\n # 
level_data_for(channels)\n pass", "def qc_Bad_Chans(infile, mad_rms, med_rms):\n\n BAD_CHAN = []\n\n stat_file = open(infile, 'r')\n LINES = stat_file.readlines()[2:]\n stat_file.close()\n\n threshold = 1.2 # value selected to be more consistent with SoFiA flagged criterion\n \n# value = med_madfm + 0.4 # Deviation from the med_madfm. Need to check with larger sample of data to decide the best value. \n\n for i in range(len(LINES)):\n line = LINES[i]\n TOKS = line.split()\n chan = TOKS[0]\n # madfm = float(TOKS[5])\n rms = float(TOKS[3])\n \n value = abs(rms - med_rms)\n criterion = 1.4826*threshold*mad_rms\n if value > criterion:\n BAD_CHAN.append(chan)\n\n if BAD_CHAN == []:\n BAD_CHAN.append('none')\n QC_badchan_id = 'good'\n else:\n QC_badchan_id = 'bad'\n\n mosaic_bad_chan = 'mosaic_badchans.txt'\n print (','.join(BAD_CHAN), file=open(fig_dir + '/' + mosaic_bad_chan,'w'))\n\n n_bad_chan = len(BAD_CHAN)\n\n # Check if number of bad channel recorded is 1. If yes, check if is it a none keyword.\n # If yes, number of bad channel should be 0.\n \n if n_bad_chan == 1:\n with open(fig_dir + '/' + mosaic_bad_chan) as f:\n if 'none' in f.read():\n n_bad_chan = 0\n print ('yes')\n \n return n_bad_chan, mosaic_bad_chan, QC_badchan_id", "def process(self, frame, cur_count):\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n _, gray = cv.threshold(gray, 30, 255, cv.THRESH_BINARY)\n black_count = float(np.sum(gray)) / float(gray.size)\n # If at least 80% of the frame is true black, race has stopped\n if black_count <= 0.2:\n self.handle(frame, cur_count)", "def _remove_flux_extinction(self):\n self.fluxUnred = self.flux.copy()\n self.fluxErrUnred = self.fluxErr.copy()\n self.fluxRenorm = self.flux.copy()\n self.fluxErrRenorm = self.fluxErr.copy()\n\n # Using negative a_v so that extinction.apply works in reverse and removes the extinction\n if self.mwebv:\n extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \\\n a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')\n\n for i, pb in enumerate(self._good_filters):\n mask = (self.passband == pb)\n\n flux_pb = self.flux[mask]\n fluxerr_pb = self.fluxErr[mask]\n npbobs = len(flux_pb)\n\n if npbobs < 1:\n return\n\n if self.mwebv:\n flux_out = extinction.apply(extinctions[i], flux_pb, inplace=False)\n fluxerr_out = extinction.apply(extinctions[i], fluxerr_pb, inplace=False)\n else:\n flux_out = flux_pb\n fluxerr_out = fluxerr_pb\n self.fluxUnred[mask] = flux_out\n self.fluxErrUnred[mask] = fluxerr_out\n\n if npbobs > 1:\n # there's at least enough observations to find minimum and maximum\n minfluxpb = flux_out.min()\n maxfluxpb = flux_out.max()\n norm = maxfluxpb - minfluxpb\n self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm\n self.fluxErrRenorm[mask] = fluxerr_out / norm\n elif npbobs == 1:\n # deal with the case with one observation in this passband by setting renorm = 0.5\n norm = self.fluxUnred[mask] / 0.5\n self.fluxRenorm[mask] /= norm\n self.fluxErrRenorm[mask] /= norm\n\n self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \\\n 'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']\n return", "def _check_for_noise(self) -> None:\n safety_stop = 5\n while self._has_noise() and safety_stop > 0:\n self.filter(size=3)\n safety_stop -= 1", "def apply_filter(self, image):\n pass", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def 
detect_maxfilt_zeros(raw):\n if raw.filenames[0] is not None:\n log_fname = raw.filenames[0].replace('.fif', '.log')\n if 'log_fname' in locals() and exists(log_fname):\n try:\n starttime = raw.first_time\n endtime = raw._last_time\n with open(log_fname) as f:\n lines = f.readlines()\n\n # for determining the start, end and point\n phrase_ndataseg = ['(', ' data buffers)']\n gotduration = False\n\n # for detecting zeroed out data\n zeroed=[]\n phrase_zero = ['Time ', ': cont HPI is off, data block is skipped!']\n for line in lines:\n if gotduration == False and phrase_ndataseg[1] in line:\n gotduration = True\n n_dataseg = float(line.split(phrase_ndataseg[0])[1].split(phrase_ndataseg[1])[0]) # number of segments\n if phrase_zero[1] in line:\n zeroed.append(float(line.split(phrase_zero[0])[1].split(phrase_zero[1])[0])) # in seconds\n\n duration = raw.n_times/n_dataseg # duration of each data segment in samples\n starts = (np.array(zeroed) - starttime) * raw.info['sfreq'] # in samples\n bad_inds = np.zeros(raw.n_times)\n for ii in range(len(starts)):\n stop = starts[ii] + duration # in samples\n bad_inds[int(starts[ii]):int(stop)] = 1\n return bad_inds.astype(bool)\n except:\n s = \"detecting zeroed out data from maxfilter log file failed\"\n logger.warning(s)\n return None\n else:\n s = \"No maxfilter logfile detected - detecting zeroed out data not possible\"\n logger.info(s)\n return None", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, 
mag_filtered_image, full_contrast_image]", "def filters(self, low_freq=1/7, high_freq=128, notch_freq=50):\n self.raw.filter(l_freq=low_freq, h_freq=high_freq)\n self.raw.notch_filter(range(notch_freq, high_freq, notch_freq), filter_length='auto',\n phase='zero', fir_design='firwin')", "def filterEdgeDetect2( bmp, threshold, savefile = '' ):\n b_count = 0\n w_count = 0\n for h in range(bmp.height-1):\n for w in range(bmp.width-1):\n ii = intensity(bmp.pixels[h][w])\n \"\"\" Compare to North and Right neighbor \"\"\"\n if( ii > threshold + intensity(bmp.pixels[h][w+1]) or\n ii < intensity(bmp.pixels[h][w+1]) - threshold or\n ii > threshold + intensity(bmp.pixels[h+1][w]) or\n ii < intensity(bmp.pixels[h+1][w]) - threshold ):\n bmp.pixels[h][w] = BLACK\n b_count += 1\n else:\n bmp.pixels[h][w] = WHITE\n w_count += 1\n\n \"\"\" Test statistics about filter results \"\"\"\n if( w_count == 0 ):\n w_count = 1 # avoid divide-by-zero\n print(\">> Ratio of Black to White is\",b_count,\"/\",w_count,\":\",str(round(b_count*100/w_count,1)) + \"%\")\n\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def test_wifi_scanner_single_scan_channel_sanity(self):\n scan_setting = {\"channels\": self.wifi_chs.MIX_CHANNEL_SCAN,\n \"periodInMs\": SCANTIME,\n \"reportEvents\":\n wutils.WifiEnums.REPORT_EVENT_AFTER_EACH_SCAN}\n self.wifi_scanner_single_scan(scan_setting)", "def filter_fusion(luma_bin, sat_bin, grad_bin, mentor_bin):\n binary = np.zeros_like(luma_bin)\n binary[ (((grad_bin==1) | (sat_bin==1)) & (luma_bin==1)) | (mentor_bin==1) ] = 1\n\n # Erosion and dilation - Seems doesn't work. Mask-off\n #kernel = np.ones((5,5))\n #binary_dilation = cv2.dilate(binary, kernel, iterations=1)\n #binary_erosion = cv2.erode(binary_dilation, kernel, iterations=1)\n #binary = binary_erosion\n\n return binary", "def test_high_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.highvoltage_rule.is_broken(data))", "def cut_scifi_event(data_dict, event) :\n digits = event.digits()\n saturation_counter = 0\n\n for digit in digits :\n if digit.get_adc() == 255 :\n saturation_counter += 1\n\n if saturation_counter > 1000 :\n return True\n\n return False", "def test_high_voltage_failing_signal(self):\n data = gen_random_data(-5, 0, self.channels)\n # ascertain that at least one random datapoint is above threshold to test np.amax edgecase\n data[np.random.randint(self.channels)] = 1.5\n self.assertTrue(self.highvoltage_rule.is_broken(data))", "def robust_daisy():\n _2way = None\n _3way = None\n _info = ''\n ROBUST_DAISY_IMSHOW = False\n\n VV.set_image( curr_im, 1 ) #set current image\n VV.set_image( prev_im, 2 )# set previous image (at this stage dont need lut_raw to be set as it is not used by release_candidate_match2_guided_2way() )\n\n selected_curr_i, selected_prev_i, sieve_stat = VV.release_candidate_match2_guided_2way( feat2d_curr, feat2d_prev )\n #\n #\n # # # min/ max\n # # if (float(min(feat2d_curr.shape[1],feat2d_prev.shape[1])) / max(feat2d_curr.shape[1],feat2d_prev.shape[1])) < 0.70:\n # # match2_total_score -= 3\n # # print 'nTracked features are very different.'\n # #\n #\n match2_total_score = VV.sieve_stat_to_score( sieve_stat ) #remember to do min/max scoring. ie. 
reduce score if nTracked features are very different in both frames\n print '=X=Total_score : ', match2_total_score, '=X='\n _info += '=X=Total_score : '+ str(match2_total_score)+ '=X=\\n'\n _info += 'After 2way_matching, n=%d\\n' %( len(selected_curr_i) )\n\n if ROBUST_DAISY_IMSHOW:\n xcanvas_2way = VV.plot_2way_match( curr_im, np.int0(feat2d_curr[0:2,selected_curr_i]), prev_im, np.int0(feat2d_prev[0:2,selected_prev_i]), enable_lines=True )\n cv2.imshow( 'xcanvas_2way', xcanvas_2way )\n\n\n # Rules\n if match2_total_score > 3:\n # Accept this match and move on\n print 'Accept this match and move on'\n print tcol.OKGREEN, 'Accept (Strong)', tcol.ENDC\n _info += tcol.OKGREEN+ 'a: Accept (Strong)'+ tcol.ENDC + '\\n'\n _2way = (selected_curr_i,selected_prev_i)\n\n if match2_total_score > 2 and match2_total_score <= 3 and len(selected_curr_i) > 20:\n # Boundry case, if you see sufficient number of 2way matches, also accpt 2way match\n print 'Boundary case, if you see sufficient number of 2way matches, also accept 2way match'\n print tcol.OKGREEN, 'Accept', tcol.ENDC\n _info += tcol.OKGREEN+ 'b: Accept'+ tcol.ENDC+'\\n'\n\n _2way = (selected_curr_i,selected_prev_i)\n\n\n if match2_total_score >= 0.5 and match2_total_score <= 3:\n # Try 3way. But plot 2way and 3way.\n # Beware, 3way match function returns None when it has early-rejected the match\n print 'Attempt robust_3way_matching()'\n\n # set-data\n VV.set_image( curr_m_im, 3 ) #set curr-1 image\n VV.set_lut_raw( __lut_curr_im, 1 ) #set lut of curr and prev\n VV.set_lut_raw( __lut_prev_im, 2 )\n # VV.set_lut( curr_lut, 1 ) #only needed for in debug mode of 3way match\n # VV.set_lut( prev_lut, 2 ) #only needed for in debug mode of 3way match\n\n # Attempt 3way match\n # q1,q2,q3: pts_curr, pts_prev, _pts_curr_m,\n # q4 : per_match_vote,\n # q5 : (dense_match_quality, after_vote_match_quality)\n # See GeometricVerification class to know more on this function.\n q1,q2,q3,q4,q5 = VV.robust_match3way()\n print 'dense_match_quality : ', q5[0]\n print 'after_vote_match_quality: ', q5[1]\n _info += 'After 3way_matching:\\n'\n _info += 'dense_match_quality:%4.2f\\n' %(q5[0])\n _info += 'after_vote_match_quality:%4.2f\\n' %(q5[1])\n\n\n if q1 is None:\n print 'Early Reject from robust_match3way()'\n print tcol.FAIL, 'Reject', tcol.ENDC\n _info += 'Early Reject from robust_match3way()\\n'\n _info += tcol.FAIL+ 'c: Reject'+ tcol.ENDC+'\\n'\n _3way = None\n\n else:\n print 'nPts_3way_match : ', q1.shape\n print 'Accept 3way match'\n print tcol.OKGREEN, 'Accept', tcol.ENDC\n _info += 'n3way_matches: %s' %( str(q1.shape) ) + '\\n'\n _info += tcol.OKGREEN+ 'c: Accept'+ tcol.ENDC + '\\n'\n if ROBUST_DAISY_IMSHOW:\n gridd = VV.plot_3way_match( VV.im1, np.array(q1), VV.im2, np.array(q2), VV.im3, np.array(q3) )\n cv2.imshow( '3way Matchi', gridd )\n #fill up _3way\n _3way = (q1,q2,q3)\n\n\n\n if match2_total_score < 0.5:\n # Reject (don't bother computing 3way)\n print 'Reject 2way matching, and do not compute 3way matching'\n print tcol.FAIL, 'Reject (Strong)', tcol.ENDC\n _info += tcol.FAIL+ 'd: Reject (Strong)'+ tcol.ENDC+'\\n'\n _2way = None\n _3way = None\n\n\n if ROBUST_DAISY_IMSHOW:\n cv2.waitKey(10)\n return _2way, _3way, _info", "def get_noisy_channel():\n def no_aug(images, DIFFICULTY):\n return images\n\n func_names=[\n 'static',\n 'blur',\n 'random_scale',\n 'transform'\n ]\n func_names = [n for n in func_names if n is not None]\n\n\n @tf.function\n def noise_pipeline(images, funcs, DIFFICULTY):\n \"\"\"Apply a series of functions to images, in 
order\n \"\"\"\n if DIFFICULTY == 0:\n return images\n else:\n for func in funcs:\n images = func(images, DIFFICULTY)\n return images\n funcs = []\n for func_name in func_names:\n assert func_name in dir(DifferentiableAugment), f\"Function '{func_name}' doesn't exist\"\n funcs.append(getattr(DifferentiableAugment, func_name))\n return lambda images, DIFFICULTY: noise_pipeline(images, funcs, DIFFICULTY)", "def process_frame(self, data):\n logging.error(\"filter_frame needs to be implemented for %s\",\n data.__class__)\n raise NotImplementedError(\"filter_frame needs to be implemented\")" ]
[ "0.6451672", "0.62761134", "0.6208499", "0.5965947", "0.5960218", "0.5822532", "0.5812152", "0.56674945", "0.5547122", "0.5531795", "0.5470612", "0.5420324", "0.5415153", "0.5385043", "0.53281367", "0.5241387", "0.5231101", "0.51918143", "0.5190904", "0.51698804", "0.51601934", "0.5148857", "0.51469105", "0.51429397", "0.510955", "0.50537205", "0.50259054", "0.50218004", "0.5015514", "0.5012803" ]
0.6694439
0
Bad channel detection for Neuropixel probes, developed by IBL
def detect_bad_channels_ibl(
    raw,
    fs,
    psd_hf_threshold,
    dead_channel_thr=-0.5,
    noisy_channel_thr=1.0,
    outside_channel_thr=-0.75,
    n_neighbors=11,
    nyquist_threshold=0.8,
    welch_window_ms=0.3,
):
    _, nc = raw.shape
    raw = raw - np.mean(raw, axis=0)[np.newaxis, :]
    nperseg = int(welch_window_ms * fs / 1000)
    import scipy.signal

    fscale, psd = scipy.signal.welch(raw, fs=fs, axis=0, window="hann", nperseg=nperseg)

    # compute similarities
    ref = np.median(raw, axis=1)
    xcorr = np.sum(raw * ref[:, np.newaxis], axis=0) / np.sum(ref**2)

    # compute coherence
    xcorr_neighbors = detrend(xcorr, n_neighbors)
    xcorr_distant = xcorr - detrend(xcorr, n_neighbors) - 1

    # make recommendation
    psd_hf = np.mean(psd[fscale > (fs / 2 * nyquist_threshold), :], axis=0)

    ichannels = np.zeros(nc, dtype=int)
    idead = np.where(xcorr_neighbors < dead_channel_thr)[0]
    inoisy = np.where(np.logical_or(psd_hf > psd_hf_threshold, xcorr_neighbors > noisy_channel_thr))[0]

    ichannels[idead] = 1
    ichannels[inoisy] = 2

    # the channels outside of the brain are the contiguous channels below the threshold on the trend coherency;
    # the channels outside need to be at either extreme of the probe
    ioutside = np.where(xcorr_distant < outside_channel_thr)[0]
    if ioutside.size > 0 and (ioutside[-1] == (nc - 1) or ioutside[0] == 0):
        a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])
        ioutside = ioutside[a == np.max(a)]
        ichannels[ioutside] = 3

    return ichannels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_badchannels(raw, picks, ref_meg=\"auto\", significance_level=0.05):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n ch_names = np.array(raw.ch_names)[chinds]\n\n bdinds = sails.utils.detect_artefacts(\n raw.get_data(picks=chinds),\n axis=0,\n reject_mode=\"dim\",\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n s = \"Modality {0} - {1}/{2} channels rejected ({3:02f}%)\"\n pc = (bdinds.sum() / len(bdinds)) * 100\n logger.info(s.format(picks, bdinds.sum(), len(bdinds), pc))\n\n # concatenate newly found bads to existing bads\n if np.any(bdinds):\n raw.info[\"bads\"].extend(list(ch_names[np.where(bdinds)[0]]))\n\n return raw", "def sentinel2_(image):\n nubes = image.select(\"QA60\")\n opaque = tools.compute_bits_client(nubes, 10, 10, \"opaque\")\n cirrus = tools.compute_bits_client(nubes, 11, 11, \"cirrus\")\n mask = opaque.Or(cirrus)\n result = image.updateMask(mask.Not())\n return result", "def _find_bad_channels(cfg, raw, subject, session, task, run) -> None:\n if not (cfg.find_flat_channels_meg or cfg.find_noisy_channels_meg):\n return\n\n if (cfg.find_flat_channels_meg and\n not cfg.find_noisy_channels_meg):\n msg = 'Finding flat channels.'\n elif (cfg.find_noisy_channels_meg and\n not cfg.find_flat_channels_meg):\n msg = 'Finding noisy channels using Maxwell filtering.'\n else:\n msg = ('Finding flat channels, and noisy channels using '\n 'Maxwell filtering.')\n\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n\n bids_path = BIDSPath(subject=subject,\n session=session,\n task=task,\n run=run,\n acquisition=acq,\n processing=proc, # XXX : what is proc?\n recording=cfg.rec,\n space=cfg.space,\n suffix=cfg.datatype,\n datatype=cfg.datatype,\n root=cfg.deriv_root)\n\n auto_noisy_chs, auto_flat_chs, auto_scores = \\\n mne.preprocessing.find_bad_channels_maxwell(\n raw=raw,\n calibration=cfg.mf_cal_fname,\n cross_talk=cfg.mf_ctc_fname,\n origin=mf_head_origin,\n coord_frame='head',\n return_scores=True\n )\n\n preexisting_bads = raw.info['bads'].copy()\n bads = preexisting_bads.copy()\n\n if find_flat_channels_meg:\n msg = f'Found {len(auto_flat_chs)} flat channels.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n bads.extend(auto_flat_chs)\n if find_noisy_channels_meg:\n msg = f'Found {len(auto_noisy_chs)} noisy channels.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n bads.extend(auto_noisy_chs)\n\n bads = sorted(set(bads))\n raw.info['bads'] = bads\n msg = f'Marked {len(raw.info[\"bads\"])} channels as bad.'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n\n if find_noisy_channels_meg:\n auto_scores_fname = bids_path.copy().update(\n suffix='scores', extension='.json', check=False)\n with open(auto_scores_fname, 'w') as f:\n json_tricks.dump(auto_scores, fp=f, allow_nan=True,\n sort_keys=False)\n\n if interactive:\n import 
matplotlib.pyplot as plt\n plot_auto_scores(auto_scores)\n plt.show()\n\n # Write the bad channels to disk.\n bads_tsv_fname = bids_path.copy().update(suffix='bads',\n extension='.tsv',\n check=False)\n bads_for_tsv = []\n reasons = []\n\n if find_flat_channels_meg:\n bads_for_tsv.extend(auto_flat_chs)\n reasons.extend(['auto-flat'] * len(auto_flat_chs))\n preexisting_bads = set(preexisting_bads) - set(auto_flat_chs)\n\n if find_noisy_channels_meg:\n bads_for_tsv.extend(auto_noisy_chs)\n reasons.extend(['auto-noisy'] * len(auto_noisy_chs))\n preexisting_bads = set(preexisting_bads) - set(auto_noisy_chs)\n\n preexisting_bads = list(preexisting_bads)\n if preexisting_bads:\n bads_for_tsv.extend(preexisting_bads)\n reasons.extend(['pre-existing (before MNE-BIDS-pipeline was run)'] *\n len(preexisting_bads))\n\n tsv_data = pd.DataFrame(dict(name=bads_for_tsv, reason=reasons))\n tsv_data = tsv_data.sort_values(by='name')\n tsv_data.to_csv(bads_tsv_fname, sep='\\t', index=False)", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "def _pick_bad_channels(event, params):\n # Both bad lists are updated. params['info'] used for colors.\n bads = params['raw'].info['bads']\n params['info']['bads'] = _select_bads(event, params, bads)\n _plot_update_raw_proj(params, None)", "def _test_ndvi_incorrect_bands(self):\n scene = Landsat8Scene(self.filenames)\n self.assertEquals(scene.band_numbers, 8)\n\n try:\n scene2.ndvi()\n except SatProcessError as e:\n self.assertEquals(e.message, 'nir band is not provided')\n\n scene2 = scene.select(['nir', 'blue', 'green'])\n\n try:\n scene2.ndvi()\n except SatProcessError as e:\n self.assertEquals(e.message, 'red band is not provided')", "def detect_bad_channels(\n recording,\n method=\"coherence+psd\",\n std_mad_threshold=5,\n psd_hf_threshold=0.02,\n dead_channel_threshold=-0.5,\n noisy_channel_threshold=1.0,\n outside_channel_threshold=-0.75,\n n_neighbors=11,\n nyquist_threshold=0.8,\n direction=\"y\",\n chunk_duration_s=0.3,\n num_random_chunks=10,\n welch_window_ms=10.0,\n highpass_filter_cutoff=300,\n neighborhood_r2_threshold=0.9,\n neighborhood_r2_radius_um=30.0,\n seed=None,\n):\n import scipy.stats\n\n method_list = (\"std\", \"mad\", \"coherence+psd\", \"neighborhood_r2\")\n assert method in method_list, f\"{method} is not a valid method. 
Available methods are {method_list}\"\n\n # Get random subset of data to estimate from\n random_chunk_kwargs = dict(\n num_chunks_per_segment=num_random_chunks,\n chunk_size=int(chunk_duration_s * recording.sampling_frequency),\n seed=seed,\n )\n\n # If recording is not filtered, apply a highpass filter\n if not recording.is_filtered():\n recording_hp = highpass_filter(recording, freq_min=highpass_filter_cutoff)\n else:\n recording_hp = recording\n\n # Adjust random chunk kwargs based on method\n if method in (\"std\", \"mad\"):\n random_chunk_kwargs[\"return_scaled\"] = False\n random_chunk_kwargs[\"concatenated\"] = True\n elif method == \"coherence+psd\":\n random_chunk_kwargs[\"return_scaled\"] = True\n random_chunk_kwargs[\"concatenated\"] = False\n elif method == \"neighborhood_r2\":\n random_chunk_kwargs[\"return_scaled\"] = False\n random_chunk_kwargs[\"concatenated\"] = False\n\n random_data = get_random_data_chunks(recording_hp, **random_chunk_kwargs)\n\n channel_labels = np.zeros(recording.get_num_channels(), dtype=\"U5\")\n channel_labels[:] = \"good\"\n\n if method in (\"std\", \"mad\"):\n if method == \"std\":\n deviations = np.std(random_data, axis=0)\n else:\n deviations = scipy.stats.median_abs_deviation(random_data, axis=0)\n thresh = std_mad_threshold * np.median(deviations)\n mask = deviations > thresh\n bad_channel_ids = recording.channel_ids[mask]\n channel_labels[mask] = \"noise\"\n\n elif method == \"coherence+psd\":\n # some checks\n assert recording.has_scaled(), (\n \"The 'coherence+psd' method uses thresholds assuming the traces are in uV, \"\n \"but the recording does not have scaled traces. If the recording is already scaled, \"\n \"you need to set gains and offsets: \"\n \">>> recording.set_channel_gains(1); recording.set_channel_offsets(0)\"\n )\n assert 0 < nyquist_threshold < 1, \"nyquist_threshold must be between 0 and 1\"\n\n # If location are not sorted, estimate forward and reverse sorting\n channel_locations = recording.get_channel_locations()\n dim = [\"x\", \"y\", \"z\"].index(direction)\n assert dim < channel_locations.shape[1], f\"Direction {direction} is wrong\"\n locs_depth = channel_locations[:, dim]\n if np.array_equal(np.sort(locs_depth), locs_depth):\n order_f = None\n order_r = None\n else:\n # sort by x, y to avoid ambiguity\n order_f, order_r = order_channels_by_depth(recording=recording, dimensions=(\"x\", \"y\"))\n\n # Create empty channel labels and fill with bad-channel detection estimate for each chunk\n chunk_channel_labels = np.zeros((recording.get_num_channels(), len(random_data)), dtype=np.int8)\n\n for i, random_chunk in enumerate(random_data):\n random_chunk_sorted = random_chunk[order_f] if order_f is not None else random_chunk\n chunk_channel_labels[:, i] = detect_bad_channels_ibl(\n raw=random_chunk_sorted,\n fs=recording.sampling_frequency,\n psd_hf_threshold=psd_hf_threshold,\n dead_channel_thr=dead_channel_threshold,\n noisy_channel_thr=noisy_channel_threshold,\n outside_channel_thr=outside_channel_threshold,\n n_neighbors=n_neighbors,\n nyquist_threshold=nyquist_threshold,\n welch_window_ms=welch_window_ms,\n )\n\n # Take the mode of the chunk estimates as final result. 
Convert to binary good / bad channel output.\n mode_channel_labels, _ = scipy.stats.mode(chunk_channel_labels, axis=1, keepdims=False)\n if order_r is not None:\n mode_channel_labels = mode_channel_labels[order_r]\n\n (bad_inds,) = np.where(mode_channel_labels != 0)\n bad_channel_ids = recording.channel_ids[bad_inds]\n\n channel_labels[mode_channel_labels == 1] = \"dead\"\n channel_labels[mode_channel_labels == 2] = \"noise\"\n channel_labels[mode_channel_labels == 3] = \"out\"\n\n if bad_channel_ids.size > recording.get_num_channels() / 3:\n warnings.warn(\n \"Over 1/3 of channels are detected as bad. In the precense of a high\"\n \"number of dead / noisy channels, bad channel detection may fail \"\n \"(erroneously label good channels as dead).\"\n )\n\n elif method == \"neighborhood_r2\":\n # make neighboring channels structure. this should probably be a function in core.\n geom = recording.get_channel_locations()\n num_channels = recording.get_num_channels()\n chan_distances = np.linalg.norm(geom[:, None, :] - geom[None, :, :], axis=2)\n np.fill_diagonal(chan_distances, neighborhood_r2_radius_um + 1)\n neighbors_mask = chan_distances < neighborhood_r2_radius_um\n if neighbors_mask.sum(axis=1).min() < 1:\n warnings.warn(\n f\"neighborhood_r2_radius_um={neighborhood_r2_radius_um} led \"\n \"to channels with no neighbors for this geometry, which has \"\n f\"minimal channel distance {chan_distances.min()}um. These \"\n \"channels will not be marked as bad, but you might want to \"\n \"check them.\"\n )\n max_neighbors = neighbors_mask.sum(axis=1).max()\n channel_index = np.full((num_channels, max_neighbors), num_channels)\n for c in range(num_channels):\n my_neighbors = np.flatnonzero(neighbors_mask[c])\n channel_index[c, : my_neighbors.size] = my_neighbors\n\n # get the correlation of each channel with its neighbors' median inside each chunk\n # note that we did not concatenate the chunks here\n correlations = []\n for chunk in random_data:\n chunk = chunk.astype(np.float32, copy=False)\n chunk = chunk - np.median(chunk, axis=0, keepdims=True)\n padded_chunk = np.pad(chunk, [(0, 0), (0, 1)], constant_values=np.nan)\n # channels with no neighbors will get a pure-nan median trace here\n neighbmeans = np.nanmedian(\n padded_chunk[:, channel_index],\n axis=2,\n )\n denom = np.sqrt(np.nanmean(np.square(chunk), axis=0) * np.nanmean(np.square(neighbmeans), axis=0))\n denom[denom == 0] = 1\n # channels with no neighbors will get a nan here\n chunk_correlations = np.nanmean(chunk * neighbmeans, axis=0) / denom\n correlations.append(chunk_correlations)\n\n # now take the median over chunks and threshold to finish\n median_correlations = np.nanmedian(correlations, 0)\n r2s = median_correlations**2\n # channels with no neighbors will have r2==nan, and nan<x==False always\n bad_channel_mask = r2s < neighborhood_r2_threshold\n bad_channel_ids = recording.channel_ids[bad_channel_mask]\n channel_labels[bad_channel_mask] = \"noise\"\n\n return bad_channel_ids, channel_labels", "def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= 
s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def bad_pixel_mask(self):\n from mkidpipeline.pipeline import PROBLEM_FLAGS # This must be here to prevent a circular import!\n return self.flagged(PROBLEM_FLAGS)", "def detect(self, frame, cur_count):\n self.variables['is_black'] = False\n self.process(frame, cur_count)", "def skin_detection(img):\n for index_line, line in enumerate(img):\n for index_pixel, pixel in enumerate(line):\n if pixel[2] > 95 and pixel[1] > 40 and pixel[0] > 20 and max(pixel) - min(pixel) > 15 \\\n and abs(pixel[2] - pixel[1]) > 15 and pixel[2] > pixel[0] and pixel[2] > pixel[1] \\\n and index_pixel > len(line) / 2:\n # img[index_line][index_pixel] = (255, 255, 255)\n pass\n else:\n img[index_line][index_pixel] = (0, 0, 0)\n return img", "def deal_with_bad_channels(self, selection_method, plot=True, threshold_sd_of_mean=40, interpolate=True,\n file_path=None):\n # TODO: (Everyone) Check how well the automatic detection works on your data\n\n if file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n \n if selection_method == \"automatic\":\n if self.epochs is None:\n raise AttributeError('Please create epochs first, as the automatic algorithm needs them to work.')\n else:\n df = self.epochs.to_data_frame()\n\n group = df.groupby('epoch')\n mean = group.mean()\n\n a = mean.std()\n a = a[1:]\n print('standard deviation of mean across epochs:')\n print(np.mean(a), np.std(a))\n print('higher than %s:' % threshold_sd_of_mean)\n print(a[a > threshold_sd_of_mean].index)\n\n for i in a[a > threshold_sd_of_mean].index:\n self.raw.info['bads'].append(i)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n elif selection_method == \"file\":\n bads = pd.read_csv(file_name)\n self.raw.info['bads'] = list(bads['bad_channels'].values)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n elif selection_method != \"manual\":\n ValueError(\"selection_method can be automatic, file, or manual\")\n\n if plot or selection_method == \"manual\":\n self.raw.plot(block=True)\n\n print(\"Marked as bad: \", self.raw.info['bads'])\n\n print(\"N marked as bad: \", len(self.raw.info['bads']))\n\n if 
file_path is None:\n file_path = os.getcwd()\n file_name = os.path.join(file_path, 'participant_{}_bad_channels.csv'.format(self.participant_id))\n pd.DataFrame({'participant': self.participant_id,\n 'bad_channels': self.raw.info['bads']}).to_csv(path_or_buf=file_name,\n index=False)\n\n print(\"Saving bad channels as {}\".format(file_name))\n\n if interpolate:\n \"Interpolating bad channels...\"\n if len(self.raw.info['bads']) > 0:\n self.raw.interpolate_bads(reset_bads=True)", "def ledaps(image):\n cmask = image.select('QA')\n\n valid_data_mask = tools.compute_bits(cmask, 1, 1, 'valid_data')\n cloud_mask = tools.compute_bits(cmask, 2, 2, 'cloud')\n snow_mask = tools.compute_bits(cmask, 4, 4, 'snow')\n\n good_pix = cloud_mask.eq(0).And(valid_data_mask.eq(0)).And(snow_mask.eq(0))\n result = image.updateMask(good_pix)\n\n return result", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def demo(net, image_name):\n # Load the demo image\n im_file = os.path.join(im_path, image_name)\n timer = Timer()\n timer.tic()\n im = cv2.imread(im_file)\n timer.toc()\n print ('reading image took {:.3f}s for detection').format(timer.total_time)\n crop_size=6000 #裁减图像大小\n crop_overlap=100 #裁减图像的重叠区域\n # ipdb.set_trace()\n if im.shape[0]>crop_size and im.shape[1]>crop_size:\n index=crop_im(crop_size,crop_overlap,im)\n all_dets=[[]for _ in xrange(2)] \n #print index\n for im_index in range(0,len(index)): \n start_x=index[im_index][0][0]\n start_y=index[im_index][0][1]\n end_x=index[im_index][0][2]\n end_y=index[im_index][0][3] \n scores, boxes = im_detect(net, im[start_x:end_x,start_y:end_y])\n \n # skip j = 0, because it's the background class\n for class_index in xrange(1, 2):\n inds = np.where(scores[:, class_index] > CONF_THRESH[class_index-1])[0] #confidence thresh\n if len(inds)==0:\n continue\n # from ipdb import set_trace\n # set_trace() \n cls_scores = scores[inds, class_index]\n #cls_boxes = boxes[inds, class_index * 4:(class_index + 1) * 4]\n cls_boxes = boxes[inds, 4:8]\n #from ipdb import set_trace\n #set_trace() \n ###函数im_detect的输出是什么样的?这里为啥要乘上4???????????\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n #后处理函数\n #cls_dets=postprocess(cls_dets,del_theta) \n #softnms,如果不使用该方法可以注释掉,这个是faster自带的softnms,但是\n #它是将所有类不加区分放在一起进行softnms,而且所有的类共用一个置信概率 \n #keep = soft_nms(cls_dets, sigma=0.5, Nt=0.3, threshold=0.001, method=2)\n #2是高斯,1是线性,设其他是nms\n #nms,如果不使用该方法也注释掉,它和soft_nms二选一\n #from ipdb import set_trace\n #set_trace() \n #keep = nms(cls_dets, NMS_THRESH[class_index-1]) #nms thresh\n #cls_dets = cls_dets[keep, :]\n ##index的每一行的结构((start_x,start_y,end_x,end_y),h_num*(j-1)+k)\n cls_dets[:,:1]=(cls_dets[:,:1]+index[im_index][0][1])\n cls_dets[:,1:2]=(cls_dets[:,1:2]+index[im_index][0][0])\n cls_dets[:,2:3]=(cls_dets[:,2:3]+index[im_index][0][1])\n cls_dets[:,3:4]=(cls_dets[:,3:4]+index[im_index][0][0])\n all_dets[class_index].append(cls_dets.tolist())\n \n # from ipdb import set_trace\n # set_trace() \n for j in xrange(1, 2):\n if len(all_dets[j])==0:\n continue\n whole_dets=np.vstack(all_dets[j])\n \n \n ##后处理1\n # keep2=postprocess(whole_dets,del_theta,del_theta_p)#1111111111111\n \n \n #keep = soft_nms(whole_dets, sigma=0.5, Nt=0.3, method=2, threshold=0.001) \n ##后处理2,一般NMS,上面用的是soft-NMS\n whole_dets=whole_dets.astype(np.float32, copy=False)\n keep = nms(whole_dets, 
NMS_THRESH[class_index-1]) #111111111111\n #whole_dets=all_dets_pos[keep]#11111111111111111\n ##后处理3\n # whole_dets1=all_dets_pos[keep]\n # ind=postprocess2(whole_dets1,del_theta2[j-1])\n whole_dets=whole_dets[keep] \n \n ##把最终结果按得分排序,不需要所以注释掉\n # a_arg=np.argsort(-whole_dets[:,4])\n # whole_dets=whole_dets[a_arg] #rank\n\n if os.path.exists(result_path):\n pass\n else:\n os.mkdir(result_path)\n file1=open(result_path+'det_test_'+CLASSES[j]+'.txt','a')\n for i in range(whole_dets.shape[0]):\n bbox = tuple(int(np.round(x)) for x in whole_dets[i, :4])\n score = whole_dets[i, -1]\n \n ##画图\n if score>0.5:\n cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)\n cv2.putText(im, '%s: %.3f' % (CLASSES[j], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n\n # if image_name.find('.tiff') == -1: # this img is png or tif\n # im_name=image_name[:-4]\n # else: #this img is tiff\n # im_name=image_name[:-5] \n line=image_name+' '+str(score)+' '+str(bbox[0])+' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\\n'\n file1.write(line)\n\t\t\t\t#file1.write(line)\n file1.close()\n else:\n scores, boxes = im_detect(net, im)\n # from ipdb import set_trace\n # set_trace() \n for class_index in xrange(1, 2):\n #print(class_index)\n inds = np.where(scores[:, class_index] > CONF_THRESH[class_index-1])[0] #confidence thresh\n if len(inds)==0:\n continue\n #############################\n #print(inds)\n ###############################\n cls_scores = scores[inds, class_index]\n cls_boxes = boxes[inds, 4:8]\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n # keep2=postprocess(cls_dets,del_theta,del_theta_p)\n # all_dets_pos=cls_dets[keep2]\n #keep = soft_nms(cls_dets, sigma=0.5, Nt=0.3, method=2, threshold=0.001) \n keep = nms(cls_dets, NMS_THRESH[class_index-1]) #nms thresh\n cls_dets = cls_dets[keep]\n \n # ind=postprocess2(cls_dets,del_theta2[class_index-1])\n # cls_dets=cls_dets[ind]\n # a_arg=np.argsort(-cls_dets[:,4])\n # cls_dets=cls_dets[a_arg]\n\n if os.path.exists(result_path):\n pass\n else:\n os.mkdir(result_path)\n \n file1=open(result_path+'det_test_'+CLASSES[class_index]+'.txt','a')\n for i in range(cls_dets.shape[0]):\n bbox = tuple(int(np.round(x)) for x in cls_dets[i, :4])\n score = cls_dets[i, -1]\n if score>0.5:\n cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)\n cv2.putText(im, '%s: %.3f' % (CLASSES[class_index], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 1.0, (0, 0, 255), thickness=1)\n # if image_name.find('.tiff') == -1: # this img is png or tif\n # im_name=image_name[:-4]\n # else: #this img is tiff\n # im_name=image_name[:-5] \n \n line=im_name+' '+str(score)+' '+str(bbox[0])+' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\\n'\n file1.write(line)\n file1.close()\n \n \n if os.path.exists(save_path):\n pass\n else:\n os.mkdir(save_path) \n cv2.imwrite(os.path.join(save_path+'/'+image_name),im)", "def handle_colordetection(self):\n self.robot.sensormap.tank_drive.stop()\n if self.robot.sensormap.cs_l.color in self.colors and self.robot.sensormap.cs_l.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_l.color)\n elif self.robot.sensormap.cs_r.color in self.colors and self.robot.sensormap.cs_r.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_r.color)\n elif self.robot.sensormap.cs_m.color in self.colors and self.robot.sensormap.cs_m.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_m.color)", "def 
test_no_rgb_colorspace(self):\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"broken_colorspace.gif\")\n self._upload_photo(user, file_path)", "def filterEdgeDetect2( bmp, threshold, savefile = '' ):\n b_count = 0\n w_count = 0\n for h in range(bmp.height-1):\n for w in range(bmp.width-1):\n ii = intensity(bmp.pixels[h][w])\n \"\"\" Compare to North and Right neighbor \"\"\"\n if( ii > threshold + intensity(bmp.pixels[h][w+1]) or\n ii < intensity(bmp.pixels[h][w+1]) - threshold or\n ii > threshold + intensity(bmp.pixels[h+1][w]) or\n ii < intensity(bmp.pixels[h+1][w]) - threshold ):\n bmp.pixels[h][w] = BLACK\n b_count += 1\n else:\n bmp.pixels[h][w] = WHITE\n w_count += 1\n\n \"\"\" Test statistics about filter results \"\"\"\n if( w_count == 0 ):\n w_count = 1 # avoid divide-by-zero\n print(\">> Ratio of Black to White is\",b_count,\"/\",w_count,\":\",str(round(b_count*100/w_count,1)) + \"%\")\n\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def detect():\n pass", "def detect_state(self, camera, image, send_q):\n print('Therefore, should never get to this print statement')\n pass", "def detect_infrared():\n try:\n count = 0\n while True:\n if GPIO.input(PIN_NO) == True:\n count += 1\n print('[+] Detected ' + str(count))\n output_sound()\n send_message()\n time.sleep(2)\n except Exception as e:\n GPIO.cleanup()", "def test_on_merlin_image_binary(self):\n im = diffread(TEST_MIB)\n self.assertEqual(im.shape, (256, 256))\n self.assertEqual(im.dtype, np.dtype(\">u2\"))", "def is_colour(self, im):\n hsl = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n h, s, v = np.mean(hsl, (0, 1))\n if s < 100:\n self.log.info(\n \"Grayscale scan detected (hsv %s, %s, %s), converting...\", h, s, v\n )\n return False\n return True", "def count_nonblack_np(img):\n return img.any(axis=-1).sum()", "def testPluginEdgeError(self):\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=3)\n # Set the size large enough to trigger the edge error\n self.config.plugins[self.algName].size = exposure.getDimensions()[1]//2\n task.log.setLevel(task.log.FATAL)\n task.run(cat, exposure)\n source = cat[0]\n self.assertTrue(source.get(self.algName + \"_flag\"))\n self.assertFalse(source.get(self.algName + \"_flag_containsNan\"))\n self.assertTrue(source.get(self.algName + \"_flag_edge\"))", "def white_balance(device, img, mode='hist',debug=None, roi=None):\n device += 1\n\n ori_img = np.copy(img)\n\n if roi is not None:\n roiint = all(isinstance(item, int) for item in roi)\n\n if len(roi) != 4 | roiint is False:\n fatal_error('If ROI is used ROI must have 4 elements as a list and all must be integers')\n else:\n pass\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n hmax=255\n type = np.uint8\n else:\n iy, ix = np.shape(img)\n if img.dtype == 'uint8':\n hmax=255\n type=np.uint8\n elif img.dtype == 'uint16':\n hmax=65536\n type=np.uint16\n\n mask = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n if roi is None:\n x = 0\n y = 0\n w = ix\n h = iy\n\n else:\n x = roi[0]\n y = roi[1]\n w = roi[2]\n h = roi[3]\n\n if len(np.shape(img)) == 3:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n c1 = img[:, :, 0]\n c2 = img[:, :, 1]\n c3 = img[:, :, 2]\n if mode == 'hist':\n channel1 = _hist(c1, hmax, x, y, h, w, type)\n channel2 = _hist(c2, hmax, x, y, h, w, type)\n channel3 = _hist(c3, hmax, x, y, h, w, type)\n else:\n 
channel1 = _max(c1, hmax, mask, x, y, h, w, type)\n channel2 = _max(c2, hmax, mask, x, y, h, w, type)\n channel3 = _max(c3, hmax, mask, x, y, h, w, type)\n\n finalcorrected = np.dstack((channel1, channel2, channel3))\n\n else:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (255, 255, 255), 3)\n if mode == 'hist':\n finalcorrected = _hist(img, hmax, x, y, h, w, type)\n elif mode == 'max':\n finalcorrected = _max(img, hmax, mask, x, y, h, w, type)\n\n if debug == 'print':\n print_image(ori_img, (str(device) + '_whitebalance_roi.png'))\n print_image(finalcorrected, (str(device) + '_whitebalance.png'))\n\n elif debug == 'plot':\n plot_image(ori_img, cmap='gray')\n plot_image(finalcorrected, cmap='gray')\n\n return device, finalcorrected", "def test_change_color_of_the_device__false():", "def get_falsecolor(input):\n rgb_band_idxs = [bands.index(b) for b in [\"S2B8\", \"S2B4\", \"S2B3\"]]\n return input[rgb_band_idxs]", "def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type", "def traffic_sign_detection_noisy(img_in):\n img = img_in.copy()\n clean_picture = cv2.fastNlMeansDenoisingColored(\n src=img,\n dst=None,\n templateWindowSize=7,\n searchWindowSize=21,\n h=15,\n hColor=15\n )\n clean_picture = cv2.bilateralFilter(clean_picture, 9, 75, 75)\n return traffic_sign_detection(clean_picture, light_size=(8, 30), light_offset=10)" ]
[ "0.6287099", "0.60764647", "0.6043073", "0.59790766", "0.5947519", "0.5931392", "0.58910775", "0.5872547", "0.57613003", "0.5647657", "0.5642624", "0.56293416", "0.55950636", "0.55592114", "0.5558336", "0.5542864", "0.55260485", "0.55084145", "0.55017304", "0.54997927", "0.5498697", "0.548246", "0.5470905", "0.54679847", "0.54635084", "0.5436044", "0.5428223", "0.54232895", "0.54122883", "0.5405557" ]
0.6900998
0
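A minimal usage sketch for the detect_bad_channels_ibl function above (illustrative only: the array shape, sampling rate, and psd_hf_threshold value are assumptions, and it assumes numpy/scipy plus the detrend helper from the next row are in scope):

    import numpy as np

    # hypothetical (num_samples, num_channels) chunk of traces, assumed to be scaled to uV
    traces = np.random.randn(30000, 384).astype("float32")
    labels = detect_bad_channels_ibl(traces, fs=30000.0, psd_hf_threshold=0.02)
    # per-channel labels: 0 = good, 1 = dead, 2 = noisy, 3 = outside the brain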
Subtract the trend from a vector. The trend is a median-filtered version of that vector, with tapering.
def detrend(x, nmed):
    ntap = int(np.ceil(nmed / 2))
    xf = np.r_[np.zeros(ntap) + x[0], x, np.zeros(ntap) + x[-1]]

    import scipy.signal

    xf = scipy.signal.medfilt(xf, nmed)[ntap:-ntap]
    return x - xf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msub(trace):\n \n return(trace - np.mean(trace))", "def mad(v):\n return np.median(np.abs(v-np.median(v)))", "def subtract_filters(tod,az,el,filter_tod, filter_coefficients, atmos, atmos_coefficient):\n tod_out = tod - filter_tod*filter_coefficients -\\\n Statistics.AtmosGroundModel(atmos,az,el)*atmos_coefficient\n tod_out -= np.nanmedian(tod_out)\n return tod_out", "def demeaned(self):\n return self.data - self.mean", "def trend(data):\n argmin = np.argmin(data)\n argmax = np.argmax(data)\n\n divider = (data[argmax] + data[argmin])\n\n if divider == 0.0:\n return 0.0\n\n if argmin < argmax:\n return (data[argmax] - data[argmin]) / (data[argmax] + data[argmin])\n elif argmin > argmax:\n return (data[argmin] - data[argmax]) / (data[argmin] + data[argmax])\n\n return 0.0", "def detrend(ovar):\n\t\n\tovar1=anomaly(ovar)\n\t\n\tt1=c1=xr.DataArray(np.arange(len(ovar1.time)),dims='time',coords={'time': ovar1.time})\n\tslope=covmat(t1,ovar1)/np.std(t1)**2\n\t\n\tovar1 -= slope*t1 # remove linear trend\n\tovar2=anomaly(ovar1)\n\t\n\treturn ovar2", "def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor", "def trend_extremum(data):\n if data[0] < data[-1]:\n argmin = data[0]\n argmax = data[-1]\n\n if argmax + argmin:\n return (argmax - argmin) / (argmax + argmin)\n\n elif data[0] > data[-1]:\n argmin = data[-1]\n argmax = data[0]\n\n if argmax + argmin:\n return (argmin - argmax) / (argmax + argmin)\n\n return 0.0", "def de_mean(x):\n x_bar = mean(x)\n return [ x_i - x_bar for x_i in x]", "def detrend(x):\n\n t = x['t']\n f = x['f']\n t0 = np.mean(x['t'])\n time_since_transit = t - t0\n\n # select out just the continuum points\n continuum = x['continuum']==1\n\n pfit = np.polyfit(\n time_since_transit[continuum], f[continuum], 
poly_degree\n )\n\n fldt = f.copy()\n fldt -= np.polyval(pfit,time_since_transit)\n return fldt", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def de_mean(x):\n x_bar = mean(x)\n return [x_i - x_bar for x_i in x]", "def de_mean(xs: List[float]) -> float:\n x_bar = mean(xs)\n return [x - x_bar for x in xs]", "def mad(v):\n return np.mean(np.abs(v - np.mean(v)))", "def rm_trend(self, dim=\"time\", nan_policy=\"none\"):\n return rm_trend(self._obj, dim=dim, nan_policy=nan_policy)", "def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val", "def mdiff(x):\n return ma.median(ma.diff(x))", "def dspmt(t):\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)", "def untruncatedMedian(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMedian not yet implemented for ' + self.type)", "def unwhiten_back(self, sample):\n sample = sample*self.Y_std.unsqueeze(1) + self.Y_mean.unsqueeze(1)\n return sample", "def unwhiten_back(self, sample):\n sample = sample*self.Y_std.unsqueeze(1) + self.Y_mean.unsqueeze(1)\n return sample", "def dctrend(f):\r\n \r\n fdc=sps.detrend(f)\r\n \r\n return fdc", "def median_filter(self):\n print \"Median-Filtering...\"\n D = self.D\n x = np.median(np.median(D,axis=1),axis=1)\n for i in xrange(len(x)):\n D[i,:,:] -= x[i]\n self.D = D\n print \"done.\"", "def subtractVector(self, subtrahend):\n result = self.addVector(subtrahend.scalarMultiplication(-1.0))\n return result", "def ilerp(a, b, t):\n return (t - a) / (b - a)", "def filter(self):\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_", "def ReWeight(Vec):\n Out = Vec\n Exclude = isnan(Vec)\n Out[Exclude] = 0 #set missing to 0\n Out = Out / sum(Out) #normalize\n return(Out)", "def untruncatedMedian(self):\n return self._distribution.untrMedian()", "def remove_temporal_mean(self):\n if not hasattr(self, 'detrended_data'):\n self.detrend_data()\n self.mean_removed_data = self.detrended_data - \\\n np.mean(self.detrended_data, axis=-1, keepdims=True)" ]
[ "0.61765325", "0.5848676", "0.57519156", "0.573888", "0.56955457", "0.5648303", "0.5647168", "0.56332374", "0.5627263", "0.5606333", "0.5601126", "0.5601126", "0.5601126", "0.55868834", "0.5551623", "0.55299646", "0.5528388", "0.5499842", "0.5426456", "0.5414013", "0.5376006", "0.5376006", "0.537065", "0.5340667", "0.5312759", "0.5306758", "0.52935946", "0.52890027", "0.5284024", "0.5256758" ]
0.6236731
0
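A short illustrative call for the detrend helper above (the values are made up; note that nmed must be odd, since scipy.signal.medfilt rejects even kernel sizes):

    import numpy as np

    x = np.random.randn(384)          # e.g. one similarity value per channel
    residual = detrend(x, nmed=11)    # subtract an 11-point median-filtered trend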
Return the CPS-SORTER version, location, and the Python version powering it.
def version_msg():
    python_version = sys.version[:3]
    location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    message = 'CPS-SORTER %(version)s from {} (Python {})'
    return message.format(location, python_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )", "def version():\n\n print(VERSION_CODE)", "def get_version():\n from colubrid import __version__\n from sys import version\n return '%s - Python %s' % (__version__, version.split('\\n')[0].strip())", "def where_is_pc(self):\n program_counter = self.chipdata.get_reg_strict('REGFILE_PC')\n return self.debuginfo.get_source_info(program_counter.value)", "def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"", "def pythonversionstr():\n return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())", "def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)", "def get_version(program):\n\n return \"%s from mrtools %s\" % (program, mrtools_version)", "def systemversionstr():\n return platform.uname().system", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def get_version():\n return '%d.%d.%d' % version_info", "def python_build():\n return _sys_version()[4:6]", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def version():\n\n pass", "def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )", "def version():\n return uname().version", "def version():\n return uname().version", "def _pyVersion(self):\n return sys.version", "def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short", "def _pyVersion(self): \n return sys.version", "def get_preptools_version() -> str:\n try:\n version = pkg_resources.get_distribution('preptools').version\n except pkg_resources.DistributionNotFound:\n version_path = os.path.join(PROJECT_ROOT_PATH, 'VERSION')\n with open(version_path, mode='r') as version_file:\n version = version_file.read()\n except:\n version = 'unknown'\n return version", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def pythonVersionString(self):\n vstring = \"{0}.{1}.{2}\".format(sys.version_info.major, 
sys.version_info.minor, sys.version_info.micro)\n if sys.version_info.releaselevel != \"final\":\n vstring += \" ({})\".format( sys.version_info.releaselevel )\n if sys.version_info.serial != 0:\n vstring += \" (serial: {})\".format( sys.version_info.serial )\n return vstring", "def getwindowsversion(): # real signature unknown; restored from __doc__\n pass", "def osversion():\n return platform()", "def version():\n print(\"Code writen for Python3.6.4. You're using python version:\")\n print(platform.python_version())", "def python_compiler():\n return _sys_version()[6]" ]
[ "0.6168813", "0.5991945", "0.5864778", "0.58291036", "0.57870007", "0.57867646", "0.5765549", "0.57526255", "0.5737532", "0.57125515", "0.57054865", "0.56973195", "0.5696263", "0.5696263", "0.567503", "0.5664359", "0.56593263", "0.5656099", "0.5653776", "0.5653776", "0.5646976", "0.5645648", "0.5639515", "0.5633702", "0.56192374", "0.5617751", "0.5611569", "0.56047106", "0.5599379", "0.55936426" ]
0.6110174
1
Creates a grid image from a list of tiles.
def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None): if grid_rows is None or grid_cols is None: grid_rows = int(math.sqrt(len(tiles))) grid_cols = int(math.ceil(len(tiles) / grid_rows)) grid = np.zeros( (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8) for tile_id, tile in enumerate(tiles): assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0]) yy = int(tile_id / grid_cols) xx = tile_id % grid_cols grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]), (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile return grid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = [i for i in range(k)]\n return _create_grid(images, indices, n_rows, n_cols)", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def tiles_to_images(wfc_ns, tile_grid, tile_catalog, tile_size, visualize=False, partial=False, grid_count=None):\n new_img = np.zeros((tile_grid.shape[0] * tile_size, tile_grid.shape[1] * tile_size, wfc_ns.channels), dtype=np.int64)\n if partial and (len(tile_grid.shape) > 2):\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n pixel_merge_list = []\n for k in range(tile_grid.shape[2]):\n tile = tile_grid[i,j,k]\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. 
Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = None#[200, 0, 200]\n #print(tile)\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n pixel = [200, 0, 200]\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n else:\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile) and (WFC_NULL_VALUE != tile): # TODO: instead of -3, use MaskedArrays\n pixel = tile_catalog[tile][u,v]\n if not(pixel is None):\n pixel_merge_list.append(pixel)\n if len(pixel_merge_list) == 0:\n if 0 == (i + j) % 2:\n pixel_merge_list.append([255, 0, 255])\n else:\n pixel_merge_list.append([0, 172, 172])\n \n if len(pixel_merge_list) > 0:\n pixel_to_add = pixel_merge_list[0]\n if len(pixel_merge_list) > 1:\n pixel_to_add = [round(sum(x) / len(pixel_merge_list)) for x in zip(*pixel_merge_list)]\n try:\n while (len(pixel_to_add) < wfc_ns.channels):\n pixel_to_add.append(255)\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = pixel_to_add\n except TypeError as e:\n wfc_logger.warning(e)\n wfc_logger.warning(\"Tried to add {} from {}\".format(pixel_to_add, pixel_merge_list))\n else:\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n tile = tile_grid[i,j]\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n #print(f\"tile: {tile}\")\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n if (-2 == tile):\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile):\n pixel = tile_catalog[tile][u,v]\n # Watch out for images with more than 3 channels!\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = np.resize(pixel, new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v].shape)\n logging.debug('Output image shape is', new_img.shape)\n return new_img", "def grid_maker(width, height):\n grid = [['.' 
for i in range(width)] for j in range(height)]\n return grid", "def _make_grid(self, imageset, format_kwargs=None):\n grid_size = imageset[\"grid_size\"]\n return ImageGrid.from_imageset(\n self._make_stills(imageset, format_kwargs=format_kwargs), grid_size\n )", "def MakeCoordPlot(tiles, coords, image_size=10000, boarder_width=20):\n tile_size = tiles.shape[1]\n\n grid_coords = Cloud2Grid(\n coords, grid_dim=(image_size - 2 * tile_size), tile_size=tile_size\n )\n grid_coords = grid_coords + tile_size # for black boarder\n grid_image = Image.new(\"RGB\", (image_size, image_size))\n for i in range(len(tiles)): # paste each tile onto image\n tile = ColorTileBoarder(tiles[i], channel=0, boarder_width=2)\n tile = Image.fromarray(tiles[i])\n x, y = grid_coords[i, :]\n grid_image.paste(tile, (int(x), int(y)))\n coords[\"grid1\"] = grid_coords[:, 0] + tile_size // 2\n coords[\"grid2\"] = grid_coords[:, 1] + tile_size // 2\n return grid_image, coords", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def tiles(self, nums, row = 1, spaces = 0):\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n separator = (\"+---+\" + (\" \" * 5)) * row\r\n space = (\" \" * 5) * spaces\r\n\r\n tile = space + separator + space + \"\\n\"\r\n \r\n tile += space\r\n for i in nums:\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n tile += f\"| {i} |\" + (\" \" * 5)\r\n tile += space + \"\\n\"\r\n \r\n tile += space + separator + space + \"\\n\"\r\n \r\n return tile", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def render_tiles(self, tiles):\n for row in tiles:\n for tile in row:\n if tile is not None:\n if tile.height < 0:\n color = (0, 100, 0)\n else:\n z = max(0, tile.height)\n color = tuple([z * 255] * 3)\n self.surface.set_at((tile.x, tile.y), color)", "def __createTiles(self, length, 
width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def createGridcells(mapdata, listOfP):\n new_gridcells = GridCells()\n new_gridcells.header = mapdata.header\n new_gridcells.cell_width = mapdata.info.resolution\n new_gridcells.cell_height = mapdata.info.resolution\n new_gridcells.cells = []\n for p in listOfP:\n new_gridcells.cells.append(PathPlanner.grid_to_world(mapdata, p[0], p[1]))\n return new_gridcells", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def construct_image(imgs):\n\n # todo fill missing pieces and\n\n if len(imgs) == 0:\n return None\n # taking the first\n w, h = imgs[0][1].size\n img_array = order_2d(imgs)\n x_count = len(img_array[0])\n y_count = len(img_array)\n height = h * y_count\n width = w * x_count\n new_im = Image.new('RGB', (width, height))\n for y in range(y_count):\n for x in range(x_count):\n _, im = img_array[y][x]\n new_im.paste(im, (x * w, y * h))\n return new_im", "def create_rand_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = random.sample(range(len(images)), k)\n return _create_grid(images, indices, n_rows, n_cols)", "def generate_image_grid(sess, op):\n n = 10\n x_points = np.linspace(-20, 20, n)\n y_points = np.linspace(-20, 20, n)\n\n nx, ny = len(x_points), len(y_points)\n plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)\n\n for i, g in enumerate(gs):\n z = np.concatenate(([x_points[int(i / ny)]], [y_points[int(i % nx)]]))\n z = np.reshape(z, (1, 2))\n x = sess.run(op, feed_dict={decoder_input: z})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect('auto')\n plt.show()", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = TILE_EMPTY\n return grid", "def build_tiles(cls):\n\n 
LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)", "def join(tiles, width=0, height=0):\n # Don't calculate size if width and height are provided\n # this allows an application that knows what the\n # combined size should be to construct an image when\n # pieces are missing.\n\n if width > 0 and height > 0:\n im = Image.new(\"RGBA\", (width, height), None)\n else:\n im = Image.new(\"RGBA\", get_combined_size(tiles), None)\n columns, rows = calc_columns_rows(len(tiles))\n for tile in tiles:\n try:\n im.paste(tile.image, tile.coords)\n except IOError:\n # do nothing, blank out the image\n continue\n return im", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def plot_grid(im_list, grid_shape, scale=0.1, axes_pad=0.07):\r\n # https://gist.github.com/lebedov/7018889ba47668c64bcf96aee82caec0\r\n\r\n # Grid must be 2D:\r\n assert len(grid_shape) == 2\r\n\r\n # Make sure all images can fit in grid:\r\n assert np.prod(grid_shape) >= len(im_list)\r\n\r\n grid = ImageGrid(plt.gcf(), 111, grid_shape, axes_pad=axes_pad)\r\n for i, data in enumerate(im_list):\r\n\r\n # Scale image:\r\n im = PIL.Image.fromarray(data)\r\n thumb_shape = [int(scale*j) for j in im.size]\r\n im.thumbnail(thumb_shape, PIL.Image.ANTIALIAS)\r\n data_thumb = np.array(im)\r\n grid[i].plot_nnua(data_thumb)\r\n\r\n # Turn off axes:\r\n grid[i].axes.get_xaxis().set_visible(False)\r\n grid[i].axes.get_yaxis().set_visible(False)" ]
[ "0.70259213", "0.6990671", "0.6980882", "0.6948259", "0.68473595", "0.6711176", "0.66005284", "0.65379393", "0.6532611", "0.65262944", "0.6488695", "0.64755493", "0.64710665", "0.6470593", "0.6410475", "0.6399757", "0.6379704", "0.6372506", "0.634613", "0.634", "0.63370925", "0.6322197", "0.63094443", "0.63094443", "0.6307638", "0.62875664", "0.62610894", "0.625397", "0.6253699", "0.62308544" ]
0.73709124
0
Colorizes a label map.
def colorize_label_map(label): if label.ndim != 2: raise ValueError('Expect 2-D input label. Got {}'.format(label.shape)) colormap = colormap_ade20k label_mod = np.mod(label, len(colormap)) return colormap[label_mod].astype(np.uint8)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap", "def create_cityscapes_label_colormap():\r\n colormap = np.zeros((256, 3), dtype=np.uint8)\r\n colormap[0] = [128, 64, 128]\r\n colormap[1] = [244, 35, 232]\r\n colormap[2] = [70, 70, 70]\r\n colormap[3] = [102, 102, 156]\r\n colormap[4] = [190, 153, 153]\r\n colormap[5] = [153, 153, 153]\r\n colormap[6] = [250, 170, 30]\r\n colormap[7] = [220, 220, 0]\r\n colormap[8] = [107, 142, 35]\r\n colormap[9] = [152, 251, 152]\r\n colormap[10] = [70, 130, 180]\r\n colormap[11] = [220, 20, 60]\r\n colormap[12] = [255, 0, 0]\r\n colormap[13] = [0, 0, 142]\r\n colormap[14] = [0, 0, 70]\r\n colormap[15] = [0, 60, 100]\r\n colormap[16] = [0, 80, 100]\r\n colormap[17] = [0, 0, 230]\r\n colormap[18] = [119, 11, 32]\r\n return colormap", "def map_label_colors(array, ignore_vals=[0]):\n colset = [(166, 206, 227),\n (31, 120, 180),\n (178, 223, 138),\n (51, 160, 44),\n (251, 154, 153),\n (227, 26, 28),\n (253, 191, 111),\n (255, 127, 0),\n (202, 178, 214),\n (106, 61, 154),\n (255, 255, 153),\n (177, 89, 40)]\n levels = np.unique(array)\n levels = [l for l in levels if l not in ignore_vals]\n if len(levels) == 0:\n return\n if len(levels) == 1:\n return({levels[0]: colset[0]})\n step = len(colset) / (len(levels) - 1)\n\n col_idx = np.arange(0, len(colset), step)\n colors = {}\n for idx in range(len(levels)):\n colors[levels[idx]] = colset[col_idx[idx]]\n return colors", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def decode_segmap(self, label_mask, plot=False):\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "def decode_segmap(self, label_mask, plot=False):\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb", "def decode_segmap(label_mask, num_classes):\n label_colours = get_capsicum_labels()\n\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, num_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3)).astype(np.uint8)\n # rgb[:, :, 0] = r / 255.0\n # 
rgb[:, :, 1] = g / 255.0\n # rgb[:, :, 2] = b / 255.0\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def overlay_labels(labels):\n labels_scaled = rescaled(labels,0,256)\n labels_colored = plt.get_cmap(\"jet\")(labels_scaled)\n border_mask = region_borders(labels) & (labels > 0)\n labels_colored[~border_mask,:,3] = 0 # set alpha to zero\n return labels_colored", "def decode_segmap(label_mask, n_classes, hex_color_dict, dataset, plot=False):\r\n\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n for ll in range(0, n_classes):\r\n r[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[0]\r\n g[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[1]\r\n b[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[2]\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n\r\n return rgb", "def class2color(self, labels, clean_up_clusters=0, mode=None):\n clean_up_clusters *= clean_up_clusters # create an area\n colored_labels = np.zeros(labels.shape[:2] + (3,)).astype(np.uint8)\n labels = np.squeeze(labels)\n if clean_up_clusters > 0:\n labels = DropClusters.drop(labels, min_size=clean_up_clusters)\n ys, xs = np.where(labels)\n colored_labels[ys, xs, :] = self.label_color\n return colored_labels", "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def color(self, label):\n if self.grayscale:\n return (\"#ffffff\", \"#555555\", \"#888888\", \"#bbbbbb\", \"#222222\")[label]\n # COC WL WR SL SR\n return (\"#4e73b0\", \"#fdb863\", \"#b2abd2\", \"#e66101\", \"#5e3c99\")[label]", "def encode_labelmap(colour_img, colourlabelmap):\n colour_img = colour_img.astype(int)\n labels = np.zeros((colour_img.shape[0], colour_img.shape[1]), dtype=np.int16)\n for label_id, colour in enumerate(colourlabelmap):\n labels[np.where(np.all(colour == colour_img, axis=-1))] = label_id\n\n return labels", "def compute_color_for_labels(label):\n\tcolor = [int((p * (label**2 - label + 1)) % 255) for p in palette]\n\treturn tuple(color)", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * 
(maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def canonical_coloring_label_1(G,c):\n \n H = G.copy()\n #H.allow_loops( true )\n\n for i in c:\n print( i )\n H.add_edges([(i,j) for j in c[i]])\n\n P = [G.vertices(), c.keys()]\n return H.canonical_label(partition=P)", "def ListColorMaps(self):\n p.rc('text', usetex=False)\n a=p.outerproduct(numpy.arange(0,1,0.01),numpy.ones(10))\n p.figure(figsize=(10,5))\n p.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)\n maps=[m for m in p.cm.datad.keys() if not m.endswith(\"_r\")]\n maps.sort()\n l=len(maps)+1\n i=1\n for m in maps:\n p.subplot(1,l,i)\n p.axis(\"off\")\n p.imshow(a,aspect='auto',cmap=p.get_cmap(m),origin=\"lower\")\n p.title(m,rotation=90,fontsize=10)\n i=i+1\n #savefig(\"colormaps.png\",dpi=100,facecolor='gray')\n p.show()", "def get_label_color_mapping(idx):\n # https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def bitget(byteval, ch):\n return (byteval & (1 << ch)) != 0\n r = g = b = 0\n for j in range(8):\n r = r | (bitget(idx, 0) << 7 - j)\n g = g | (bitget(idx, 1) << 7 - j)\n b = b | (bitget(idx, 2) << 7 - j)\n idx = idx >> 3\n return np.array([r, g, b], dtype=np.uint8)", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n 
pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. \"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def plot_cm_dict(labels, preds, label_mapper, cmap='magma'):\n plot_heatmaps_raw(confusion_matrix(labels, preds), precision=0, cmap=cmap)\n plt.colorbar()\n plt.xticks(range(len(label_mapper)), label_mapper.values(), rotation=90)\n plt.yticks(range(len(label_mapper)), label_mapper.values(), rotation=0)\n plt.xlabel(\"Predicted Label\")\n plt.ylabel(\"True Label\")", "def label_to_color_image(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n \n colormap = create_label_colormap()\n \n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n \n return colormap[label]", "def label_color(label):\n if label < len(colors):\n return colors[label]\n else:\n warnings.warn('Label {} has no color, returning default.'.format(label))\n return (0, 255, 0)" ]
[ "0.7158274", "0.6700223", "0.66767853", "0.65805185", "0.65408355", "0.65408355", "0.6459192", "0.64117914", "0.64117914", "0.6396002", "0.63941085", "0.63720775", "0.6268199", "0.62457615", "0.62457615", "0.62457615", "0.62457615", "0.62456137", "0.6224787", "0.62216127", "0.6107746", "0.607539", "0.6074516", "0.60484654", "0.6025054", "0.5989499", "0.5982239", "0.597639", "0.59706336", "0.59319615" ]
0.76586264
0
Visualizes GT fragment fields.
def visualize_gt_frag( gt_obj_ids, gt_obj_masks, gt_frag_labels, gt_frag_weights, gt_frag_coords, output_size, model_store, vis_prefix, vis_dir): # Consider the first (i.e. the closest) fragment. frag_ind = 0 centers_vis = np.zeros((output_size[1], output_size[0], 3)) for gt_id, obj_id in enumerate(gt_obj_ids): obj_mask = gt_obj_masks[gt_id] obj_frag_labels = gt_frag_labels[obj_mask][:, frag_ind] centers_vis[obj_mask] = model_store.frag_centers[obj_id][obj_frag_labels] weights_vis = gt_frag_weights[:, :, frag_ind] weights_vis /= weights_vis.max() coords_vis = np.zeros((output_size[1], output_size[0], 3)) for gt_id, obj_id in enumerate(gt_obj_ids): obj_mask = gt_obj_masks[gt_id] obj_frag_labels = gt_frag_labels[obj_mask][:, frag_ind] obj_frag_coords = gt_frag_coords[obj_mask][:, frag_ind, :] # Scale by fragment sizes. frag_scales = model_store.frag_sizes[obj_id][obj_frag_labels] obj_frag_coords *= np.expand_dims(frag_scales, 1) coords_vis[obj_mask] = obj_frag_coords # Reconstruct the XYZ object coordinates. xyz_vis = centers_vis + coords_vis # Normalize the visualizations. centers_vis = centers_vis - centers_vis.min() centers_vis /= centers_vis.max() coords_vis = coords_vis - coords_vis.min() coords_vis /= coords_vis.max() xyz_vis = xyz_vis - xyz_vis.min() xyz_vis /= xyz_vis.max() # Save the visualizations. inout.save_im( os.path.join(vis_dir, '{}_gt_frag_labels.png'.format(vis_prefix)), (255.0 * centers_vis).astype(np.uint8)) inout.save_im( os.path.join(vis_dir, '{}_gt_frag_coords.png'.format(vis_prefix)), (255.0 * coords_vis).astype(np.uint8)) inout.save_im( os.path.join(vis_dir, '{}_gt_frag_reconst.png'.format(vis_prefix)), (255.0 * xyz_vis).astype(np.uint8)) inout.save_im( os.path.join(vis_dir, '{}_gt_frag_weights.png'.format(vis_prefix)), (255.0 * weights_vis).astype(np.uint8))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def define_gabor_fragment(frag_size):\n bg_value = 0\n\n # frag = np.ones(frag_size, dtype='uint8') * 255\n # frag[:, frag_size[0] // 2 - 2, :] = 0\n # frag[:, frag_size[0] // 2 - 1, :] = 0\n # frag[:, frag_size[0] // 2, :] = 0\n # frag[:, frag_size[0] // 2 + 1, :] = 0\n # frag[:, frag_size[0] // 2 + 2, :] = 0\n\n frag = np.array([\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255]\n ])\n frag = np.stack([frag, frag, frag], axis=-1)\n\n # --------------------------------------------------------------\n plt.figure()\n plt.imshow(frag)\n plt.title(\"Specified Fragment\")\n import pdb\n pdb.set_trace()\n\n print(\"Finding Gabor Fit ...\")\n frag = (frag - frag.min()) / (frag.max() - frag.min())\n gabor_params_list = gabor_fits.find_best_fit_2d_gabor(frag, verbose=1)\n\n g_params = gabor_fits.convert_gabor_params_list_to_dict(gabor_params_list)\n g_params.print_params(g_params)\n\n fitted_gabor = gabor_fits.get_gabor_fragment(gabor_params, frag_size[:2])\n\n f, ax_arr = plt.subplots(1, 2)\n ax_arr[0].imshow(frag)\n ax_arr[0].set_title(\"Specified Fragment\")\n ax_arr[1].imshow(fitted_gabor)\n ax_arr[1].set_title(\"Generated Fragment\")\n\n return fitted_gabor, g_params, bg_value", "def print_fragmentation():\n\n frag_dict = calculate_fragmentation()\n \n _print_fragmentation(frag_dict, sys.stdout)", "def visualize_pred_frag(\n frag_confs, frag_coords, output_size, model_store, vis_prefix, vis_dir,\n vis_ext='png'):\n num_objs = frag_confs.shape[2]\n tiles_centers = []\n tiles_coords = []\n tiles_reconst = []\n for obj_id in range(1, num_objs + 1):\n\n # Fragment confidences of shape [field_h, field_w, num_frags].\n conf_obj = frag_confs[:, :, obj_id - 1, :]\n field_shape = (conf_obj.shape[0], conf_obj.shape[1], 3)\n\n # Indices of fragments with the highest confidence.\n top_inds = np.argmax(conf_obj, axis=2)\n top_inds_f = top_inds.flatten()\n\n # Fragment centers.\n top_centers = np.reshape(\n model_store.frag_centers[obj_id][top_inds_f], field_shape)\n\n # Fragment coordinates of shape [field_h * field_w, num_frags, 3].\n num_frags = frag_coords.shape[3]\n coords_obj = frag_coords[:, :, obj_id - 1, :, :].reshape((-1, num_frags, 3))\n\n # Top fragment coordinates of shape [field_h * field_w, 3].\n top_coords_rel = coords_obj[np.arange(top_inds.size), top_inds_f]\n top_scales = model_store.frag_sizes[obj_id][top_inds_f]\n top_coords = top_coords_rel * top_scales.reshape((-1, 1))\n\n # Reshape to [field_h, field_w, 3].\n top_coords = top_coords.reshape(field_shape)\n\n # Reconstruction of shape [field_h * field_w, 3].\n top_reconst = top_centers + top_coords\n\n txt_list = [{'name': 'cls', 'val': obj_id, 'fmt': ':d'}]\n tiles_centers.append(visualization.write_text_on_image(\n colorize_xyz(top_centers), txt_list, size=10, color=(1.0, 1.0, 1.0)))\n tiles_coords.append(visualization.write_text_on_image(\n colorize_xyz(top_coords), txt_list, size=10, color=(1.0, 1.0, 1.0)))\n 
tiles_reconst.append(visualization.write_text_on_image(\n colorize_xyz(top_reconst), txt_list, size=10, color=(1.0, 1.0, 1.0)))\n\n # Assemble and save the visualization grids.\n fname = '{}_pred_frag_centers.{}'.format(vis_prefix, vis_ext)\n grid = build_grid(tiles_centers, output_size)\n inout.save_im(os.path.join(vis_dir, fname), grid)\n\n fname = '{}_pred_frag_coords.{}'.format(vis_prefix, vis_ext)\n grid = build_grid(tiles_coords, output_size)\n inout.save_im(os.path.join(vis_dir, fname), grid)\n\n fname = '{}_pred_frag_reconst.{}'.format(vis_prefix, vis_ext)\n grid = build_grid(tiles_reconst, output_size)\n inout.save_im(os.path.join(vis_dir, fname), grid)", "def _print_fragmentation(frag_dict, out):\n\n headers = [\"Order\", \"Free Pages\", \"Fragmentation[%]\"]\n widths = [4, 9, 15]\n \n write = out.write\n \n def columnize(columns, max_lens, widths, sep=4*' '):\n \"\"\" Helper to create a string with columns evenly spaced \"\"\"\n \n padded_columns = []\n \n for _str, max_len, width in zip(columns, max_lens, widths):\n length_diff = max_len - len(str(_str))\n\n padded_column = ' ' * length_diff\n padded_column += str(_str)\n padded_column = padded_column.center(width)\n\n padded_columns.append(padded_column)\n \n return sep.join(padded_columns)\n\n for node, zone_dict in frag_dict.iteritems():\n for zone in zone_dict.iterkeys():\n total_free_pages = 0\n overall_frag_pct = 0\n \n write(\"{0}, Zone: {1}\\n\".format(node, zone))\n write(columnize(headers, map(len, headers), widths) + '\\n')\n\n rows = []\n max_lens = [0, 0, 0]\n \n for order, frag_info in zone_dict[zone].iteritems():\n free_count, frag_pct = frag_info\n\n total_free_pages += (2**order) * free_count\n overall_frag_pct += frag_pct\n\n frag_pct = \"{0:.0%}\".format(frag_pct)\n\n rows.append((order, free_count, frag_pct))\n\n # Find max length for each column for use in pretty printing\n for order, free_count, frag_pct in rows:\n max_lens[0] = max(len(str(order)), max_lens[0])\n max_lens[1] = max(len(str(free_count)), max_lens[1])\n max_lens[2] = max(len(str(frag_pct)), max_lens[2])\n\n for row in rows:\n write(columnize(row, max_lens, widths, sep=5*' ') + '\\n')\n\n # Calculate the mean over all orders\n overall_frag_pct /= 11\n \n write(\"Total Free Pages: {0}\\n\".format(total_free_pages))\n write(\"Overall Fragmentation: {0:.0%}\\n\".format(overall_frag_pct))\n write('\\n')", "def visualise():\n\n column = request.form.getlist('columnName')\n regions = request.form.getlist('raw_regions')\n #take the single string and return a list\n regions = query_proc.prep_regions(regions)\n #get that tables of interst\n table = query_proc.column_to_table(column)\n\n var_data = query_proc.get_region_data(table, column, regions)\n minval = query_proc.get_region_data_min(table, column, regions)\n maxval = query_proc.get_region_data_max(table, column, regions)\n\n #column diction to get human fiendly designation\n column_dict = name_column.get_name_column_dict()\n real_column = column_dict[column[0]]\n\n\n ##packing for the template\n region = regions[0]\n min_max = [minval, maxval]\n step = query_proc.calc_steps(min_max)\n min_max.append(step)\n\n min_max = json.dumps(min_max)\n json_vardata = json.dumps(var_data)\n\n return render_template('visualise.html',\n title='Data on a Map!',\n column=column,\n real_column=real_column,\n region=region,\n min_max=min_max,\n json_vardata=json_vardata)", "def show_graph(self):\n graph_file = self.dump_graph()\n subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def 
show_custom_graph(self):\n pass", "def DegViewshed (FLOOR, HEIGHT):\n\n #Select Record\n arcpy.SelectLayerByAttribute_management(PointsFL,\"NEW_SELECTION\",SQL)\n \n #Set Observer Height (OffSETA)\n arcpy.CalculateField_management(PointsFL,\"OFFSETA\",HEIGHT,\"PYTHON_9.3\")\n \n #perform viewshed analysis\n arcpy.SetProgressorLabel(\"Performing Viewshed Analysis for point \"+str(value))\n outViewshed = IntermediateFiles+\"\\\\vs_\"+str(FLOOR)+\"_\"+str(value).split(\".\")[0]\n arcpy.Viewshed_3d(outCon,PointsFL,outViewshed)\n\n #convert viewshed to polygon\n arcpy.SetProgressorLabel(\"Converting viewshed\"+str(value)+\" on floor \"+str(FLOOR)+\" to polygon.\")\n OutPoly = IntermediateFiles+\"\\\\\"+os.path.basename(outViewshed).split(\".\")[0]+\"_poly.shp\"\n arcpy.RasterToPolygon_conversion(outViewshed,OutPoly)\n\n #Intersect viewshed polygon with buffer clip\n #This will allow the viewshed poly to inherit attribute fields needed for later analysis\n FinalView = Final_Floor_Viewsheds+\"\\\\FinalViewshed_\"+str(FLOOR)+\"_\"+str(value)+\".shp\"\n arcpy.Intersect_analysis([BufferClip,OutPoly],FinalView)\n \n #Select features in viewshed polygon with Gridcode = 1\n #If no records with grid = 1 exist, scriptwill skip to setting viewshed in degrees to 0\n \n #Convert viewshed polygon to layer\n ViewshedLayer = outName(FinalView,\"lyr\")\n arcpy.MakeFeatureLayer_management(FinalView,ViewshedLayer)\n\n #Select records with gridcode = 1\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"NEW_SELECTION\",\"GRIDCODE =\"+str(1)+\"\") \n\n #Get count of the # of records selected in viewshed poly layer\n VsLyrCount = int(arcpy.GetCount_management(ViewshedLayer).getOutput(0))\n \n NoView = SummaryTables+\"\\\\summary_\"+str(FLOOR)+\"_\"+str(value)+\".dbf\"\n YesView = SummaryTables+\"\\\\summary_\"+str(FLOOR)+\"_\"+str(value)+\".dbf\"\n StatsField0 = [[\"GRIDCODE\",\"SUM\"]]\n CaseField0 = [\"ID\",\"SPOT\",FloorField] \n StatsField1 = [[\"LENGTH\",\"SUM\"]]\n CaseField1 = [\"GRIDCODE\",\"ID\",\"SPOT\",FloorField]\n VsArcLengths = ArcLengths+\"\\\\ArcLength_\"+str(FLOOR)+\"_\"+str(value)+\".shp\"\n \n if VsLyrCount == 0: #no viewable areas exist\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"CLEAR_SELECTION\")\n arcpy.SetProgressorLabel(\"Calculating viewshed statistics for parcel \"+str(value))\n arcpy.Statistics_analysis(ViewshedLayer,NoView, StatsField0,CaseField0)\n\n #Add field to summary table to hold viewshed value of 0\n #Add field to note which floor viewshed corresponds to\n arcpy.AddField_management(NoView, \"FLR_RAN\",\"SHORT\")\n arcpy.AddField_management(NoView, \"VIEW_\"+Year,\"DOUBLE\")\n arcpy.AddField_management(NoView,\"OFFSETA\",\"SHORT\")\n arcpy.CalculateField_management(NoView,\"FLR_RAN\",FLOOR)\n arcpy.CalculateField_management(NoView,\"VIEW_\"+Year,0)\n arcpy.CalculateField_management(NoView,\"OFFSETA\",HEIGHT)\n\n else: #Calculate viewshed, in degrees, for selected records\n arcpy.SetProgressorLabel(\"Getting arc length for parcel\"+str(value)+\" at the \"+str(FLOOR)+\" floor.\")\n arcpy.Intersect_analysis([BufferLine,ViewshedLayer],VsArcLengths,\"\",10,\"LINE\")#Intersect with any line within 10 ft. 
\n arcpy.AddField_management(VsArcLengths, \"Length\",\"DOUBLE\")\n arcpy.CalculateField_management(VsArcLengths,\"Length\",\"!SHAPE.length@miles!\",\"PYTHON_9.3\")\n arcpy.Statistics_analysis(VsArcLengths,YesView,StatsField1,CaseField1)\n\n #Add fields to output summary table\n arcpy.AddField_management(YesView,\"FLR_RAN\",\"SHORT\")\n arcpy.AddField_management(YesView,\"VIEW_\"+Year,\"DOUBLE\")\n arcpy.AddField_management(YesView,\"OFFSETA\",\"SHORT\")\n arcpy.CalculateField_management(YesView,\"FLR_RAN\",FLOOR)\n arcpy.CalculateField_management(YesView,\"OFFSETA\",HEIGHT)\n arcpy.CalculateField_management(YesView,\"VIEW_\"+Year,\"((!SUM_LENGTH!/3.14)*180)\",\"PYTHON_9.3\")\n arcpy.SelectLayerByAttribute_management(ViewshedLayer,\"CLEAR_SELECTION\")", "def test_vggmini_visualize(self):\n\t\tpass", "def show(self):\n if self.nodes_ is None:\n logging.debug(\"Segment - Nothing to show. Skipping.\")\n return\n\n if len(self.polygons_) != 0:\n logging.debug(\"Segment - Showing 3D Segments using `vedo`.\")\n logging.warning(\"Segment - Showing 3D Segments can be slow!.\")\n\n import vedo\n\n points = vedo.Points(self.nodes)\n lines = []\n for p in self.polygons:\n p = np.asarray(p).astype(np.int32)\n lines.append(vedo.Line(self.nodes[p]))\n\n vedo.show([points, *lines]).show().close()\n\n else:\n logging.debug(\"Segment - Showing 2D Segments using `matplotlib`.\")\n\n import matplotlib.pyplot as plt\n\n plt.scatter(\n self.nodes_[:, 0],\n self.nodes_[:, 1],\n c=\"pink\",\n zorder=1000,\n )\n\n for c in self.connectivity_:\n plt.plot(\n self.nodes_[c][:,0],\n self.nodes_[c][:,1],\n c=\"grey\",\n lw=2,\n zorder=10,\n )\n\n plt.show()", "def __repr__(self):\n return \"%s(hit_id=%r, query_id=%r, %r fragments)\" % (\n self.__class__.__name__,\n self.hit_id,\n self.query_id,\n len(self),\n )", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n 
temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def dftb_geom(name): \n dftb_geom = \"\"\"Geometry = GenFormat {\n <<< \"{{ title }}\"\n }\n \"\"\"\n return Environment().from_string(dftb_geom).render(title=name)", "def show(self):\n # if a blender object already exists then the mesh_grower must have been finalized;\n # in this case make a new mesh_grower and object\n viz.add_box(self.lower_vertex, self.upper_vertex)", "def render(self, vertex_highlighting=False):\n pass", "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def setDisplayWireframe():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(1)\n node.end()\n else:\n node['display'].setValue(1)", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))", "def fGT(self):\n pass", "def intf_VIEWSHOW(E):\n out= \"View Properties\\n\"\n out+= \"---------------\\n\"\n out+= \"svgoutfile=%s\\n\" % OUT.outfile\n out+= \"camera=%s {camset}\\n\" % (','.join([str(x) for x in OUT.camera]))\n out+= \"target=%s {tarset}\\n\" % (','.join([str(x) for x in OUT.target]))\n out+= \"opacity=%s {hlr,hide}\\n\" % str(OUT.opacity)\n out+= \"facelines=%s {facelines}\\n\" % str(OUT.facelines)\n out+= \"vlinewidth=%0.2f {vlw,viewlinewidth}\\n\" % OUT.vlinewidth\n out+= \"vrefreshms=%d {refreshms,viewrefreshms}\\n\" % OUT.vrefreshms\n out+= \"vbox=(%d,%d) {viewbox[xy]}\\n\" % (OUT.vboxX,OUT.vboxY)\n out+= \"vtran=(%d,%d) {vtran[xy],viewtran[xy]}\\n\" % (OUT.vtranX,OUT.vtranY)\n out+= \"vscale=(%d,%d) {vscale[xy],viewscale[xy]}\\n\" % (OUT.vscaleX,OUT.vscaleY)\n print(out)", "def vis_gt_boxes(self):\n import cv2\n num_images = len(self.gt)\n for i in range(num_images):\n im = cv2.imread(self.image_path_at(i))\n im = im[:, :, (2, 1, 0)]\n plt.cla()\n plt.imshow(im)\n gt_image = self.gt[i]\n for j in range(len(gt_image['boxes'])):\n bbox = gt_image['boxes'][j]\n c = gt_image['gt_classes'][j] \n plt.gca().add_patch(plt.Rectangle((float(bbox[0]), float(bbox[1])),\n float(bbox[2]) - float(bbox[0]),\n float(bbox[3]) - float(bbox[1]), fill=False,\n edgecolor='r', linewidth=3))\n x = (bbox[0] + bbox[2])/2\n y = bbox[1]\n s = '{}'.format(self.classes[c])\n plt.text(x, y, s, fontsize=14,horizontalalignment='center',weight='bold',backgroundcolor=(1,1,1))\n plt.show()", "def visualize(model: Model, structural_part=True, measurement_part=False,\n view=True, filename=None, title=''):\n g = gv.Digraph(format='jpg', graph_attr={'label': title})\n if structural_part:\n g.node_attr.update(color='red', shape='box')\n for i, j in model.parameters['Beta']:\n lval, rval = model.beta_names[0][i], model.beta_names[0][j]\n g.edge(rval, lval)\n if measurement_part:\n g.node_attr.update(color='black', shape='circle')\n for i, j in model.parameters['Lambda']:\n lval, rval = model.lambda_names[0][i], model.lambda_names[0][j]\n g.edge(lval, rval)\n g.render(filename, 
view=view)", "def run_fragments(options):\n fragments.fragments(\n bam=options.bam,\n fragment_path=options.fragments,\n min_mapq=options.min_mapq,\n nproc=options.nproc,\n cellbarcode=options.barcodetag,\n readname_barcode=options.barcode_regex,\n chromosomes=options.use_chrom,\n cells=options.cells,\n max_distance=options.max_distance,\n chunksize=options.chunksize,\n )", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def fragment(self, *args, **kwargs) -> Any:\n pass", "def visualize(self, U, **kwargs):\n raise NotImplementedError", "def isfragment(self):\n return True", "def report(self):\n bbox = \"verts: \" + str(self.lower_vertex) + \" \" + str(self.upper_vertex)\n dimensions = \"dimensions: \" + \",\".join(\n (\n str(self.dimension_along(0)),\n str(self.dimension_along(1)),\n str(self.dimension_along(2)),\n )\n )\n string = bbox + \"\\n\" + dimensions\n return bbox", "def graf_F(self):\n vert_funktion(self, typ='D', titel='$Empirische\\; Verteilungsfunktion$' + '\\n ')" ]
[ "0.55321085", "0.5522345", "0.5433646", "0.5431757", "0.5373131", "0.5313519", "0.5297603", "0.52738523", "0.5157684", "0.5131119", "0.50463957", "0.50377893", "0.5006303", "0.4998228", "0.49903286", "0.4951498", "0.4939597", "0.49295256", "0.49017107", "0.4890147", "0.4865713", "0.48483872", "0.48462024", "0.48460066", "0.4842207", "0.48343834", "0.4822581", "0.4814037", "0.48107532", "0.4809079" ]
0.61502445
0
Visualizes predicted fragment fields.
def visualize_pred_frag( frag_confs, frag_coords, output_size, model_store, vis_prefix, vis_dir, vis_ext='png'): num_objs = frag_confs.shape[2] tiles_centers = [] tiles_coords = [] tiles_reconst = [] for obj_id in range(1, num_objs + 1): # Fragment confidences of shape [field_h, field_w, num_frags]. conf_obj = frag_confs[:, :, obj_id - 1, :] field_shape = (conf_obj.shape[0], conf_obj.shape[1], 3) # Indices of fragments with the highest confidence. top_inds = np.argmax(conf_obj, axis=2) top_inds_f = top_inds.flatten() # Fragment centers. top_centers = np.reshape( model_store.frag_centers[obj_id][top_inds_f], field_shape) # Fragment coordinates of shape [field_h * field_w, num_frags, 3]. num_frags = frag_coords.shape[3] coords_obj = frag_coords[:, :, obj_id - 1, :, :].reshape((-1, num_frags, 3)) # Top fragment coordinates of shape [field_h * field_w, 3]. top_coords_rel = coords_obj[np.arange(top_inds.size), top_inds_f] top_scales = model_store.frag_sizes[obj_id][top_inds_f] top_coords = top_coords_rel * top_scales.reshape((-1, 1)) # Reshape to [field_h, field_w, 3]. top_coords = top_coords.reshape(field_shape) # Reconstruction of shape [field_h * field_w, 3]. top_reconst = top_centers + top_coords txt_list = [{'name': 'cls', 'val': obj_id, 'fmt': ':d'}] tiles_centers.append(visualization.write_text_on_image( colorize_xyz(top_centers), txt_list, size=10, color=(1.0, 1.0, 1.0))) tiles_coords.append(visualization.write_text_on_image( colorize_xyz(top_coords), txt_list, size=10, color=(1.0, 1.0, 1.0))) tiles_reconst.append(visualization.write_text_on_image( colorize_xyz(top_reconst), txt_list, size=10, color=(1.0, 1.0, 1.0))) # Assemble and save the visualization grids. fname = '{}_pred_frag_centers.{}'.format(vis_prefix, vis_ext) grid = build_grid(tiles_centers, output_size) inout.save_im(os.path.join(vis_dir, fname), grid) fname = '{}_pred_frag_coords.{}'.format(vis_prefix, vis_ext) grid = build_grid(tiles_coords, output_size) inout.save_im(os.path.join(vis_dir, fname), grid) fname = '{}_pred_frag_reconst.{}'.format(vis_prefix, vis_ext) grid = build_grid(tiles_reconst, output_size) inout.save_im(os.path.join(vis_dir, fname), grid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_predictions(self):\n self.vis.draw_predictions()", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def draw(self, frame):\n for prediction in self.predictions:\n prediction.draw(frame)", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def visualize_F(self, phase=None):\n y_np = to_np(self.y_seq)\n g_np = to_np(self.g_seq)\n x_np = to_np(self.x_seq)\n\n if self.opt.normalize_domain:\n for i in range(len(x_np)):\n x_np[i] = x_np[i] * self.data_s[i] + self.data_m[i]\n\n fn = 'prediction.png'\n if phase is not None:\n fn = 'prediction_{}.png'.format(phase)\n\n for x, y, g in zip(x_np, y_np, g_np):\n for i in range(2):\n 
for j in range(2):\n mark = ['+', '.'][i]\n color = ['b', 'r'][j]\n plt.plot(x[(y == i) & (g == j), 0], x[(y == i) & (g == j), 1], mark, color=color, markersize=10)\n plt.savefig(self.opt.outf + '/' + fn)\n plt.close()", "def visualize_svd():", "def plot(self):\n\t\tself.plotOfTF().plot()", "def visualize(image, preds, fps):\n # show inference info\n fps_text = \"FPS : {:.2f}\".format(fps)\n cv2.putText(image, fps_text, (11, 40), cv2.FONT_HERSHEY_PLAIN, 4.0, (238, 130, 238), 4, cv2.LINE_AA)\n\n if preds.shape[0] != 0:\n for box in preds:\n (x1, y1, x2, y2) = box.astype(np.int)\n cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 2)\n\n return image", "def show(self):\n print(\"depth: \", self.depth, \"split_id: \", self.split_id,\n \"split_val: \", self.split_val, \"predict_label: \",\n self.predict_label)\n if self.left_node is not None:\n self.left_node.show()\n if self.right_node is not None:\n self.right_node.show()", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def plot_predictions(self):\n\n plt.title(\"Targets vs. Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def predict(model, segments, fields=['count', 'orientation', 'red_mean',\n 'green_mean', 'blue_mean']):\n# segs = segments[fields] # fix this!!!!\n\n predictions = model.predict(segments)\n\n return predictions", "def visualize_predictions(model : torch.nn.Module, dataSet : Dataset, \r\n axes, device :torch.device, numTestSamples : int,\r\n id_to_color : np.ndarray = train_id_to_color):\r\n model.to(device=device)\r\n model.eval()\r\n\r\n # predictions on random samples\r\n testSamples = np.random.choice(len(dataSet), numTestSamples).tolist()\r\n # _, axes = plt.subplots(numTestSamples, 3, figsize=(3*6, numTestSamples * 4))\r\n \r\n for i, sampleID in enumerate(testSamples):\r\n inputImage, gt = dataSet[sampleID]\r\n\r\n # input rgb image \r\n inputImage = inputImage.to(device)\r\n landscape = inverse_transform(inputImage).permute(1, 2, 0).cpu().detach().numpy()\r\n axes[i, 0].imshow(landscape)\r\n axes[i, 0].set_title(\"Landscape\")\r\n\r\n # groundtruth label image\r\n label_class = gt.cpu().detach().numpy()\r\n axes[i, 1].imshow(id_to_color[label_class])\r\n axes[i, 1].set_title(\"Groudtruth Label\")\r\n\r\n # predicted label image\r\n y_pred = torch.argmax(model(inputImage.unsqueeze(0)), dim=1).squeeze(0)\r\n label_class_predicted = y_pred.cpu().detach().numpy() \r\n axes[i, 2].imshow(id_to_color[label_class_predicted])\r\n axes[i, 2].set_title(\"Predicted Label\")\r\n\r\n plt.show()", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"", "def print_fragmentation():\n\n frag_dict = calculate_fragmentation()\n \n _print_fragmentation(frag_dict, sys.stdout)", "def 
show_visuals(self, objects_in_scene, image, axe_pred):\n image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)\n\n # draw grid (slow)\n #image = self.draw_grid(image)\n\n # add axe bounding box\n #image = self.return_bbox_image(image, objects_in_scene.axes, \"Axe\", AXE_COLOR)\n\n # add mundo bounding box\n #image = self.return_bbox_image(image, objects_in_scene.mundos, \"Mundo\", MUNDO_COLOR)\n\n # add a circle/dot at the centre of the axe bbox\n image = self.show_centre_of_bbox(image, objects_in_scene.axes)\n\n # if there is a prediction made in the current frame, draw an arrow graphic to highlight\n # where the program predicts the axe will go\n if axe_pred:\n image = self.draw_pred_arrows(image, axe_pred, 1)\n\n\n\n\n # open live capture window with new shapes\n try:\n image = cv2.resize(image, (960, 540)) \n cv2.imshow(\"visualisation\", image)\n\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n exit()\n\n except:\n pass", "def visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()", "def visualize_gt_frag(\n gt_obj_ids, gt_obj_masks, gt_frag_labels, gt_frag_weights, gt_frag_coords,\n output_size, model_store, vis_prefix, vis_dir):\n # Consider the first (i.e. 
the closest) fragment.\n frag_ind = 0\n\n centers_vis = np.zeros((output_size[1], output_size[0], 3))\n for gt_id, obj_id in enumerate(gt_obj_ids):\n obj_mask = gt_obj_masks[gt_id]\n obj_frag_labels = gt_frag_labels[obj_mask][:, frag_ind]\n centers_vis[obj_mask] = model_store.frag_centers[obj_id][obj_frag_labels]\n\n weights_vis = gt_frag_weights[:, :, frag_ind]\n weights_vis /= weights_vis.max()\n\n coords_vis = np.zeros((output_size[1], output_size[0], 3))\n for gt_id, obj_id in enumerate(gt_obj_ids):\n\n obj_mask = gt_obj_masks[gt_id]\n obj_frag_labels = gt_frag_labels[obj_mask][:, frag_ind]\n obj_frag_coords = gt_frag_coords[obj_mask][:, frag_ind, :]\n\n # Scale by fragment sizes.\n frag_scales = model_store.frag_sizes[obj_id][obj_frag_labels]\n obj_frag_coords *= np.expand_dims(frag_scales, 1)\n\n coords_vis[obj_mask] = obj_frag_coords\n\n # Reconstruct the XYZ object coordinates.\n xyz_vis = centers_vis + coords_vis\n\n # Normalize the visualizations.\n centers_vis = centers_vis - centers_vis.min()\n centers_vis /= centers_vis.max()\n\n coords_vis = coords_vis - coords_vis.min()\n coords_vis /= coords_vis.max()\n\n xyz_vis = xyz_vis - xyz_vis.min()\n xyz_vis /= xyz_vis.max()\n\n # Save the visualizations.\n inout.save_im(\n os.path.join(vis_dir, '{}_gt_frag_labels.png'.format(vis_prefix)),\n (255.0 * centers_vis).astype(np.uint8))\n\n inout.save_im(\n os.path.join(vis_dir, '{}_gt_frag_coords.png'.format(vis_prefix)),\n (255.0 * coords_vis).astype(np.uint8))\n\n inout.save_im(\n os.path.join(vis_dir, '{}_gt_frag_reconst.png'.format(vis_prefix)),\n (255.0 * xyz_vis).astype(np.uint8))\n\n inout.save_im(\n os.path.join(vis_dir, '{}_gt_frag_weights.png'.format(vis_prefix)),\n (255.0 * weights_vis).astype(np.uint8))", "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def plot_receptive_field(model, data):\n with tf.GradientTape() as tape:\n tape.watch(data)\n prediction = model(data)\n loss = prediction[:,5,5,0]\n\n gradients = tape.gradient(loss, data)\n\n gradients = np.abs(gradients.numpy().squeeze())\n gradients = (gradients > 0).astype('float32')\n gradients[5, 5] = 0.5\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n\n plt.xticks(np.arange(0, 10, step=1))\n plt.yticks(np.arange(0, 10, step=1))\n ax.xaxis.set_minor_locator(FixedLocator(np.arange(0.5, 10.5, step=1)))\n ax.yaxis.set_minor_locator(FixedLocator(np.arange(0.5, 10.5, step=1)))\n plt.grid(which=\"minor\")\n plt.imshow(gradients, vmin=0, vmax=1)\n plt.show()", "def displayFiducial(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n displayNode = modelNode.GetDisplayNode()\r\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\") != None:\r\n if 1:\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n if self.fiducialnode[i] == 0:\r\n polyData = modelNode.GetPolyData()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 10:\r\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n self.fiducialnode[i].SetName(self.option[i])\r\n self.fiducialnode[i].SetFiducialCoordinates(coord)\r\n 
self.fiducialnode[i].Initialize(slicer.mrmlScene)\r\n self.fiducialnode[i].SetLocked(1)\r\n self.fiducialnode[i].SetSelectable(0)\r\n fidDN = self.fiducialnode[i].GetDisplayNode()\r\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n\r\n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\r\n else:\r\n if modelNode.GetDisplayNode().GetVisibility():\r\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility() - 1))\r\n if self.fiducialnode[i].GetDisplayVisibility() == 1:\r\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\r\n else:\r\n self.displayFiducialButton.text = \"Display Labels on Needles\"", "def visualize(self, outfn):\n if self.has_viz_data:\n pyLDAvis.save_html(self.vis_data, outfn)\n return\n assert(self.has_vocab and self.has_corpus)\n assert(self.is_trained)\n # this might crash. I think because corpus, vocab, and _lda_model are all big. \n self.vis_data = prepare(self._lda_model, self.corpus, self.vocab)\n self.has_viz_data = True\n pyLDAvis.save_html(self.vis_data, outfn)", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def model(Prio: NNEmbeddings, plot_emb: bool = False, pickle_file: str = None):\n # New Predicitons\n df_metrics = Prio.predict(pickle_file=pickle_file)\n plot_single(df_metrics)\n\n if plot_emb:\n # TSNE Plots\n Prio.plot_embeddings()\n Prio.plot_embeddings_labeled(layer='tests')\n Prio.plot_embeddings_labeled(layer='files')\n\n # UMAP Plots\n Prio.plot_embeddings(method='UMAP')\n Prio.plot_embeddings_labeled(layer='tests', method='UMAP')\n Prio.plot_embeddings_labeled(layer='files', method='UMAP')", "def display_preprocessed(env,frame):\n env.reset()\n\n #Plot the figure\n plt.figure()\n\n #Show the pre processed frame\n plt.imshow(preprocess_frame(env.reset(), (0, 0, 0, 0), 84), cmap=\"gray\")\n\n #Add title\n plt.title('Pre Processed image')\n\n #Show the plot\n plt.show()", "def plot_trace_vs_prediction(self, trace, tuning=True):\n pass" ]
[ "0.6237396", "0.58392346", "0.5504426", "0.54394037", "0.54047847", "0.5402861", "0.53779405", "0.5372202", "0.53684616", "0.53396916", "0.5336576", "0.5331659", "0.53001356", "0.5289845", "0.52895397", "0.52718323", "0.5260922", "0.5260298", "0.52548635", "0.5251781", "0.52290195", "0.52131367", "0.519511", "0.51937693", "0.51661825", "0.51382005", "0.51259804", "0.509249", "0.50899446", "0.5087354" ]
0.6102499
1
Pass in target movie_id, target days and full comments data. Comments data is automatically filtered based on parameters. Raise value error if days out of range
def __init__(self, movie_id, days_window: list, comments): self.movie_id = movie_id dcs_uid = brands[brands['brand_ods_id'] == self.movie_id]['data_profile_dcs_uid'].values self.comments = comments self.comments = self.comments[self.comments['post_author_dcs_uid'].isin(dcs_uid)] self.comments = self.comments[self.comments['comment_message'].notna()] if len(self.comments) == 0: raise LookupError('No comments found for this movie. Try a larger set of comments or check the movie id inputted') self.comments['released_on'] = clusters[clusters['brand_ods_id'] == self.movie_id]['released_on'].iloc[0] self.comments['comment_posted_at'] = pd.to_datetime(self.comments['comment_posted_at']).dt.date self.comments['released_on'] = pd.to_datetime(self.comments['released_on']).dt.date self.comments['days_after_release'] = self.comments['comment_posted_at'] - self.comments['released_on'] self.comments['days_after_release'] = pd.to_numeric(self.comments['days_after_release'].apply(lambda x: x.days)) sentiment = [] for x in self.comments['comment_ace_metadata']: try: sentiment.append(json.loads(x)['lfm/sentiment_polarity']) except: sentiment.append('neu') self.comments['sentiment'] = sentiment if days_window[0] >= days_window[1] or days_window[0] < self.comments['days_after_release'].min() or \ days_window[1] > self.comments['days_after_release'].max(): raise KeyError('Days out of bounds, try again with a larger set of comments or adjust the days after release parameter') self.day_window = days_window
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_comments_for_one_day(self, y,m,d):\n in_date = date(y,m,d)\n\n start = self.utc_to_unix_time(in_date - timedelta(1)) \n end = self.utc_to_unix_time(in_date) \n return self.get_comments_between(start,end)", "def get_comments(subreddit, start_date, end_date, limit):\n api = PushshiftAPI()\n return api.search_comments(after=start_date, before=end_date,\n subreddit=subreddit, limit=limit\n # , filter=['author', 'body', 'created_utc', 'nest_level']\n )", "def build_pipeline_november_comments(subreddit, limit):\n data_file_name = subreddit + '_november_comments'\n cleaned_file_name = data_file_name + '_cleaned'\n standardized_file_name = cleaned_file_name + '_standardized'\n vader_file_name = standardized_file_name + '_vader'\n flair_file_name = vader_file_name + '_flair'\n ibm_tone_file_name = flair_file_name + '_tones'\n\n # get historical data\n comment_data = get_november_historical_comments(subreddit, limit)\n\n # save to csv\n save_historical_submission_comments(comment_data, data_file_name + '.csv')\n\n # sanitize characters\n print('sanitizing characters')\n sanitize_characters(data_file_name + '.csv', cleaned_file_name + '.csv')\n\n # standardize comments\n generic_run_standardize_comments(cleaned_file_name + '.csv', standardized_file_name + '.csv')\n\n # add vader sentiment scores\n generic_run_vader_sentiment_scores(standardized_file_name + '.csv', vader_file_name + '.csv')\n\n # add flair sentiment score\n add_flair_sentiment_to_csv(vader_file_name + '.csv', flair_file_name + '.csv')", "def test_post_comment_user_data_validation(self):\n\n # omit both input\n r1 = self.client.post(reverse('movieapi:comments'))\n self.assertJSONEqual(\n r1.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r1.status_code, 400)\n\n # omit only comment\n r2 = self.client.post(reverse('movieapi:comments'), {'movie_id': 'tt0112573'})\n self.assertJSONEqual(\n r2.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r2.status_code, 400)\n\n r3 = self.client.post(reverse('movieapi:comments'), {'comment': 'test comment'})\n self.assertJSONEqual(\n r3.content,\n '{\"error\": \"Please provide movie ID and comment\"}'\n )\n self.assertEqual(r3.status_code, 400)", "def getMovieShortComments(movieid, pages=1, proxy=1):\n\n commentList = []\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36',\n 'Cookie': 'bid=PFXqD9SdoDo; douban-fav-remind=1; gr_user_id=0f03311e-0e28-4e2f-a8fd-3a272d2a525f; _vwo_uuid_v2=D54BE21A153A50F178B1EEA3EE252805F|d0f6410ffbf6226399de9cd1715afb86; viewed=\"1148282_30329536_25815142\"; ll=\"118172\"; push_doumail_num=0; douban-profile-remind=1; __yadk_uid=7QS0r1GHatoz4fkcP2sh8IWeD8YWzQ4u; push_noty_num=0; __utmv=30149280.18600; _ga=GA1.2.449624121.1587021337; __utmc=30149280; __utmz=30149280.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __utmc=223695111; __utmz=223695111.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __gads=ID=352a53130bca4285:T=1589699239:S=ALNI_MYKpXBWoi1resUvUVMC-9bRu-CuSw; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1589784625%2C%22https%3A%2F%2Fm.douban.com%2Fmovie%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.449624121.1587021337.1589694675.1589784731.5; __utma=223695111.299663224.1587002697.1589694675.1589784731.5; __utmb=223695111.0.10.1589784731; __utmt=1; __utmb=30149280.1.10.1589784731; 
dbcl2=\"186000836:vB8x8LL+q3k\"; ck=kTW_; _pk_id.100001.4cf6=ffb676b0890cad74.1587002697.6.1589786159.1589699369.'\n }\n session = requests.Session()\n\n proxies = None\n if proxy == 1:\n proxies = get_proxy.get_workable_ip()\n\n # First, try to get the total of comments.\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\",\n headers=headers, proxies=proxies)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n numstr = bsObj.body.find('div', {'id': 'wrapper'}).find('ul', {'class': 'fleft CommentTabs'}) \\\n .find('li', {'class': 'is-active'}).span.get_text()\n num = re.match(r'(\\D+)(\\d+)', numstr)\n total = int(num.group(2))\n print(total)\n\n # To avoid the situation that the total of comments is less than the number we set.\n if pages * 20 > total:\n pages = int(total / 20 + 1)\n\n # Get comments.\n try:\n for i in range(0, pages):\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(\n movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\" +\n str(i * 20), headers=headers)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n comment_tags = bsObj.body.find('div', {'id': 'comments'}).find_all('div', {'class': 'comment-item'})\n pattern = re.compile('\\d{2}')\n for tag in comment_tags:\n temp = {}\n t = tag.find('span', {'class': re.compile('(.*) rating')})\n if t is not None:\n star = int(pattern.findall(t['class'][0])[0])\n # print(star)\n temp['comment'] = tag.find('p').span.get_text()\n temp['star'] = star\n commentList.append(temp)\n except AttributeError as e:\n print(\"Limited by website, please change your proxy.爬虫好像受到网站的限制,请更换代理。\")\n return commentList", "def get_cast_notes():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if imdbid exists use it to collect cast notes\n if (row['imdbId']):\n movie = ia.get_movie(str(row['imdbId']))\n cast_list = movie.get('cast')\n if (cast_list != None) :\n for cast_member in cast_list: \n imdb_id = cast_member.personID\n updates = { 'notes' : cast_member.notes }\n selects = {\"p_imdbId\" : imdb_id, \"m_imdbId\" : row['imdbId'] }\n database_helper.update_data(\"actors\", update_params = updates, select_params = selects)\n \n pbar.update(1)", "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def crawl(self, query, start_date, end_date, *args, **kwargs):\n pass", "def request_comments(video_list, API_KEY, csv_path=\"../comments.csv\", as_df=False):\n columns = ['query', 'url', 'title', 'upload_date', 'channel', 'views', 'likes', 'dislikes', 'comment_count', 'comment_text', 'comment_author', 'comment_date', 'comment_likes']\n df = pd.DataFrame(columns=columns)\n \n # If video list is empty, return empty\n \n for video in video_list:\n \n # Grab all comments for video\n comments = yt_comments(video['id'], API_KEY)\n \n # Skip video if comments are disabled\n if not comments:\n continue\n \n for comment in comments:\n youtube_dict = {}\n \n # Write scraped data to csv file\n youtube_dict['query'] = video['query']\n youtube_dict['url'] = \"https://www.youtube.com/watch?v=\" + video['id']\n youtube_dict['title'] = video['title']\n youtube_dict['upload_date'] = video['date']\n youtube_dict['channel'] = video['channel']\n youtube_dict['views'] = video['views']\n 
youtube_dict['likes'] = video['likes']\n youtube_dict['dislikes'] = video['dislikes']\n youtube_dict['comment_count'] = video['comment_count']\n youtube_dict['comment_text'] = comment['text']\n youtube_dict['comment_author'] = comment['author']\n youtube_dict['comment_date'] = comment['date']\n youtube_dict['comment_likes'] = comment['likes']\n df = df.append(youtube_dict, ignore_index=True)\n \n if as_df:\n return df\n \n df.to_csv(csv_path, encoding=\"UTF-8\", index=False)\n return", "def comment_data(post_id: str, \n sub_reddit: str):\n url_to_open = f\"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json\"\n success_status = 0\n while success_status != 200:\n try:\n response = urlopen(url_to_open, timeout=10)\n success_status = response.status\n except HTTPError:\n logging.info(f\"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.\")\n time.sleep(120)\n success_status = 400\n \n sub_reddit_page = json.loads(response.read())\n comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])\n comments_df['post_id'] = post_id\n comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n comments_df['reply'] = 'N'\n comments_df['comment_replied_id'] = ''\n # get all replies \n replies_list = []\n for comment in sub_reddit_page[1]['data']['children']:\n replies = comment.get('data').get('replies')\n comment_id = comment.get('data').get('id') \n if replies is None or replies == '':\n pass\n else:\n replies_df = pd.json_normalize(replies['data']['children'])\n try:\n replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n except KeyError:\n pass\n replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n replies_df['reply'] = 'Y'\n replies_df['comment_replied_id'] = comment_id\n replies_df['post_id'] = post_id\n replies_list.append(replies_df)\n if len(replies_list) == 1:\n all_replies = replies_list[0]\n elif len(replies_list) > 1: \n all_replies = pd.concat(replies_list, ignore_index = True)\n else:\n all_replies = None \n\n column_order = [c for c in comments_df.columns]\n comments_df = comments_df[column_order]\n if all_replies is not None:\n all_replies = all_replies[column_order]\n all_comments_replies = pd.concat([comments_df, replies_df], ignore_index=True)\n else:\n all_comments_replies = comments_df\n\n return all_comments_replies", "def test_get_movie_top_date_range(self):\n\n start_date = date(2019, 5, 22)\n end_date = date(2019, 5, 25)\n\n r = self.client.get(reverse('movieapi:top'), {'start_date': start_date, 'end_date': end_date})\n\n qs = Movie.objects \\\n .filter(comment__added_on__range=(start_date, end_date)) \\\n .annotate(total_comments=Count('comment__comment'),\n rank=Window(\n expression=DenseRank(),\n order_by=F('total_comments').desc(),\n )\n ).values('id', 'total_comments', 'rank')\n\n serializer = TopSerializer(qs, many=True)\n\n self.assertJSONEqual(\n r.content,\n serializer.data\n )\n self.assertEqual(r.status_code, 200)", "def tvresult():\n selected_date = request.args.get('selected_date')\n selected_date2 = 
request.args.get('selected_date2')\n actor = request.args.get('actor')\n genre = request.args.get('genre')\n\n if not selected_date:\n selectedDateFormatted = datetime.datetime.today().strftime('%Y-%m-%d')\n else:\n selectedDate = datetime.datetime.strptime(selected_date, '%d.%m.%Y')\n selectedDateFormatted = selectedDate.strftime('%Y-%m-%d')\n selectedDate2 = datetime.datetime.strptime(selected_date2, '%d.%m.%Y')\n selectedDate2Formatted = selectedDate2.strftime('%Y-%m-%d')\n \n dates = set()\n dates.add(selectedDateFormatted)\n\n \"\"\" Collect dates for searching \"\"\"\n\n nextDateFormatted = selectedDateFormatted\n x = 1\n\n while nextDateFormatted < selectedDate2Formatted:\n nextDate = selectedDate + datetime.timedelta(days=x)\n nextDateFormatted = nextDate.strftime ('%Y-%m-%d')\n dates.add(nextDateFormatted)\n x += 1\n\n \"\"\" Loop through dates \"\"\"\n movies = []\n for x in dates:\n\n searchUrl = \"https://www.iltalehti.fi/telkku/tv-ohjelmat/\" + x + \"/peruskanavat/koko-paiva\"\n \n \"\"\" Gather data from telkku.com with BeautifulSoup. We are interested\n in movies on public television. From page content, look for 'li' tags.\n \"\"\"\n\n page = requests.get(searchUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n programs = soup.find_all('li')\n\n \"\"\" Loop through tv programs data for current date \"\"\"\n for y in programs:\n \n \"\"\" Movies have the class tag publication__imdb-link. Other data is skipped. \"\"\"\n\n imdb_link_cl = y.find(class_=\"publication__imdb-link\")\n if imdb_link_cl is None:\n continue\n\n movie_title = y.get(\"title\") \n \n if movie_title is None:\n continue \n\n print(movie_title)\n imdb_link = imdb_link_cl.get('href')\n showdatetime = y.find('time').get(\"datetime\")\n \n (sdate_tmp, stime_tmp) = showdatetime.split(\"T\")\n showdate = sdate_tmp[8:10] + \".\" + sdate_tmp[5:7] + \".\" + sdate_tmp[0:4]\n\n showdate_obj = datetime.datetime.strptime(showdate, '%d.%m.%Y')\n\n if showdate_obj > selectedDate2:\n continue\n\n showtime = stime_tmp[0:5]\n imdb_temp = imdb_link.split(\"/\")\n imdb_id = imdb_temp[len(imdb_temp) - 2]\n channel_cl = y.find(class_=\"publication__title\")\n channel_name_href = channel_cl.get(\"href\")\n channel = get_channel_name(channel_name_href)\n if channel == \"Not found\":\n continue\n \n movie_data = find_movie_from_api(imdb_id)\n \n if actor and not actor in movie_data['Actors']:\n continue\n \n if not genre is None and not \"Any\" in genre:\n genres = movie_data['Genre']\n if not genre in genres:\n continue\n\n img = movie_data['Poster']\n if len(img) < 5:\n img = find_poster_from_imdb(imdb_id)\n \n reviews = find_reviews(imdb_id)\n plot = movie_data['Plot'].replace('\"','\\\\\"')\n\n film = {\"showtime\": showtime, \"fi_name\": movie_title, \"reviews\": reviews,\n \"channel\": channel, \"showdate\": showdate, \"imdb_id\": imdb_id,\n \"img\": img, \"name\": movie_data['Title'],\n \"year\": movie_data['Year'], \"country\": movie_data['Country'],\n \"director\": movie_data['Director'], \"actors\": movie_data['Actors'],\n \"genre\": movie_data['Genre'], \"rated\": movie_data['Rated'],\n \"runtime\": movie_data['Runtime'],\"plot\": plot}\n \n if film not in movies:\n movies.append(film)\n\n return render_template(\"results.html\", movies=movies, dateFrom=selected_date, dateTo=selected_date2)", "def build_comment_database_pipeline(subreddit, max):\n data_file_name = subreddit + '_30_months_comments'\n cleaned_file_name = data_file_name + '_cleaned'\n standardized_file_name = cleaned_file_name + '_standardized'\n 
vader_file_name = standardized_file_name + '_vader'\n flair_file_name = vader_file_name + '_flair'\n ibm_tone_file_name = flair_file_name + '_tones'\n\n # get historical data\n comment_data = get_historical_submissions(subreddit, max)\n\n # save to csv\n save_historical_submission_comments(comment_data, data_file_name + '.csv')\n\n # sanitize characters\n print('sanitizing characters')\n sanitize_characters(data_file_name + '.csv', cleaned_file_name + '.csv')\n\n # standardize comments\n generic_run_standardize_comments(cleaned_file_name + '.csv', standardized_file_name + '.csv')\n\n # add vader sentiment scores\n generic_run_vader_sentiment_scores(standardized_file_name + '.csv', vader_file_name + '.csv')\n\n # add flair sentiment score\n add_flair_sentiment_to_csv(vader_file_name + '.csv', flair_file_name + '.csv')\n\n # add ibm tones\n # add_tone_columns_to_csv(flair_file_name + '.csv', ibm_tone_file_name + '.csv')", "def get_date_in_days(raw_data, target_columns=['Submitby Date Time', 'Posting Date Date']):\r\n output = raw_data.copy()\r\n\r\n for column in target_columns:\r\n for i in range(len(raw_data)):\r\n date = datetime.date(output.loc[i, column+' Year'], output.loc[i, column+' Month'], output.loc[i, column+' Day'])\r\n output.loc[i, column+' Days from 2016'] = (date-datetime.date(2016, 1, 1)).days\r\n\r\n return output", "def comments_to_csv(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path=\"C:/WebDriver/bin/chromedriver.exe\", csv_path=\"./youtube_comments.csv\", useAPI=True):\n\n \n video_list = request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=maxResults)\n \n if (useAPI):\n request_comments(video_list, API_KEY, csv_path)\n else:\n scrape_comments(video_list, driver_path, csv_path)", "def get_november_historical_comments(subreddit, limit):\n all_submissions = []\n\n days = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n month = 11\n year = 2020\n\n for day in days:\n # generate random 4 hour time chunk\n start_hour = random.randint(0, 14)\n end_hour = start_hour + 4\n start_time = int(dt.datetime(year, month, day, start_hour, 0).timestamp())\n end_time = int(dt.datetime(year, month, day, end_hour, 0).timestamp())\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time, end_time, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time + 5, end_time + 5, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n print('querying day:', day)\n print('total submissions:', len(all_submissions))\n\n return all_submissions", "def eddies_tracker(initial_date, list_days, metric_name='Jaccard',nb_prev_day=5,min_detection=5):\n\n eddies_path = {}\n current_max_id = -1\n nb_days = len(list_days)\n\n # Select the metric\n if metric_name=='Jaccard':\n from metrics import eddies_jaccard_index as metric\n else:\n print(\"eddies_tracker: invalid metric\")\n return {}\n\n # Init the paths\n if nb_days == 0:\n print(\"eddies_tracker: empty list_days\")\n return {}\n eddies_path[initial_date] = {}\n for eddy in list_days[0]:\n current_max_id += 1\n eddies_path[initial_date][current_max_id] = eddy\n\n # Handle the first days separatly (as part of the initialisation)\n for day in range(1,nb_prev_day):\n if nb_days == day:\n return eddies_path\n current_max_id += track_one_day(initial_date+day,eddies_path,list_days[day],\n 
current_max_id,metric,nb_prev_day=day)\n\n # Match positions day by day\n for day in range(nb_prev_day,nb_days):\n current_max_id += track_one_day(initial_date+day,eddies_path,\n list_days[day],current_max_id,\n metric,nb_prev_day=nb_prev_day)\n\n # Delete eddies observed too view times\n nb_observation = np.zeros((current_max_id+1))\n for day in eddies_path.keys():\n for eddy_id in eddies_path[day].keys():\n nb_observation[eddy_id] += 1\n remove_eddy = nb_observation<min_detection\n for day in eddies_path.keys():\n list_keys = list(eddies_path[day].keys())\n for eddy_id in list_keys:\n if remove_eddy[eddy_id]:\n del eddies_path[day][eddy_id]\n\n return eddies_path", "def retrieve_article_urls(start_year, start_month, end_year, end_month, timeout=120):\n \n st = datetime(start_year, start_month, 1)\n et = datetime(end_year, end_month, monthrange(end_year, end_month)[1])\n \n if st > et:\n raise ValueError(\"End date specified is before the start date.\")\n \n if st > datetime.now():\n raise ValueError(\"We're astronomers not astrologers; we can't predict the future.\")\n \n \n logging.info(\"Looking for peer-reviewed articles on ADS published between %i/%i and %i/%i\" \\\n % (start_year, start_month, end_year, end_month, ))\n \n # Prepare the data for ADS \n affiliation = \"%0D%0A\".join(INSTITUTE_QUERY).replace(' ', '+') \n data = \"\"\"db_key=AST&db_key=PRE&qform=AST&arxiv_sel=astro-ph&arxiv_sel=cond-\n mat&arxiv_sel=cs&arxiv_sel=gr-qc&arxiv_sel=hep-ex&arxiv_sel=hep-lat&arxiv_se\n l=hep-ph&arxiv_sel=hep-th&arxiv_sel=math&arxiv_sel=math-ph&arxiv_sel=nlin&ar\n xiv_sel=nucl-ex&arxiv_sel=nucl-th&arxiv_sel=physics&arxiv_sel=quant-ph&arxiv\n _sel=q-bio&sim_query=YES&ned_query=YES&adsobj_query=YES&aut_logic=OR&obj_log\n ic=OR&author=&object=&start_mon=%i&start_year=%i&end_mon=%i&end_year=%i&ttl_\n logic=OR&title=&txt_logic=OR&text=&kwd_logic=OR&keyword=&aff_req=YES&aff_log\n ic=OR&affiliation=%s&nr_to_return=200&start_nr=1&jou_pick=NO&ref_stems=&data\n _and=ALL&group_and=ALL&start_entry_day=&start_entry_mon=&start_entry_year=&e\n nd_entry_day=&end_entry_mon=&end_entry_year=&min_score=&sort=SCORE&data_type\n =SHORT&aut_syn=YES&txt_syn=YES&txt_syn=YES&aut_wt=1.0&obj_wt=1.0&ttl_wt=0.3&\n txt_wt=3.0&aut_wgt=YES&obj_wgt=YES&ttl_wgt=YES&txt_wgt=YES&ttl_sco=YES&txt_s\n co=YES&version=1&aff_syn=NO&aff_wt=1.0&aff_wgt=YES&kwd_sco=YES&kwd_syn=NO&kw\n d_wt=1.0&kwd_wgt=YES&kwd_sco=YES\"\"\".replace('\\n ', '') \\\n % (start_month, start_year, end_month, end_year, affiliation, )\n \n host = 'http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?' 
+ data\n \n # Perform the query\n request = urllib2.Request(host)\n handle = urllib2.urlopen(request, timeout=timeout)\n data = ''.join(handle.read())\n \n # Search for pre-prints and article links\n \n preprints = re.findall('href=\"\\S+link_type=PREPRINT\"', data)\n articles = re.findall('href=\"\\S+link_type=ARTICLE\"', data)\n \n logging.info(\"Identified %i preprint links and %i article links.\" \\\n % (len(preprints), len(articles), ))\n \n if len(preprints) > len(articles):\n logging.info(\"Preprint links will be used wherever refereed article files are unavailable.\")\n \n # Clean up the links\n preprints = [preprint.split('\"')[1] for preprint in preprints]\n articles = [article.split('\"')[1] for article in articles]\n \n logging.debug(\"Pre-prints:\")\n [logging.debug(preprint) for preprint in preprints]\n \n logging.debug(\"Article links:\")\n [logging.debug(article) for article in articles]\n \n \n article_baselinks = [';'.join(article.split(';')[:-1]) for article in articles]\n \n article_urls = []\n \n # Check for any papers that have preprints but no full refereed journal article\n for preprint in preprints:\n link = ';'.join(preprint.split(';')[:-1])\n \n if link not in article_baselinks:\n # This particular paper had no full PDF link, so we will have to take\n # the pre-print\n article_urls.append(preprint)\n \n else:\n # This will maintain chronological order of all the articles\n article_urls.append(articles[article_baselinks.index(link)])\n \n # Clean up the links [TODO] make this more elegant\n article_urls = [article.replace('&#38;', '&') for article in article_urls] \n \n # Extract bibcodes\n bibcodes = []\n for article in article_urls:\n bibcode = re.findall('(?<=bibcode=)\\S+(?=&db_key)', article)\n \n if len(bibcode) is 0:\n logging.warn(\"Could not find bibliography code from URL (%s).\" \\\n + \"Assigning random string instead.\" % (article, ))\n bibcode = ''\n else: bibcode = bibcode[0].replace('%26', '&') # TODO be more elegant\n \n bibcodes.append(bibcode)\n \n \n return zip(bibcodes, article_urls)", "def prep_data(ratings_df, watched_df=None, watchlist_df=None,\n good_threshold=4, bad_threshold=3):\n id_book = pd.read_csv('title_basics_small.csv')\n try:\n # try to read Letterboxd user data\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Rating', 'Name', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Rating'] >= good_threshold]\n bad_df = ratings_df[ratings_df['Rating'] <= bad_threshold]\n neutral_df = ratings_df[(ratings_df['Rating'] > bad_threshold) & (ratings_df['Rating'] < good_threshold)]\n # convert dataframes to lists\n good_list, good_dict = df_to_id_list(good_df, id_book)\n bad_list, bad_dict = df_to_id_list(bad_df, id_book)\n neutral_list, neutral_dict = df_to_id_list(neutral_df, id_book)\n except KeyError:\n # Try to read IMDb user data\n # strip ids of \"tt\" prefix\n ratings_df['movie_id'] = ratings_df['Const'].apply(lambda x: str(x).lstrip(\"tt\"))\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Your Rating', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Your Rating'] >= good_threshold*2]\n bad_df = ratings_df[ratings_df['Your Rating'] <= bad_threshold*2]\n neutral_df = ratings_df[(ratings_df['Your Rating'] > bad_threshold*2) & (ratings_df['Your Rating'] < good_threshold*2)]\n # convert dataframes to lists\n good_list = good_df['movie_id'].to_list()\n bad_list = 
bad_df['movie_id'].to_list()\n neutral_list = neutral_df['movie_id'].to_list()\n # make ratings dictionaries\n good_dict = dict(zip(good_list, good_df['Your Rating'].tolist()))\n bad_dict = dict(zip(bad_list, bad_df['Your Rating'].tolist()))\n neutral_dict = dict(zip(neutral_list, neutral_df['Your Rating'].tolist()))\n except Exception as e:\n # can't read the dataframe as Letterboxd or IMDb user data\n print(\"This dataframe has columns:\", ratings_df.columns)\n raise Exception(e)\n\n ratings_dict = dict(list(good_dict.items()) + list(bad_dict.items()) + list(neutral_dict.items()))\n\n if (watched_df is not None) and (not watched_df.empty):\n # Construct list of watched movies that aren't rated \"good\" or \"bad\"\n # First, get a set of identified IDs.\n rated_names = set(good_df.Name.tolist() + bad_df.Name.tolist() + neutral_list)\n # drop nulls from watched dataframe\n full_history = watched_df.dropna(axis=0, subset=['Name', 'Year'])\n # get list of watched movies that haven't been rated\n hist_list = df_to_id_list(full_history[~full_history['Name'].isin(rated_names)], id_book)[0]\n # add back list of \"neutral\" movies (whose IDs we already found before)\n hist_list = hist_list + neutral_list\n else: hist_list = neutral_list\n\n if (watchlist_df is not None) and (not watchlist_df.empty):\n try:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Name', 'Year'])\n val_list = df_to_id_list(watchlist_df, id_book)[0]\n except KeyError:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Const', 'Year'])\n watchlist_df['movie_id'] = watchlist_df['Const'].str.lstrip(\"tt\")\n val_list = watchlist_df['movie_id'].tolist()\n else: val_list = []\n\n return (good_list, bad_list, hist_list, val_list, ratings_dict)", "def get_release_dates():\n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows():\n \n #get list of release dates from API\n movie = ia.get_movie(str(row['imdbId']), info='release dates')\n release_dates = movie['release dates']\n \n #try to extract UK release dates (string from imdb is a mess)\n uk = [i for i in movie['release dates'] if 'UK' in i and not '(' in i]\n if (len(uk) > 0):\n #if successful update the db with the release date\n date_string = uk[0].split('::')[1]\n date = datetime.strptime(date_string, '%d %B %Y')\n database_helper.update_data(\"movies\", update_params = { \"ukReleaseDate\" : date }, select_params = {\"movieId\" : row[\"movieId\"]})\n else: \n #if no uk release date found print to console\n print(\"No UK release for \", row.title)\n \n pbar.update(1)", "def is_target_ymdh(tweet_ymdh, start_ymdh, end_ymdh):\r\n if start_ymdh is [] and end_ymdh is []:\r\n # No ymdh limit given, taken as all dates.\r\n return True\r\n if (start_ymdh is not [] or end_ymdh is not []) and tweet_ymdh is []:\r\n # Once start or end hes been given, then tweet should be provided as well.\r\n return False\r\n ds = dt = de = None\r\n \r\n format_str_arr = ['%Y', '%m', '%d', '%H']\r\n normed_tw_ymdh = normalize_ymdh(tweet_ymdh)\r\n ele_num = len(normed_tw_ymdh)\r\n if tweet_ymdh is not []:\r\n dt = strptime('-'.join(normed_tw_ymdh), '-'.join(format_str_arr[:ele_num]))\r\n if start_ymdh is not []:\r\n ds = strptime('-'.join(normalize_ymdh(start_ymdh)[:ele_num]), '-'.join(format_str_arr[:ele_num]))\r\n if end_ymdh is not []:\r\n de = strptime('-'.join(normalize_ymdh(end_ymdh)[:ele_num]), '-'.join(format_str_arr[:ele_num]))\r\n if ds and de and (ds - de).days > 0:\r\n raise 
ValueError('Ending date earlier than beginning date.')\r\n is_t_lt_s = (dt - ds).days >= 0 if ds else True\r\n is_e_lt_t = (de - dt).days >= 0 if de else True\r\n return is_t_lt_s and is_e_lt_t", "def test_get_comment_by_movieid(self):\n\n movie_id = 'tt1737174'\n\n r = self.client.get(reverse('movieapi:comments'), {'movie_id': movie_id})\n\n qs = Comment.objects.filter(movie__imdbid=movie_id)\n serializer = CommentSerializer(qs, many=True)\n\n self.assertJSONEqual(\n r.content,\n serializer.data\n )\n self.assertEqual(r.status_code, 200)", "def get_comments_between(self, start_date, end_date):\n ret = []\n ids = self.get_post_ids(start_date, end_date)\n\n for id in ids:\n comments = self.reddit.submission(id).comments\n ret.append(self.get_nested_comments(comments))\n return ret", "def get_non_normalized_movie_data_df(imdb_ids_list=[\"tt1630029\", \"tt0499549\"], no_records_to_display=0):\n\n title_basics_data_df, title_basics_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"basics\",\n show=False, no_records_to_show=no_records_to_display)\n\n title_basics_data_df = title_basics_data_df[title_basics_data_df[\"tconst\"].isin(imdb_ids_list)]\n title_basics_data_df = split_cols_into_rows(source_df=title_basics_data_df, split_col_name=\"genres\")\n title_basics_data_df = title_basics_data_df[[\"tconst\", \"genres\"]]\n title_basics_data_df[\"type\"] = \"genre\"\n title_basics_data_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_crew_data_df, title_crew_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"crew\",\n show=False, no_records_to_show=no_records_to_display)\n title_crew_data_df = title_crew_data_df[title_crew_data_df[\"tconst\"].isin(imdb_ids_list)]\n # title_crew_data_df = title_crew_data_df[title_crew_data_df[\"directors\"].str.contains(\",\")]\n\n name_basics_data_df, name_basics_data_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"name\",\n level_2=\"basics\",\n show=False, no_records_to_show=no_records_to_display)\n\n title_crew_data_writers_df = title_crew_data_df[[\"tconst\", \"writers\"]].copy().drop_duplicates()\n title_crew_data_writers_df = split_cols_into_rows(source_df=title_crew_data_writers_df, split_col_name=\"writers\")\n\n joined_writers_df = pd.merge(title_crew_data_writers_df, name_basics_data_df,\n left_on=\"writers\", right_on=\"nconst\", how=\"left\")\n joined_writers_df = joined_writers_df[[\"tconst\", \"primaryName\"]]\n joined_writers_df[\"type\"] = \"writer\"\n joined_writers_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_crew_data_directors_df = title_crew_data_df[[\"tconst\", \"directors\"]].copy().drop_duplicates()\n title_crew_data_directors_df = split_cols_into_rows(source_df=title_crew_data_directors_df, split_col_name=\"directors\")\n joined_directors_df = pd.merge(title_crew_data_directors_df, name_basics_data_df,\n left_on=\"directors\", right_on=\"nconst\", how=\"left\")\n joined_directors_df = joined_directors_df[[\"tconst\", \"primaryName\"]]\n joined_directors_df[\"type\"] = \"director\"\n joined_directors_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_principals_data_df, title_principals_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"principals\",\n show=False, no_records_to_show=no_records_to_display)\n title_principals_data_df = title_principals_data_df[title_principals_data_df[\"tconst\"].isin(imdb_ids_list)]\n title_principals_data_df = 
title_principals_data_df[[\"tconst\", \"nconst\"]].copy().drop_duplicates()\n joined_principals_df = pd.merge(title_principals_data_df, name_basics_data_df, on=\"nconst\", how=\"left\")\n joined_principals_df = joined_principals_df[[\"tconst\", \"primaryName\"]]\n joined_principals_df[\"type\"] = \"principal\"\n joined_principals_df.columns = [\"tconst\", \"value\", \"type\"]\n\n non_normalized_df = pd.concat([title_basics_data_df, joined_writers_df, joined_directors_df, joined_principals_df])\n non_normalized_df = non_normalized_df.reset_index(drop=True)\n\n if no_records_to_display > 0:\n print(\"title_basics_data_df :\")\n print(title_basics_data_df.head(no_records_to_display))\n print()\n\n print(\"joined_writers_df :\")\n print(joined_writers_df.head(no_records_to_display))\n print()\n\n print(\"joined_directors_df :\")\n print(joined_directors_df.head(no_records_to_display))\n print()\n\n print(\"joined_principals_df :\")\n print(joined_principals_df.head(no_records_to_display))\n print()\n\n print(\"non_normalized_df :\")\n print(non_normalized_df.head(no_records_to_display))\n print()\n\n return non_normalized_df", "def test_days_weeks_activity():\n assert analytics.activity('daily', yoga_trackings(), 1) == 17\n assert analytics.activity('weekly', run_trackings(), 1) == 4\n assert analytics.activity('daily', read_trackings(), 1) == 18\n assert analytics.activity('daily', meditation_trackings(), 1) == 15\n assert analytics.activity('weekly', french_trackings(), 1) == 5", "def gen_target(self):\n df_iwp = self.get_df_iwp()\n df_iwes = self.get_df_iwes()\n df_iwes = df_iwes.rename(columns={'exitsitedate':'ft_data_dt'})\n df_iwp = df_iwp.rename(columns={'peritonitisdate':'ft_data_dt'})\n df_infect = df_iwes.merge(df_iwp, how='outer')\n df_infect = df_infect.dropna(axis=0)\n first_infect = df_infect.groupby(['idd']).min()\n first_infect = first_infect.reset_index()\n first_infect['target'] = 1\n first_infect['ft_data_dt'] = pd.to_datetime(first_infect['ft_data_dt'], format=\"%Y%m\") + MonthEnd(1)\n all_target = []\n end_data_dt = self.get_end_data_dt()\n print(end_data_dt)\n for index, row in first_infect.iterrows():\n date_series = pd.date_range(*(pd.to_datetime([row['ft_data_dt'], end_data_dt]) + pd.offsets.MonthEnd()), freq='M', name='ft_data_dt')\n idd_list = [row['idd']]\n idd_series = pd.Series(data=idd_list, name='idd')\n date_frame = date_series.to_frame()\n idd_frame = idd_series.to_frame()\n date_frame['key'] = 0\n idd_frame['key'] = 0\n tmp_target = idd_frame.merge(date_frame, on='key', how='outer').drop(columns=['key'])\n all_target.append(tmp_target)\n target = pd.concat(all_target)\n target['target'] = 1\n self.set_target_df(target)", "def date_separation1(input_data, target_columns=['Submitby Date Time', 'Posting Date Date'], max_num_columns=6):\r\n target_suffixes = [' Year', ' Month', ' Day', ' Hour', ' Minute', 'Second']\r\n output = pd.DataFrame()\r\n column_with_none = [None for _ in range(len(input_data))]\r\n\r\n for column in input_data.columns:\r\n if column not in target_columns:\r\n output[column] = input_data[column]\r\n else:\r\n for i in range(len(input_data)):\r\n values = input_data.loc[i, column].replace(' ', '-').replace(':', '-').split('-')\r\n if len(values) not in [3, 6]:\r\n print('data error')\r\n print('len(values)='+str(len(values)))\r\n print('i='+str(i))\r\n exit(1)\r\n for j in range(min(len(values), max_num_columns)):\r\n if (column+target_suffixes[j]) not in output.columns:\r\n output[column+target_suffixes[j]] = column_with_none.copy()\r\n 
output.loc[i, column+target_suffixes[j]] = int(values[j])\r\n\r\n return output", "def get_comments_for_commentor_and_reference_on_date(self, resource_id, reference_id, from_, to):\n raise errors.Unimplemented()", "def reddit_post(data, comments):\n\n sub = None\n try:\n sub = Source.objects.get(name=data['subreddit'])\n except Source.DoesNotExist:\n #This is jank but can be touched up manually\n sub = Source(name=data['subreddit'], url='reddit.com')\n sub.save()\n print 'source added to db with name: ' + data['subreddit']\n \n data['subreddit'] = sub\n \n (article, keywords) = scrape_article(data['url'], lambda x: timezone.now()) \n data['text'] = article['text']\n data['date'] = article['date']\n data['headline'] = article['headline']\n\n try:\n post = RedditPost(**data)\n post.save()\n make_reddit_keywords(post, keywords)\n make_comments(post, comments)\n except IntegrityError as ex:\n print ex\n print 'not unique reddit post for ' + data['post_title']", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n print(\"Checking date \", str(year) + \"/\" + str(start_month) + \"/\" + str(start_day))\n \n\n if not is_valid(year, start_month, start_day) : \n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n if not is_valid(2000, end_month, end_day):\n sys.exit(\"Ending month and day must be part of a valid date\")\n count_days(year,start_month,start_day,end_month,end_day)" ]
[ "0.5074537", "0.47782722", "0.47501183", "0.4745651", "0.46918073", "0.46691847", "0.46658993", "0.45897898", "0.45842305", "0.4573905", "0.4556693", "0.45527554", "0.454822", "0.45028967", "0.449018", "0.4468614", "0.4454364", "0.44383335", "0.44142863", "0.43958214", "0.43882447", "0.43872017", "0.43549594", "0.43540207", "0.4347015", "0.43401694", "0.4338712", "0.4335974", "0.43260622", "0.4324365" ]
0.6544567
0
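A minimal runnable sketch of the days-window validation described by the query/document pair above, assuming pandas and the column names that pair uses (comment_posted_at, released_on, days_after_release). The helper names and the choice of ValueError (matching the query's wording rather than the stored code, which raises KeyError) are illustrative assumptions, not part of the dataset row.

import pandas as pd


def days_after_release(comments: pd.DataFrame) -> pd.Series:
    # Signed number of days between each comment and the release date
    # (negative values mean the comment was posted before release).
    posted = pd.to_datetime(comments["comment_posted_at"])
    released = pd.to_datetime(comments["released_on"])
    return (posted - released).dt.days


def validate_days_window(days_window: list, days: pd.Series) -> None:
    # Reject windows that are empty/reversed or fall outside the observed range.
    start, end = days_window
    if start >= end or start < days.min() or end > days.max():
        raise ValueError(
            f"Days out of bounds: requested [{start}, {end}], "
            f"available [{days.min()}, {days.max()}]"
        )


if __name__ == "__main__":
    # Tiny made-up example: release on 2020-01-03, comments posted around it.
    comments = pd.DataFrame({
        "comment_posted_at": ["2020-01-01", "2020-01-05", "2020-01-20"],
        "released_on": ["2020-01-03", "2020-01-03", "2020-01-03"],
    })
    days = days_after_release(comments)   # -> [-2, 2, 17]
    validate_days_window([-2, 10], days)  # within range, passes silently
    try:
        validate_days_window([-2, 60], days)  # end of window exceeds available comments
    except ValueError as exc:
        print(exc)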
Create a word count bar chart. Choose the amount of words to include, as well as whether to clean the comment text before plotting. If no words are found on a particular day, the function will automatically find the next
def count_plot(self, top_words=25, clean=True): if top_words > 25: warnings.warn('Including more than 25 words on the plot will cause labels to be excluded') daily_comments = self.comments[(self.comments['days_after_release'].\ isin(list(range(self.day_window[0], self.day_window[1]+1))))] if len(daily_comments) == 0: warnings.warn('No comments found for this day, trying future dates until comments are found') while len(daily_comments) == 0 and self.day_window[1] <= self.comments['days_after_release'].max(): if self.day_window[1] > self.comments['days_after_release'].max(): raise KeyError('Reached bounds of comment dates available. Make sure all comments are present') self.day_window[1] += 1 daily_comments = self.comments[(self.comments['days_after_release'].\ isin(list(range(self.day_window[0], self.day_window[1]+1))))] print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1])) left = np.where(self.day_window[0] < 0, 'Before', 'After') right = np.where(self.day_window[1] < 0, 'Before', 'After') if clean: daily_comments['clean_comments'] = daily_comments['comment_message'].apply(self.comment_cleaner) res = daily_comments['clean_comments'].str.split(expand=True).stack().value_counts().to_dict() fig = px.bar(x=list(res.keys())[:top_words], y=list(res.values())[:top_words]) fig.update_layout( title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words, self.day_window[0], left, self.day_window[1], right), yaxis_title='Count', xaxis_tickangle=-45 ) fig.show() else: res = daily_comments['clean_comments'].str.split(expand=True).stack().value_counts().to_dict() fig = px.bar(x=list(res.keys())[:top_words], y=list(res.values())[:top_words]) fig.update_layout( title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words, self.day_window[0], left, self.day_window[1], right), yaxis_title='Count', xaxis_tickangle=-45 ) fig.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top_words_bar_chart(df, n=10):\n messages = df['message'].values\n word_counts = {}\n for message in messages:\n tokens = tokenize(message)\n for token in tokens:\n if token in word_counts:\n word_counts[token] += 1\n else:\n word_counts[token] = 1\n\n items = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n items = items[0:n]\n words = list(map(lambda x: x[0], items))\n counts = list(map(lambda x: x[1], items))\n return {\n 'data': [\n Bar(\n x=words,\n y=counts\n )\n ],\n\n 'layout': {\n 'title': 'Most common word stems (outside stopwords)',\n 'yaxis': {\n 'title': \"Count\",\n },\n 'xaxis': {\n 'title': \"Word\"\n }\n }\n }", "def __word_frequency_barplot(self,df, column_name, nr_top_words=20):\n tokenized_only_dict = Counter(np.concatenate(df[column_name].values))\n tokenized_only_df = pd.DataFrame.from_dict(tokenized_only_dict, orient='index')\n tokenized_only_df.rename(columns={0: 'count'}, inplace = True)\n tokenized_only_df.sort_values('count', ascending=False, inplace=True)\n fig, axs = plt.subplots(1,2,figsize=(20,8))\n \n a = tokenized_only_df['count'].values[:nr_top_words]\n amin, amax = min(a) , max(a)\n norm = []\n\n for i, val in enumerate(a):\n norm.append( (val - amin) / (amax- amin))\n\n sns.barplot( norm, list(range(nr_top_words)), palette='hls', orient= 'h', ax=axs[0])\n axs[0].set_yticks(list(range(nr_top_words)))\n axs[0].set_yticklabels(tokenized_only_df.index[:nr_top_words], fontsize=18)\n axs[0].set_title(\"Word Frequencies \" , fontsize=20)\n axs[0].set_xlabel(\"(a) Frequency of a Word\", fontsize = 18)\n\n document_lengths = []\n if column_name == self.__origintext_columnname or column_name == \"clean_text\" :\n document_lengths = np.array(list(map(len, df[column_name].str.split())))\n elif column_name == \"removed_stopwords\" or column_name == \"stem_words\":\n document_lengths = np.array(list(map(len, df[column_name])))\n\n print(\"The average number of Words in a document is: {}.\".format(np.mean(document_lengths)))\n print(\"The max number of Words in a document is: {}.\".format(np.max(document_lengths)))\n print(\"The min number of Words in a document is: {}.\".format(np.min(document_lengths)))\n axs[1].set_title('Distribution of number of words on ' , fontsize = 20)\n axs[1].set_xlabel(\"(b) Sentence Length\", fontsize = 18)\n sns.distplot(document_lengths, bins = 50 , ax =axs[1])\n plt.show()", "def count_words_by_date(self, date):\n srt = sorted(self.all_dates.keys())\n if date not in srt:\n print(\"The date is not exist\")\n return\n print(\"Counting for \" + date)\n indx = srt.index(date)\n tokens = []\n for i in range(6):\n tokens += self.all_dates[srt[indx - i]]\n for word in tokens:\n self.local_total += 1\n self.alternative[word] += 1", "def visualise_initial_most_frequent_words(data: pd.DataFrame \n ) -> None:\n entire_corpus = \" \".join(\n [sentence.lower() for sentence in data[\"sentence\"]]\n ).split()\n \n # Remove stopwords\n entire_corpus = list(filter(lambda x: x not in STOPWORDS, entire_corpus))\n \n # Obtain the most frequent words\n corpus_counter = Counter(entire_corpus)\n most_freq = corpus_counter.most_common(50)\n\n # Create a dataframe for these words\n to_df = [{'word': word[0], 'count': word[1]} for word in most_freq]\n word_freq_df = pd.DataFrame(to_df)\n \n # Display barplot \n sns.barplot(\n data=word_freq_df, y='word', x='count', \n palette=sns.color_palette(\"crest\")\n )\n sns.utils.plt.show()", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n 
#Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n #Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def plot_most_common_words(plotting_string, method):\n top_twenty_after_stop = get_top_words(plotting_string)\n top_twenty_after_stop_dict = dict(top_twenty_after_stop)\n keys = top_twenty_after_stop_dict.keys()\n values = top_twenty_after_stop_dict.values()\n plt.bar(keys, values)\n plt.xticks(rotation=75)\n plt.xlabel(\"Most common words\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Most common words in {} of posts from ErictheCarGuy\".format(method))\n plt.show()", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def visualise_preprocessed_most_frequent_words(corpus: List[str]\n ) -> None:\n\n corpus_words = [word for sentence in corpus for word in sentence.split()]\n \n # Obtain the most frequent words\n corpus_counter = Counter(corpus_words)\n most_freq = corpus_counter.most_common(50)\n\n # Create a dataframe for these words\n to_df = [{'word': word[0], 'count': word[1]} for word in most_freq]\n word_freq_df = pd.DataFrame(to_df)\n \n # Display barplot \n sns.barplot(\n data=word_freq_df, y='word', x='count', \n palette=sns.color_palette(\"crest\")\n )\n sns.utils.plt.show()", "def plot_words(df):\n \n from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, ENGLISH_STOP_WORDS\n \n custom_stop_words = list(ENGLISH_STOP_WORDS) + ['just', 'perfume', 'fragrance', 'don', 'think', 'note', 'notes', \n 'fragrances', 'smells', 'smell', 'scent', 'bottle']\n \n cv = CountVectorizer(stop_words=custom_stop_words, min_df=.01)\n\n sparse_matrix = cv.fit_transform(df['review'])\n features_df = pd.DataFrame(sparse_matrix.todense(), \n columns = cv.get_feature_names())\n\n return features_df.sum().sort_values(ascending = False).head(15).plot(kind = 'barh', figsize = (8,8));", "def sentiment_plot(self, top_words=25):\n if top_words > 25:\n warnings.warn('Including more than 25 words on the X-axis will cause words to be excluded from the axis')\n\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n if len(daily_comments) == 0:\n warnings.warn('No comments found for this day, trying future dates until comments are found')\n\n while len(daily_comments) == 0:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. 
Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n if 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n warnings.warn('No negative or positive sentiments found on this day, trying future dates until positive or negative comments are found')\n\n while 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release']. \\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n res_positive = daily_comments[(daily_comments['sentiment']=='pos')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_neutral = daily_comments[(daily_comments['sentiment']=='neu')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_negative = daily_comments[daily_comments['sentiment']=='neg']['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n\n fig = make_subplots(rows=3, cols=1,\n y_title='Count',\n subplot_titles=('Positive', 'Neutral', 'Negative'))\n trace = fig.add_trace(px.bar(x=list(res_positive.keys())[:top_words], y=list(res_positive.values())[:top_words]).data[0],\n row=1, col=1)\n fig.append_trace(px.bar(x=list(res_neutral.keys())[:top_words], y=list(res_neutral.values())[:top_words]).data[0],\n row=2, col=1)\n fig.append_trace(px.bar(x=list(res_negative.keys())[:top_words], y=list(res_negative.values())[:top_words]).data[0],\n row=3, col=1)\n\n left = np.where(self.day_window[0] < 0, 'Before', 'After')\n right = np.where(self.day_window[1] < 0, 'Before', 'After')\n fig.update_layout(\n title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words,\n self.day_window[0], left,\n self.day_window[1], right)\n )\n fig.show()", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 
'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def generate_plot(tokens):\n\n return FreqDist(word for word in tokens if len(word) > 4).plot(50, cumulative=True)", "def createBagOfWords(df, n):\n from nltk.tokenize import word_tokenize\n from nltk.corpus import stopwords\n import string\n from collections import Counter\n from nltk.stem.porter import PorterStemmer\n \n ps = PorterStemmer()\n stop_words = set(stopwords.words('english') + list(string.punctuation) + ['...' , ',' , '“', '”', '.', 'fig', '.fig'])\n \n dfDict = {}\n# df['count'] = df.groupby('Text')['Text'].transform(pd.Series.value_counts)\n# df.sort_values('count', ascending = False)\n# df.drop('count', axis = 1)\n dfLength = len(df.index)\n for i in range(1, dfLength):\n text = df['Text'][i]\n if dfDict.__contains__(text):\n df['Text'][i] = dfDict.get(text)\n else:\n textModified = text.lower()\n textModified = textModified.replace(',', '')\n textModified = textModified.replace('.', '')\n textModified = textModified.split()\n textModified = [ps.stem(word) for word in textModified if not word in set(stop_words) and len(word) > 1]\n textModified = ' '.join(textModified)\n df['Text'][i] = textModified\n dfDict[text] = textModified\n \n from sklearn.feature_extraction.text import CountVectorizer\n cv = CountVectorizer()\n textBagOfWords = cv.fit_transform(df['Text']).toarray()\n textBagOfWords = pd.DataFrame(textBagOfWords)\n df = df.drop(['Text'], axis = 1)\n df = pd.concat([df, textBagOfWords], axis = 1)\n return df", "def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most 
frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()", "def generate_day_comparison():\n df = pd.read_csv(\"/Users/maxwell/Documents/workspace/CoronaScan/results.csv\",\n names=[i for i in subreddits])\n\n row_values = df.to_numpy()\n counts = row_values[get_offset() + 1]\n vals = []\n for i in counts:\n vals.append(int(i))\n plt.rcParams['xtick.major.pad']='8'\n N = len(subreddits)\n fig, chart = plt.subplots()\n index = np.arange(N)\n width = 0.35\n plot = chart.bar(index, vals, width)\n for i, v in enumerate(vals):\n chart.text(i-.2, v/(vals[i]+100), vals[i], fontsize=11)\n\n chart.set_xticks(index)\n chart.set_xticklabels(subreddits, rotation=45, ha='right', minor=False, fontsize=8)\n chart.set_xlabel(\"Subreddit\", fontsize=14)\n chart.set_ylabel(\"Number of Mentions\", fontsize=14)\n chart.set_title(\"Keyword Mentions by Subreddit on \" +\n str(datetime.date.today()), fontsize=20, pad=20)\n\n plt.tight_layout()\n fig.set_size_inches(18.5, 10.5)\n fig.savefig(\"/Users/maxwell/Documents/workspace/CoronaScan/plots/daily_bar_graphs/\" +\n str(datetime.date.today()), bbox_inches='tight')", "def WordAnalysis(dataframe,column):\r\n from wordcloud import WordCloud, STOPWORDS \r\n import matplotlib.pyplot as plt\r\n from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\r\n import nltk\r\n from nltk.tokenize import word_tokenize\r\n from nltk.probability import FreqDist\r\n from nltk.corpus import stopwords\r\n from nltk.tokenize import sent_tokenize\r\n text = \" \".join(review for review in dataframe[column])\r\n tokenized_word=word_tokenize(text)\r\n stop_words=set(stopwords.words(\"english\"))\r\n filtered_sent=[]\r\n for w in tokenized_word:\r\n if w not in stop_words:\r\n if w not in [':',',','.',\"'\",'\\\\n','-','@','(',')','and/or','?',\"'s\"]:\r\n filtered_sent.append(w)\r\n fdist = FreqDist(filtered_sent)\r\n fdist.plot(30,cumulative=False)\r\n plt.show()", "def stopword_bar(df, stop_words, ax):\n df_test = df.copy()\n df_test['prop'] = df.title.apply(stopword_proportion)\n sns.barplot(data=df_test, x='target', y='prop', ax=ax, ci=False)\n ax.set_title(\"Ratio of Stopwords Between Classes\", size=20)\n ax.set_ylim([1,2])\n ax.set_ylabel(\"Ratio\", size=20)\n ax.set_xlabel(\"Article Class\", size=20)\n plt.xticks(ticks=range(2),labels=['Normal','Clickbait'], size=20)\n return ax", "def wcount(lines, topn=10):\n '''a=[]\n for line in lines:\n word = line.strip()\n a.append(word)\n def histogram(s):\n d = dict()\n for i in s:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\n return d'''\n def process_line(lines,diction):\n lines = lines.replace('-',' ')\n for word in lines.split():\n 
word=word.strip(string.punctuation+string.whitespace)\n word.lower()\n diction[word]=diction.get(word,0)+1\n\n def process_file(lines):\n diction = {}\n process_line(lines,diction)\n return diction\n diction=process_file(lines)\n x=list(diction.values())\n x.sort()\n x.reverse()\n count = 0\n for i in range(topn):\n for key in list(diction.keys()):\n if diction[key]==x[i] and count<topn:\n print(\"%s %d\"%(key,diction[key]))\n count +=1\n del diction[key]\n pass", "def count_words(subreddit, word_list, payload={}, hot_list=[]):\n url = url.format(subreddit)\n\n if payload == {}:\n payload = payload_default\n\n request = get(url, headers=user_ag)\n\n if (request.status_code == 404):\n return\n\n request = get(url, headers=user_ag, params=payload)\n request_data = request.json().get('data')\n\n post_page = request_data.get('children')\n hot_list.extend([post.get('data').get('title') for post in post_page])\n\n if (request_data.get('after') is not None):\n payload = {\n 'after': request_data.get('after'),\n 'limit': 100\n }\n return count_words(subreddit, word_list, payload, hot_list)\n\n word_list = [word.lower() for word in word_list]\n freq_dict = {word: 0 for word in word_list}\n for title in hot_list:\n title_words = title.lower().split(' ')\n for word in word_list:\n freq_dict[word] += title_words.count(word)\n\n freq_list = [(key, val) for key, val in freq_dict.items() if val != 0]\n\n for entry in sorted(freq_list, key=lambda x: (-x[1], x[0])):\n print(\"{}: {}\".format(entry[0], entry[1]))", "def main():\n name = file_path() # calls the file path method\n dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0} # store the day val in dict\n value = pop_values(name)\n count = 0\n for i in value:\n if i in dictionary:\n dictionary[i] += 1\n count += len(i)\n val = dictionary.values()\n keys = dictionary.keys()\n zp = zip(dictionary.keys(), dictionary.values())\n for item in val:\n i = val\n j = keys\n plt.bar(j, i, align='center', alpha=0.5)\n\n plt.ylabel('Number of messages') \n plt.title('Emails per day')\n plt.show() # method that shows the bar graph of our code result", "def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))", "def count_words(subreddit, word_list, word_count={}, after=None):\n import requests\n\n sub_info = requests.get(\"https://www.reddit.com/r/{}/hot.json\"\n .format(subreddit),\n params={\"after\": after},\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if sub_info.status_code != 200:\n return None\n\n info = sub_info.json()\n\n hot = [child.get(\"data\").get(\"title\")\n for child in info\n .get(\"data\")\n .get(\"children\")]\n if not hot:\n return None\n\n word_list = list(dict.fromkeys(word_list))\n\n if 
word_count == {}:\n word_count = {word: 0 for word in word_list}\n\n for title in hot:\n split_words = title.split(' ')\n for word in word_list:\n for s_word in split_words:\n if s_word.lower() == word.lower():\n word_count[word] += 1\n\n if not info.get(\"data\").get(\"after\"):\n sorted_counts = sorted(word_count.items(), key=lambda kv: kv[0])\n sorted_counts = sorted(word_count.items(),\n key=lambda kv: kv[1], reverse=True)\n [print('{}: {}'.format(k, v)) for k, v in sorted_counts if v != 0]\n else:\n return count_words(subreddit, word_list, word_count,\n info.get(\"data\").get(\"after\"))", "def main():\r\n wordfile=input(\"Enter word file: \")\r\n year=int(input(\"Enter year: \"))\r\n yrlist=printedWords(wordData.readWordFile(wordfile))\r\n total=wordsForYear(year,yrlist)\r\n print(\"Total printed words in\",year,\":\",total)\r\n import simplePlot\r\n labels = 'Year', 'Total Words'\r\n plot = simplePlot.plot2D('Number of printed words over time', labels)\r\n for yc in yrlist:\r\n point = yc.year, yc.count\r\n plot.addPoint(point)\r\n plot.display()", "def count_words(subreddit, word_list, last_post=\"\", child=0):\n try:\n if last_post is None:\n return []\n query = \"?limit=100&after=\" + last_post\n url = \"https://www.reddit.com/r/{}/hot.json{}\".format(subreddit, query)\n settings = {'headers': {'User-agent': ''}, 'allow_redirects': False}\n data = get(url, **settings).json().get('data')\n titles = [post['data']['title'] for post in data.get('children')]\n titles = titles + count_words(subreddit, word_list, data['after'], 1)\n if child:\n return titles\n except:\n return\n\n word_list = [word.lower() for word in word_list]\n word_dict = {word: word_list.count(word) for word in set(word_list)}\n freq_dict = {}\n for word, frequency in word_dict.items():\n word_dict[word] = 0\n for title in titles:\n word_dict[word] += title.lower().split().count(word) * frequency\n frequency = word_dict[word]\n entry = freq_dict.get(frequency)\n if entry is None:\n freq_dict.update({frequency: [word]})\n else:\n freq_dict[frequency].append(word)\n if 0 in freq_dict:\n del freq_dict[0]\n frequencies_of_words = sorted(freq_dict.items(), reverse=True)\n for frequency, words in frequencies_of_words:\n words.sort()\n for word in words:\n print('{}: {}'.format(word, frequency))", "def plot_phrases_over_time(\n df, *, text_column, date_column, id_column,\n phrases, time_interval='daily', ma_window_size=2,\n lowercase=True, sig_val=1.96,\n annotate_abs_delta_threshold=10, **plot_kws):\n # time interval\n TIME_INTERVALS = ('hourly', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly')\n time_interval = time_interval if time_interval\\\n in TIME_INTERVALS else 'daily'\n time_interval = pd.to_datetime(pd.Series(df[date_column]))\\\n .dt.to_period(time_interval[0].upper())\n\n # calculate keyword trends\n phrases_trends_df = []\n text_column = df[text_column].str.lower()\\\n if lowercase else df[text_column]\n\n i = 0\n for phrase in phrases:\n if isinstance(phrase, str):\n mask = rf'\\b{phrase}\\b'\n else:\n # assume phrase is iterable\n try:\n mask = '|'.join([rf'\\b{w}\\b' for w in phrase])\n phrase = f'Topic #{i}'\n i += 1\n except TypeError:\n raise ValueError('phrases contains non-string and / or non-iterable objects')\n\n _ = df[text_column.str.contains(mask)]\\\n .groupby(time_interval)\\\n .agg(count=pd.NamedAgg(column=id_column, aggfunc=\"count\"))\\\n .reset_index()\\\n .assign(phrase=phrase)\n\n # calculate moving average frequency of phrase(s)\n # in previous time window of size `ma_window_size`\n 
_[['moving_avg', 'moving_std']] = _['count']\\\n .shift(1)\\\n .fillna(0.)\\\n .rolling(window=ma_window_size, min_periods=1)\\\n .agg(['mean', 'std'])\n\n # add result to list\n phrases_trends_df.append(_)\n\n # concatenate list of results to single dataframe\n phrases_trends_df = pd.concat(phrases_trends_df)\n\n # calculate actual delta\n phrases_trends_df['actual_delta'] =\\\n phrases_trends_df['count'] - phrases_trends_df['moving_avg']\n\n # calculate percentage of change of frequency in\n # current time period compared to previous time\n # window moving average\n phrases_trends_df['change %'] =\\\n phrases_trends_df['actual_delta'] * 100. / phrases_trends_df['moving_avg']\n\n # calculate z-score, i.e., standardized phrase usage rates\n phrases_trends_df['z-score'] =\\\n (phrases_trends_df['actual_delta'] / phrases_trends_df['moving_std'])\\\n .replace([-np.inf, np.inf], np.nan)\n\n # check if topic trend is outlier at the current time period\n phrases_trends_df['outlier'] = phrases_trends_df['z-score']\\\n .apply(lambda z: z < -sig_val or z > sig_val)\n# display(phrases_trends_df)\n\n # plots\n fig, ax = plt.subplots(3, 1)\n\n # set color map\n if 'cmap' not in plot_kws:\n cmap = cm.get_cmap('tab20')\n plot_kws['cmap'] = cmap\n\n # set legend to false for individual axes\n # we want only one legend for the full figure\n plot_kws['legend'] = False\n\n if 'ax' in plot_kws:\n plot_kws.pop('ax')\n\n # set figure title\n fig.suptitle(plot_kws.pop('title', 'Phrase Usage Rate Analysis'), fontsize=16)\n\n # plot trend line\n ax[0] = phrases_trends_df\\\n .pivot(index=date_column, columns='phrase', values=['count'])\\\n .plot(ax=ax[0], **plot_kws)\n# ax[0].legend(loc='center left', bbox_to_anchor=(1, 0.5))\n ax[0].grid(True)\n ax[0].set_title(\"Usage rate\")\n\n # add one common legend for complete figure\n handles, labels = ax[0].get_legend_handles_labels()\n fig.legend(handles, [_[8:-1] for _ in labels], loc='center left', bbox_to_anchor=(1, 0.5))\n\n # plot change percentage\n ax[1] = phrases_trends_df\\\n .pivot(index=date_column, columns='phrase', values=['change %'])\\\n .plot(ax=ax[1], cmap=plot_kws['cmap'], legend=plot_kws['legend'])\n# ax[1].legend(loc='center left', bbox_to_anchor=(1, 0.5))\n ax[1].grid(True)\n ax[1].set_title(\"Usage rate change percentage (%)\")\n\n # annotation of actual difference in change % graph\n top_n_change = phrases_trends_df\\\n .assign(abs_change_perc=phrases_trends_df['change %'].apply(abs))\\\n .replace([np.inf, -np.inf], np.nan)\\\n .dropna()\\\n .sort_values('abs_change_perc', ascending=False).iloc[:30]\n top_n_change = top_n_change[[date_column, 'change %', 'actual_delta']]\n texts = []\n for x, y, z in top_n_change.itertuples(index=False, name=None):\n if abs(z) >= annotate_abs_delta_threshold:\n # ax[1].annotate(f'+{z}' if z >= 0. else f'{z}', xy=(x, y), xytext=(x, y + 2))\n texts += [ax[1].text(x, y, f'+{z}' if z >= 0. 
else f'{z}')]\n adjust_text(texts, ax=ax[1], only_move={'points':'y', 'texts':'y'},\n expand_points=(1.8, 1.8),\n arrowprops=dict(arrowstyle=\"->\", color='r', lw=0.5))\n ax[1].set_xlabel(\"Annotations are actual volume delta between current time period and moving average.\")\n\n # plot z-score bubble plot\n# colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n colors = [handles[i].get_color() for i in range(len(handles))]\n texts = []\n for i, (phrase, df_g) in enumerate(phrases_trends_df.groupby(['phrase'])):\n df_g[date_column] = df_g[date_column].dt.to_timestamp()\n df_g = df_g[df_g['z-score'].notnull()]\n# display(df_g)\n\n # colored bubbles if outlier\n ax[2] = df_g[df_g['outlier']]\\\n .plot.scatter(x=date_column, y='z-score',\n# color=colors[i % len(colors)],\n color=colors[i],\n s=df_g.loc[df_g['outlier'], 'count'] * 4,\n ax=ax[2], label=phrase,\n legend=plot_kws['legend'], alpha=.5)\n\n # get texts for annotation\n for x, y, z in df_g.loc[df_g['outlier'], [date_column, 'z-score', 'actual_delta']]\\\n .itertuples(index=False, name=None):\n if abs(z) >= annotate_abs_delta_threshold:\n texts += [ax[2].text(x, y, f'+{z}' if z >= 0. else f'{z}')]\n\n # greyed out bubbles if not outlier\n ax[2] = df_g[~df_g['outlier']]\\\n .plot.scatter(x=date_column, y='z-score',\n c='grey', s=df_g.loc[~df_g['outlier'], 'count'] * 2,\n ax=ax[2], legend=False, alpha=.5)\n\n # annotate actual delta for outliers\n adjust_text(texts, ax=ax[2], expand_points=(1.8, 1.8),\n arrowprops=dict(arrowstyle=\"->\", color='r', lw=0.5))\n\n ax[2].axhspan(-sig_val, sig_val, fill=False, linestyle='dashed')\n# lgnd = ax[2].legend(loc='center left', bbox_to_anchor=(1, 0.5))\n# # change the marker size manually for both lines\n# for handle in lgnd.legendHandles:\n# handle._sizes = [20]\n ax[2].tick_params(which='major', length=15)\n ax[2].xaxis.set_minor_locator(WeekdayLocator(interval=1))\n ax[2].xaxis.set_minor_formatter(DateFormatter('%W'))\n ax[2].grid(True)\n ax[2].set_title(\"Usage rate Z-score\")\n ax[2].set_xlabel(\"Annotations are actual volume delta between current time period and moving average.\")\n\n fig.tight_layout()\n return fig, ax", "def count_words(filename):", "def keywords_countplot(data: pd.DataFrame\n ) -> None:\n ordered_keywords = data[\"keyword\"].value_counts().keys() \n \n # Initialise the countplot, using the keywords as the x-axis and their \n # respective counts as the y-axis\n c_plt = sns.countplot(\n x=\"keyword\", \n palette=sns.color_palette(\"crest\"),\n data=data, \n order=ordered_keywords \n )\n \n # Rotate the x-axis labels such that they are visible (i.e., they don't \n # overlap with one another)\n c_plt.set_xticklabels(\n labels=c_plt.get_xticklabels(), \n rotation=55, \n horizontalalignment='right',\n fontweight='light'\n )\n \n # Incoprorate annotation so the specific values of each count is shown\n for c, label in zip(c_plt.patches, data[\"keyword\"].value_counts()):\n c_plt.annotate(label, (c.get_x()+0.25, c.get_height()+0.5))\n \n sns.utils.plt.show()", "def report_distribution(count):\n # create a list containing tuples of count and word,\n # while summing the total number of word occurrences\n num = 0\n tup_list = []\n\n for key, value in count.items():\n num += int(value)\n tup_list.append((value, key))\n # make me use string formatting smh im gonna use lambas i don't care what we have learned\n #tup_list.sort(key = lambda t: t[0], reverse = True)\n tup_list.sort(reverse = True)\n\n s_list = []\n s_list.append(\"{:>5}\".format(num))\n max = 20\n for tup in 
tup_list:\n if max == 0:\n break\n else:\n max -= 1\n s_list.append(\"{:>5}\".format(tup[0]) + \" \" + tup[1])\n\n format_string = \"count word\\n\"\n for i in s_list:\n format_string = format_string + i + \"\\n\"\n\n # remove last new line im too lazy to do it right in the for-loop\n #format_string = format_string[:-1]\n # add lines with the title and total word count to the output string\n \n # sort the list from largest number to smallest,\n # add a line to the output for each word in the top 20 containing count and word\n \n # return the string containing the report\n return format_string", "def get_busiest_day_stats(df, wordcloud=True):\n df_grouped_by_date = df.groupby(df.time.dt.date)\n max_chats_day = df_grouped_by_date.count()['clean_text'].idxmax()\n day_of_max_chats = df_grouped_by_date.get_group(max_chats_day)\n if wordcloud:\n frequency_list = day_of_max_chats['clean_text'].agg(['count', __custom_words_accumulator])[\n '__custom_words_accumulator']\n frequency_dict = dict(frequency_list)\n plot_word_cloud(frequency_dict)\n\n return day_of_max_chats.describe().transpose()", "def barchart_code_frequency(self):\n\n title = _('Code count - text, images and Audio/Video')\n owner, subtitle = self.owner_and_subtitle_helper()\n cur = self.app.conn.cursor()\n values = []\n labels = []\n case_file_name, file_ids = self.get_file_ids()\n if case_file_name != \"\":\n subtitle += case_file_name\n for c in self.codes:\n sql = \"select count(cid) from code_text where cid=? and owner like ?\"\n if file_ids != \"\":\n sql = \"select count(cid) from code_text where cid=? and owner like ? and fid\" + file_ids\n cur.execute(sql, [c['cid'], owner])\n res_text = cur.fetchone()\n sql = \"select count(cid) from code_image where cid=? and owner like ?\"\n if file_ids != \"\":\n sql = \"select count(cid) from code_image where cid=? and owner like ? and id\" + file_ids\n\n cur.execute(sql, [c['cid'], owner])\n res_image = cur.fetchone()\n sql = \"select count(cid) from code_av where cid=? and owner like ?\"\n if file_ids != \"\":\n sql = \"select count(cid) from code_av where cid=? and owner like ? and id\" + file_ids\n cur.execute(sql, [c['cid'], owner])\n res_av = cur.fetchone()\n labels.append(c['name'])\n values.append(res_text[0] + res_image[0] + res_av[0])\n # Create pandas DataFrame\n data = {'Code names': labels, 'Count': values}\n df = pd.DataFrame(data)\n cutoff = self.ui.lineEdit_filter.text()\n mask = df['Count'] != 0\n if cutoff != \"\":\n mask = df['Count'] >= int(cutoff)\n subtitle += _(\"Values\") + \" >= \" + cutoff\n fig = px.bar(df[mask], y='Code names', x='Count', orientation='h', title=title + subtitle)\n fig.show()\n self.helper_export_html(fig)" ]
[ "0.7079399", "0.67071533", "0.62005866", "0.61314", "0.6073859", "0.6038004", "0.60302097", "0.60053146", "0.59687704", "0.5862463", "0.58548695", "0.57932806", "0.5750061", "0.5748947", "0.57324004", "0.5716763", "0.57065487", "0.56946385", "0.565803", "0.5639725", "0.55957496", "0.5593972", "0.5530129", "0.5528672", "0.552563", "0.5512312", "0.55055714", "0.55045694", "0.5481835", "0.5462107" ]
0.7242556
0
Apply lemmatization to cleaned comments
def _proc(dat): def lemma(text): lemmatizer = WordNetLemmatizer() w_tokenizer = WhitespaceTokenizer() return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)] dat['text_lemmatized'] = dat['clean_comments'].apply(lemma) dat['text_lemmatized'] = dat['text_lemmatized'].apply(' '.join)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lemmatize_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)", "def lemma(comment):\n words = comment.split()\n tags = []\n lemma_words = []\n result=[]\n for i in range(len(words)):\n tags.append(words[i][words[i].rindex(\"/\"):])\n words[i] = words[i][:words[i].rindex(\"/\")]\n doc = spacy.tokens.Doc(nlp.vocab, words)\n doc = nlp.tagger(doc)\n for i in doc:\n if i.lemma_[0] ==\"-\" and i.string[0]!=\"-\":\n lemma_words.append(i.text)\n else:\n lemma_words.append(i.lemma_)\n \n for i in range(len(lemma_words)):\n \n result.append(lemma_words[i]+tags[i])\n \n return \" \".join(result)", "def normalise(word):\n\tword = word.lower()\n\t# word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment\n\tword = lemmatizer.lemmatize(word)\n\treturn word", "def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n #print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)", "def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? 
\", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text", "def lemmatize_text(self, text, print_tokens=False):\n # text = text.replace(\"/\", ' or ')\n # text = text.replace(\"\\\\\", ' or ')\n # # text = text.replace(\"'s\", '')\n # # text = text.replace(\"’s\", '')\n\n if print_tokens:\n print(pos_tag(word_tokenize(text)))\n\n # text = \"We’re looking for an exceptional Deep Learning (DL) Engineer\" # TODO: remove\n for word, tag in pos_tag(word_tokenize(text)):\n if tag.startswith('NN'): # NOUN\n # NN noun, singular ‘desk’, ’dog’\n # NNS noun plural ‘desks’, ‘dogs’\n # NNP proper noun, singular ‘Harrison’\n # NNPS proper noun, plural ‘Americans’\n yield self.wnl.lemmatize(word, pos='n') # wordnet.NOUN\n elif tag.startswith('VB'): # VERB\n # VB verb, base form take\n # VBD verb, past tense took\n # VBG verb, gerund/present participle taking\n # VBN verb, past participle taken\n # VBP verb, sing. present, non-3d take\n # VBZ verb, 3rd person sing. 
present takes\n yield self.wnl.lemmatize(word, pos='v') # wordnet.VERB\n elif tag.startswith('JJ'): # ADJ\n # JJ adjective ‘big’, ’good’\n # JJR adjective, comparative ‘bigger’, ‘better’\n # JJS adjective, superlative ‘biggest’\n yield self.wnl.lemmatize(word, pos='a') # wordnet.ADJ\n elif tag.startswith('RB'): # ADV\n # RB adverb very, silently,\n # RBR adverb, comparative better\n # RBS adverb, superlative best\n yield self.wnl.lemmatize(word, pos='r') # wordnet.ADV\n else:\n yield word", "def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)", "def normalize_words(document):\n stopwords = set(nltk.corpus.stopwords.words('english'))\n lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n for token in document:\n token = token.lower()\n if token in string.punctuation: continue\n if token in stopwords: continue\n yield lemmatizer.lemmatize(token)", "def lemmatize_text(text):\n text = nlp(text)\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n return text", "def lemmatize_string(doc, stop_words=STOP_WORDS):\n\n if not stop_words:\n stop_words = []\n\n # remove unicode\n clean_doc = \"\".join([char for char in doc if char in printable])\n\n # Run the doc through spaCy\n doc = nlp(clean_doc)\n\n # Lemmatize and lower text\n tokens = [re.sub(\"\\W+\",\"\",token.lemma_.lower()) for token in doc ]\n tokens = [t for t in tokens if len(t) > 1]\n\n return ' '.join(w for w in tokens if w not in stop_words)", "def lemitization(text_vector):\n\n text_vector = postag_doc(text_vector)\n global lemmatizer\n tokenised_document = [lemmatizer.lemmatize(word, pos=map_postags(\n postag)) for word, postag in text_vector]\n return tokenised_document", "def lemmatiser(list_of_words, tag):\n \n output = []\n for entry in list_of_words:\n if phrases:\n # just get the rightmost word\n word = entry[-1]\n entry.pop()\n else:\n word = entry\n if translated_option.startswith('u'):\n if word in taglemma:\n word = taglemma[word]\n else:\n if word == 'x':\n word = 'Other'\n # only use wordnet lemmatiser when appropriate\n elif not dependency:\n if word in wordlist:\n word = wordlist[word]\n word = lmtzr.lemmatize(word, tag)\n # do the manual_lemmatisation\n else:\n if word in wordlist:\n word = wordlist[word]\n if phrases:\n entry.append(word)\n output.append(entry)\n else:\n output.append(word)\n return output", "def lemmatize(token, pos_tag):\n lemmatizer = TextPreprocessor.LEMMATIZER\n return lemmatizer.lemmatize(token, pos_tag)", "def preprocess_tweet(tweet):\n clean_tweet = tp.clean(tweet)\n\n # perform lemmatization\n tokenizer = TweetTokenizer()\n tweet_tokens = tokenizer.tokenize(clean_tweet)\n\n lemmatized_tweet = lemmatize_tweet(tweet_tokens)\n\n # remove stopwords\n preprocessed_tweet = remove_stopwords(lemmatized_tweet)\n return preprocessed_tweet", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def __call__(self, text):\r\n if self.use_pos_tagging:\r\n return [self.wnl.lemmatize(t, self.pos(t)) for t in word_tokenize(self.clean(text))]\r\n else:\r\n return [self.wnl.lemmatize(t) for t in word_tokenize(self.clean(text))]", "def 
lemmatize(text, nlp):\n\n return [word.lemma_ for word in nlp(text)]", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word) for word in query.split()]\n return \" \".join(wordlist)", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word).lower() for word in query]\n return \" \".join(wordlist)", "def lemmatize_docs(docs):\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n for i in range(len(docs)):\r\n docs[i] = [wordnet_lemmatizer.lemmatize(w, t) for (w, t) in docs[i]]\r\n return docs", "def lemmatiser(list_of_words, tag):\n output = []\n for word in list_of_words:\n if translated_option.startswith('u'):\n word = taglemma.get(word.lower(), 'Other')\n else:\n word = wordlist.get(word, lmtzr.lemmatize(word, tag))\n if not preserve_case:\n word = word.lower()\n output.append(word)\n return output", "def test_lemmatization():\n normalizer = TextNormalizer(stopwords=False, lemmatize=True)\n X = normalizer.transform([[\"start running better old friend\"]])\n assert X[\"corpus\"][0] == [\"start\", \"run\", \"well\", \"old\", \"friend\"]", "def lemmatize(text):\n\n lem = WordNetLemmatizer()\n return ' '.join(list(map(lambda x: lem.lemmatize(x, 'v'),\n text.split())))", "def lemmatize_words(text: str, lemmatizer=WordNetLemmatizer()) -> str:\n return ' '.join(lemmatizer.lemmatize(word) for word in text.split())", "def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized", "def lemmatize(text):\n word_tokens = nltk.word_tokenize(text)\n lemmatized_word = [wordnet_lemmatizer.lemmatize(word) for word in word_tokens]\n return (\" \".join(lemmatized_word))", "def lemmatized_phrases(self):\n phrases = [set(lower_words(TextBlob(p).words.lemmatize()))\n for p in self.blob.noun_phrases]\n return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]", "def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def lemmatization(tokenized_word_list):\n porter=nltk.stem.PorterStemmer()\n filtered_tokens = [porter.stem(word) for word in tokenized_word_list]\n return filtered_tokens", "def lemmatize(data: pd.Series) -> pd.Series:\n lemmatizer = WordNetLemmatizer()\n return data.apply(lambda row: re.sub(\n r'\\b\\w+\\b', lambda match: lemmatizer.lemmatize(\n match.group(), pos=to_pos([match.group()])), row))" ]
[ "0.7497414", "0.69905686", "0.6854682", "0.66385305", "0.6468014", "0.64453566", "0.6444136", "0.6443159", "0.64086497", "0.6401282", "0.6396557", "0.6334729", "0.6331374", "0.6327046", "0.63106084", "0.6305712", "0.6254337", "0.6175787", "0.61659825", "0.614558", "0.6141945", "0.6108987", "0.61055166", "0.60898787", "0.6088328", "0.606487", "0.6063243", "0.60535896", "0.60508794", "0.6037627" ]
0.76389426
0
Open webcam through OpenCV.
def openWebcam(self): # Récupérer l'id de la caméra entré par l'utilisateur self.device_id = int(self.device_id_text.text()) # Prendre la main sur la webcam en créant un objet VideoCapture self.webcam = cv2.VideoCapture(self.device_id) # Verbose self.printToUser("Webcam #"+str(self.device_id)+" connected.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_cam(width, height):\n\n logging.info('start web cam')\n cap = cv2.VideoCapture(0)\n\n # Check success\n if not cap.isOpened():\n raise ConnectionError(\"Could not open video device\")\n \n # Set properties. Each returns === True on success (i.e. correct resolution)\n assert cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n assert cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\n return cap", "def showWebcam(model): \n\n cap = cv.VideoCapture(0)\n\n while(True):\n ret, frame = cap.read()\n detectUsingModel(model,frame)\n\n cv.imshow('frame',frame)\n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv.destroyAllWindows()", "def StartWebcam(self):\n if not os.path.exists('static'):\n os.mkdir('static')\n camera = olpc.Camera('static/webcam.png')\n camera.StartWebcam()", "def main():\n cv2.namedWindow('video', cv2.WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(sys.argv[1])\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret: # done\n break\n\n cv2.imshow('video', frame)\n\n key = cv2.waitKey(30)\n if key & 0xFF == ord('q'): # quit\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def capture_img_cv():\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cap.release()\n cv2.destroyAllWindows()\n return frame", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def cameraOn():\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly by DirectShow\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\" Real-Time Video. Press 'q' to exist.\",frame)\n if cv2.waitKey(8) & 0xFF == ord('q'): #display a frame for 8ms, ~120Hz\n break\n \n cap.release() # release the capture\n cv2.destroyAllWindows()", "def open_camera(self):\n camera_source = self.winOpenCam.camera_source_used()\n if camera_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(camera_source)\n self.cam = True", "def camera(ctx, cam_id, verbose):\n client = ctx.obj.client\n cap = cv2.VideoCapture(cam_id)\n frame_num = 1\n classes = {}\n try:\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"Stream unavailable. 
Exiting.\")\n break\n if verbose:\n print(frame)\n cv2.imshow('Camera Feed', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_num += 1\n except:\n pass\n\n cap.release()\n cv2.destroyAllWindows()", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def capture():\n\tcap = cv2.VideoCapture(0)\n\tret, frame = cap.read()\n\tcap.release()\n\tcv2.destroyAllWindows()\n\treturn frame", "def open(self):\n self._dev_obj = cv2.VideoCapture(self._dev_num)\n if not self._dev_obj.isOpened():\n LOG.error(\"Fail to open the camera, number: %d\", self._dev_num)\n return False\n\n self._dev_obj.set(cv2.CAP_PROP_FRAME_WIDTH, self._width)\n self._dev_obj.set(cv2.CAP_PROP_FRAME_HEIGHT, self._height)\n self._dev_obj.set(cv2.CAP_PROP_FPS, self._fps)\n\n LOG.info(\"Frame width: %d\", self._width)\n LOG.info(\"Frame height: %d\", self._height)\n\n return True", "def test_camera_id() -> None: # pragma: no cover\n cap = cv2.VideoCapture(1)\n if (cap.isOpened() == False):\n print(\"Error opening video stream or file\")\n ret, frame = cap.read()\n cv2.imshow('frame', frame)\n cv2.waitKey(0)\n cap.release()", "def show_video():\n cap = cv2.VideoCapture(0)\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 800) # 3\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 600) # 4\n while True:\n ret, frame = cap.read()\n cv2.imshow(\"img\", frame)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n cap.release()\n cv2.destroyAllWindows()\n break", "def capturarVideo():\n camara = cv2.VideoCapture(1)\n #camara = cv.CaptureFromCAM(0)\n\n\n #Se Establece resolucion del video en 320x240\n # esta funcion cno existe en vc2\n #camara.set(3, 640)\n #camara.set(4, 480)\n\n # esta funcion cno existe en vc2\n #if not camara.isOpened():\n # print(\"No se puede abrir la camara\")\n\n return camara", "def camera():\n while True:\n subprocess.check_output(['fswebcam', 'image.jpg'])\n sleep(60)", "def capture_webcam_video(video_filename, sec=None):\n\n cap = cv2.VideoCapture(0)\n fps = 20\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.cv.CV_FOURCC(*'XVID')\n out = cv2.VideoWriter(video_filename,fourcc, fps, (640,480))\n stime = time.time()\n\n while(cap.isOpened()):\n ret, frame = cap.read()\n if ret:\n out.write(frame)\n\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if sec and (time.time() - stime) > sec:\n break\n else:\n break\n\n out.release()\n cap.release()\n cv2.destroyAllWindows()", "def __init__(self, camera: int = 0) -> None:\n self.video = cv2.VideoCapture(camera)", "def captureDisplay(title=\"Frame\"):\n\tcap = cv2.VideoCapture(0)\n\tret, frame = cap.read()\n\tcv2.namedWindow(title, cv2.WINDOW_NORMAL)\n\tcv2.imshow(title, frame)\n\tcap.release()\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()", "def launch_webcam(self):\n global face_encoding\n\n # Call the image_import.add_user method which launches the camera and\n # returns the face encodings if a new picture is taken\n face_encoding = image_import.add_user()\n\n # Check if a new image was returned from the add_user method\n if len(face_encoding) == 128:\n # Confirm if a new image has been captured\n self.label_face_captured.setText(QtCore.QCoreApplication.translate(\"MainWindow\", \"Image Captured \"))\n self.check_box.show()\n 
self.check_box.setEnabled(True)\n self.check_box.setChecked(True)\n else:\n # Notify if a new image is not captured\n self.label_face_captured.setText(QtCore.QCoreApplication.translate(\"MainWindow\", \"No Image Captured\"))\n self.check_box.hide()", "def get_camera_streaming(cam_id, w, h, fps):\n capture = cv2.VideoCapture(cam_id)\n capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)\n capture.set(cv2.CAP_PROP_FPS, fps)\n if not capture:\n print(\"Failed to initialize camera\")\n sys.exit(1)\n return capture", "def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):\n cv2.namedWindow(window_name, WINDOW_NORMAL)\n if window_size:\n width, height = window_size\n cv2.resizeWindow(window_name, width, height)\n\n # 选择摄像头,0为本地\n vc = cv2.VideoCapture(0) # http://192.168.0.2:4747/mjpegfeed para camara android remota por medio de Droidcam\n\n # 摄像头分辨率,默认为当前使用摄像头的最高分辨率\n\n # vc.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n # vc.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n\n if vc.isOpened():\n read_value, webcam_image = vc.read()\n else:\n print(\"[ERROR] No se enontro camara.\")\n return\n while read_value:\n for normalized_face, (x, y, w, h) in find_faces(webcam_image):\n prediction = network.predict(normalized_face) # hace la prediccion\n prediction = prediction[0] # guarda el numero de la emocion para diujar el emoji\n # carga el emoji para dibujarlo\n image_to_draw = emoticons[prediction.tolist().index(max(prediction))]\n # dibuja el emoji\n draw_with_alpha(webcam_image, image_to_draw, (x , y - 100, w, h)) # image_to_draw, , webcam_image,\n cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n cv2.imshow(window_name, webcam_image)\n read_value, webcam_image = vc.read()\n key = cv2.waitKey(update_time)\n if key == 27: # salir con esc\n break\n cv2.destroyWindow(window_name)", "def runFaceRecognition(useHOG=False):\n #Open a handler for the camera\n video_capture = cv2.VideoCapture(CAMERA_DEVICE_ID)\n\n #Setup database\n database = setupDatabase()\n\n skipFrame = 0\n\n while video_capture.isOpened():\n #Skip every 2 frames to increase frame rate\n if (skipFrame < 2):\n skipFrame += 1\n continue\n else:\n skipFrame = 0\n\n #Read frame from camera and check that it went ok\n ok, frame = video_capture.read()\n if not ok:\n print(\"\\n[!] Error reading frame from camera. 
\", end=\"\")\n print(\"Video capture stopped.\\n\")\n break\n\n #Run facial detection and recognition on image\n detectAndRecognizeFacesInImage(frame,\n database, useHOG)\n\n #Display the resulting image\n cv2.imshow('Video', frame)\n\n #Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n #Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()", "def __init__(self):\n self.video = cv2.VideoCapture(0)\n # Set properties\n self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 480)\n self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 270)\n self.video.set(cv2.CAP_PROP_FPS, 25)", "def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam", "def play_video(video_path):\n key_esc = 27\n videoCapture = cv2.VideoCapture(video_path)\n\n success, img = videoCapture.read()\n # Loop until there are no more frames.\n while success:\n cv2.imshow('video', img)\n if 0xFF & cv2.waitKey(5) == key_esc:\n break\n success, img = videoCapture.read()\n\n cv2.destroyAllWindows()", "def run_func_on_camera(func, path: str = None):\n if path is None:\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n previous = resize(frame, 0.2)\n else:\n cap = cv2.VideoCapture(path)\n ret, frame = cap.read()\n previous = frame\n while True:\n ret, frame = cap.read()\n if path is not None:\n frame = resize(frame, 0.2)\n if ret:\n cv2.imshow(\"frame\", func(frame, previous))\n previous = frame\n sleep(0.2)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n cap.release()\n cv2.destroyAllWindows()", "def initialize_camera(self):\n if Rescue_PI.input_video_file_path is None:\n print(\"[INFO] starting threaded video stream...\")\n self.vs = VideoStream(src=VID_CAM_INDEX).start()\n else:\n self.vs = cv2.VideoCapture(Rescue_PI.input_video_file_path)", "def mostrarVideo(nombre,frame):\n cv2.imshow(nombre, frame)", "def init(this, id, **kargs):\n\t\tif this._CAP: this.release()\n\t\tthis._CAP = cv2.VideoCapture(id)\n\t\tthis.config(**kargs)\n\t\t\n\t\tthis._CAP.set(cv2.CAP_PROP_FPS, 9999.9) # > 9000.\n\t\t\n\t\ttry: this.getFrame()\n\t\texcept:\n\t\t\tprint 'On a perdu une caméra !'\n\t\t\tthis.release()" ]
[ "0.7465654", "0.7350664", "0.7223432", "0.7108686", "0.7093217", "0.701221", "0.7001169", "0.6957629", "0.6868298", "0.68425375", "0.672854", "0.67270094", "0.666812", "0.66563153", "0.66343963", "0.66135263", "0.65414745", "0.6515322", "0.64338577", "0.64204544", "0.63682413", "0.63548666", "0.63284904", "0.6324742", "0.63221663", "0.6297384", "0.6289984", "0.6288887", "0.62839556", "0.62578845" ]
0.8227863
0
Shows mean values of RGB channels next to the frame from the webcam.
def showAverageRGB(self): # Affichage dans les boxes self.red_label.setText("Mean RED: "+str(int(round(self.R.mean())))) self.red_label.adjustSize() self.green_label.setText("Mean GREEN: "+str(int(round(self.G.mean())))) self.green_label.adjustSize() self.blue_label.setText("Mean BLUE: "+str(int(round(self.B.mean())))) self.blue_label.adjustSize()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def meanColor(self):\n return self.image[self.x, self.y]", "def current_average_luma(camera):\n camera.capture('/home/pi/Desktop/image1.jpg')#camera take picture\n img = Image.open(\"/home/pi/Desktop/image1.jpg\") #opens image\n \n luma=0 #sum of the lumenance of each pixels\n pixels = img.width*img.height #number of pixels\n \n for x in range(img.width):\n for y in range(img.height):\n (r, g, b) = img.getpixel((x,y))#get colour touple \n luma += (0.2126*r + 0.7152*g + 0.0722*b) #calculate luma of RGB data, then add to total\n #END for\n #END for\n \n img.close()#ensure to properly close the image\n return luma/pixels #return average of all pixels", "def meanColor(self):\n return np.array([f.meanColor() for f in self])", "def meanRGB(im_list):\n print 'Computing mean RGB pixel ...'\n mean, std = np.zeros(3), np.zeros(3)\n for i, filename in enumerate(im_list):\n # Write completion bar\n n = 1. * i / len(im_list)\n sys.stdout.write('\\r')\n sys.stdout.write(\"[{:20}] {}%\".format('='*int(n/0.05), int(100*n)))\n sys.stdout.flush()\n # Process image\n im = np.array(Image.open(filename)).reshape(-1, 3)\n mean += np.mean(im, axis=0)\n std += np.std(im, axis=0)\n print ''\n mean, std = mean / len(im_list), std / len(im_list)\n return mean, std", "def mean_pixel(model_variant=None):\n if model_variant is None:\n return _MEAN_RGB\n else:\n return [127.5, 127.5, 127.5]", "def meanrgb(color1,color2):\r\n if check_colormath:\r\n srgb1 = sRGBColor(color1[0],color1[1],color1[2])\r\n srgb2 = sRGBColor(color2[0],color2[1],color2[2])\r\n\r\n lab1 = convert_color (srgb1,LabColor)\r\n lab2 = convert_color (srgb2,LabColor)\r\n lab1tuple = SpectralColor.get_value_tuple(lab1)\r\n lab2tuple = SpectralColor.get_value_tuple(lab2)\r\n labAtuple = ( (lab1tuple[0] + lab2tuple[0])/2.0 , (lab1tuple[1] + lab2tuple[1])/2.0,\r\n (lab1tuple[2] + lab2tuple[2])/2.0 )\r\n labA = LabColor(labAtuple[0],labAtuple[1],labAtuple[2])\r\n rgbA = convert_color(labA,sRGBColor)\r\n rgbAtuple = SpectralColor.get_value_tuple(rgbA)\r\n return list(rgbAtuple)\r\n else:\r\n acolor = [0,0,0]\r\n for j in range(3):\r\n ## this seems to give a useful average color\r\n meancolor = (color1[j] + color2[j])/2.0\r\n # now lighten it a bit\r\n acolor[j] = (1.0 - (0.8 * (1.0 -meancolor )))\r\n return acolor", "def _rgb_norm_frame_channel(self, frame, index):\n frame_arr = frame.getNumpy().astype(np.float32, copy=False)\n b = frame_arr[:,:,0]\n g = frame_arr[:,:,1]\n r = frame_arr[:,:,2]\n rgb_sum = b+g+r\n green_arr = (g*255.0/rgb_sum)\n # Squares - green\n alpha = self._contrasts[index] / 100.0 # [0.0-5.0]\n beta = self._brightnesses[index] - 1000 # [-500 - 500]\n green_arr = np.clip(alpha * green_arr + beta, 0, 255)\n green_img = Image(green_arr, colorSpace = ColorSpace.GRAY)\n return green_img", "def get_video_average(video_path):\n vidcap = cv2.VideoCapture(video_path)\n\n width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n num_color_channels = 3\n\n avg_frame = np.zeros((height, width, num_color_channels), dtype=np.float64)\n frames = 0\n\n while True:\n success, img = vidcap.read()\n if not success:\n break\n avg_frame += img\n frames += 1\n\n avg_frame = avg_frame / frames\n ####avg_frame = cv2.cvtColor(avg_frame, cv2.COLOR_BGR2RGB)\n avg_frame = avg_frame.astype(np.uint8)\n cv2.imwrite(\"average_frame.png\", avg_frame)", "def meancol(source):\n\tonepix = source.copy()\n\tonepix.thumbnail((1,1),Image.ANTIALIAS)\n\treturn onepix.getpixel((0,0))", "def channel_means(self, samplesize=1.):\n 
total_pix = 0\n mean = 0\n frequency = int(1/samplesize)\n for i in range(0, len(self), frequency):\n im, _ = self[i]\n mean_i = im.mean(axis=(0, 1))\n pix_i = im.shape[0] * im.shape[1]\n mean = np.divide(mean * total_pix + mean_i *\n pix_i, total_pix + pix_i)\n total_pix += pix_i\n\n return mean", "def color_averages(img):\n return np.average(img, axis = (0, 1))", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def mean(self) -> FrameLike:\n return super().mean()", "def average_image(im):\n color_vector = [int(x) for x in ImageStat.Stat(im).mean]\n return color_vector", "def _show_rgb(self):\n R, G, B = self._rgb_frames()\n image = numpy.dstack((R, G, B))\n imageItem = self.parent.image.getImageItem()\n imageItem.updateImage(image)", "def brightness(rgb):\n # return (min(rgb) + max(rgb)) / 2\n return rgb_to_hls(rgb)[1] * 255", "def average_brightness(im):\n imcopy = im.copy().convert('L')\n stat = ImageStat.Stat(imcopy)\n return stat.mean[0]", "def observation(self, img):\r\n img = img[25:200]\r\n img = cv2.resize(img, self.img_size[1:])\r\n if not self.color:\r\n img = img.mean(-1, keepdims=True)\r\n\r\n return img.transpose([2, 0, 1]) / 255", "def run(self):\n\n last_mean = 0\n st = time.time()\n sframe = 0\n while True:\n if time.time()-1 > st:\n st = time.time()\n #print 'fps', self.frame_counter - sframe\n sframe = self.frame_counter\n\n self.frame_counter += 1\n frame = next(self.frame_generator)\n\n xMax = frame.shape[1]\n yMax = frame.shape[0]\n\n capture_area = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n mean, stddev = cv2.meanStdDev(capture_area)\n mean = mean[0][0]\n stddev = stddev[0][0]\n\n if abs(mean-last_mean) > ACTIVATE_MEAN_DIFF:\n self.wakeup()\n\n last_mean = mean\n\n faces = []\n if abs(self.frame_counter - self.last_face_frame) < 20 or self.frame_counter % 5 == 0:\n faces = faceCascade.detectMultiScale(\n capture_area,\n scaleFactor=1.1,\n minNeighbors=MIN_NEIGHBOURS,\n minSize=(30, 30)\n )\n\n if len(faces) == 1:\n self.last_face_frame = self.frame_counter\n face = faces[0]\n x, y, w, h = face\n\n x1 = x\n x2 = x+w\n y1 = y\n y2 = y+h\n\n # expand_area\n width_plus = int(w/4.0)\n height_plus = int(h/4.0)\n x1 -= width_plus\n x2 += width_plus\n y1 -= height_plus\n y2 += height_plus\n\n y_max, x_max = frame.shape[:2]\n\n x1 = max(0, x1)\n y1 = max(0, y1)\n x2 = min(x_max, x2)\n y2 = min(y_max, y2)\n\n colour_face = frame[y1:y2, x1:x2]\n colour_face = np.copy(colour_face)\n\n face_obj = Face(face, colour_face, self.frame_counter)\n self.capture_face(face_obj)\n\n #st = time.time()\n bm = get_best_match(colour_face)\n match_person = bm\n if match_person is not None:\n self.found_people[match_person] += 1\n\n\n #et = time.time()\n #print et-st\n #result = self.pool.apply_async(get_best_match, (colour_face,))\n #self.pool_results.append(result)\n\n if len(self.pool_results) > 0:\n print(len(self.pool_results))\n res = self.pool_results[0]\n try:\n match_person = res.get()\n print('match here', match_person)\n except TimeoutError:\n pass\n else:\n self.pool_results.popleft()\n if match_person is not None:\n self.found_people[match_person] += 1\n\n # do flush if we have enough frames\n if len(self.capture_buffer) >= FRAMES_COUNT_TO_SAVE:\n self.flush_capture_buffer()\n\n # clear buffer if we never got enough frames\n if len(self.capture_buffer) > 
0:\n if self.frame_counter - self.capture_buffer[-1].frame_counter > MAX_FRAMES_BETWEEN_CAPTURES:\n self.flush_capture_buffer()\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), DRAWING_COLOR, 15)\n\n # Display the resulting frame\n frame = cv2.flip(frame, flipCode=1)\n\n if self.draw_wanted_start_frame > self.frame_counter - TEXT_DISPLAY_TIME:\n cv2.putText(frame, \"Thanks!\", (150,250), cv2.FONT_HERSHEY_DUPLEX, 8.0, DRAWING_COLOR, 14)\n if self.thank_person is not None:\n cv2.putText(frame, self.thank_person, (150,450), cv2.FONT_HERSHEY_DUPLEX, 6.0, DRAWING_COLOR, 12)\n\n # When the screen goes off, we hang on waitKey, so don't do it if we haven't done a wakeup recently\n # Also no point in updating the screen if it is off.\n if self.last_wakeup + 40 > time.time():\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Display the resulting frame\n cv2.imshow('Video', frame)", "def mean(self) -> typing.Tuple[float, float]:\r\n self.clean_window()\r\n return (\r\n (self.sum_frames_rec / self.window_size),\r\n (self.sum_frames_proc / self.window_size)\r\n )", "def display_images():\n vc = cv2.VideoCapture(0) # Open webcam\n figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot\n\n count = 0 # Counter for number of aquired frames\n intensity = [] # Append intensity across time\n\n # For loop over generator here\n intensity.append(imageintensity)\n plot_image_and_brightness() # Call plot function\n count += 1\n\n # This triggers exit sequences when user presses q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up here\n plt.close('all') # close plots\n generator.close() # Use generator exit for clean up,\n break # break loop", "def meanVal(img):\n\tmean = cv2.mean(img)\n\tif img is None:\n\t\tprint \"ERROR: MeanValue: Sent in None-Type Object\"\n\t\treturn -1\n\tif len(img.shape) == 3:\n\t\treturn (mean[0], mean[1], mean[2])\n\telif len(img.shape) == 2:\n\t\treturn (mean[0])\n\telse:\n\t\treturn mean", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def meanSpectrum(img='g35.03_KDnh3_11.hline.self.image', nBaselineChannels=16,\n sigmaCube=3, verbose=False, nanBufferChannels=2, useAbsoluteValue=False,\n baselineMode='edge', percentile=20, continuumThreshold=None,\n meanSpectrumFile='', centralArcsec=-1, imageInfo=[], chanInfo=[], mask='',\n meanSpectrumMethod='peakOverRms', peakFilterFWHM=15, iteration=0, applyMaskToMask=False):\n if (not os.path.exists(img)):\n casalogPost(\"Could not find image = %s\" % (img))\n return\n myia = createCasaTool(iatool)\n usermaskdata = ''\n if (len(mask) > 0):\n # This is the user mask (not the minpb mask inside the cube).\n myia.open(mask)\n usermaskdata = myia.getregion()\n if (verbose): print \"shape(usermask) = \", np.array(np.shape(usermaskdata))\n if applyMaskToMask:\n usermaskmask = myia.getregion(getmask=True)\n idx = np.where(usermaskmask==False)\n casalogPost('applyMaskToMask has zeroed out %d pixels.' 
% (len(idx[0])))\n usermaskdata[idx] = 0\n maskAxis = findSpectralAxis(myia)\n if (np.shape(usermaskdata)[maskAxis] > 1):\n singlePlaneUserMask = False\n else:\n singlePlaneUserMask = True\n if (meanSpectrumMethod.find('meanAboveThreshold') >= 0):\n casalogPost(\"single plane user masks not supported by meanSpectrumMethod='meanAboveThreshold', try peakOverMad.\")\n myia.close()\n return\n myia.close()\n myia.open(img)\n axis = findSpectralAxis(myia)\n if verbose: print \"Found spectral axis = \", axis\n myrg = None\n if (centralArcsec < 0 or centralArcsec == 'auto'):\n centralArcsec = -1\n if (len(mask) > 0 or meanSpectrumMethod != 'peakOverMad'):\n pixels = myia.getregion()\n maskdata = myia.getregion(getmask=True)\n nchan = np.shape(maskdata)[axis]\n else:\n bmaj, bmin, bpa, cdelt1, cdelt2, naxis1, naxis2, freq = imageInfo\n blc = [0,0,0,0]\n trc = [naxis1-1,naxis2-1,0,0]\n nchan = chanInfo[0]\n myrg = createCasaTool(rgtool)\n else:\n myrg = createCasaTool(rgtool)\n bmaj, bmin, bpa, cdelt1, cdelt2, naxis1, naxis2, freq = imageInfo\n nchan = chanInfo[0]\n x0 = int(np.round(naxis1*0.5 - centralArcsec*0.5/np.abs(cdelt1)))\n x1 = int(np.round(naxis1*0.5 + centralArcsec*0.5/np.abs(cdelt1)))\n y0 = int(np.round(naxis2*0.5 - centralArcsec*0.5/cdelt2))\n y1 = int(np.round(naxis2*0.5 + centralArcsec*0.5/cdelt2))\n # avoid going off the edge of non-square images\n if (x0 < 0): x0 = 0\n if (y0 < 0): y0 = 0\n if (x0 >= naxis1): x0 = naxis1 - 1\n if (y0 >= naxis2): y0 = naxis2 - 1\n blc = [x0,y0,0,0]\n trc = [x1,y1,0,0]\n trc[axis] = nchan\n region = myrg.box(blc=blc, trc=trc)\n pixels = myia.getregion(region=region)\n casalogPost(\"Taking submask for central area of image: blc=%s, trc=%s\" % (str(blc),str(trc)))\n maskdata = myia.getregion(region=region,getmask=True)\n# myrg.done()\n if (len(mask) > 0):\n usermaskdata = submask(usermaskdata, region)\n if verbose:\n print \"shape of pixels = \", np.array(np.shape(pixels))\n if len(mask) > 0:\n if not (np.array(np.shape(pixels)) == np.array(np.shape(usermaskdata))).all():\n casalogPost(\"Mismatch in shape between image (%s) and mask (%s)\" % (np.shape(pixels),np.shape(usermaskdata)))\n if myrg is not None: myrg.done()\n return\n if (meanSpectrumMethod.find('OverRms') > 0 or meanSpectrumMethod.find('OverMad') > 0):\n # compute myrms, ignoring masked values and usermasked values\n if (meanSpectrumMethod.find('OverMad') < 0):\n casalogPost(\"Computing std on each plane\")\n else:\n casalogPost(\"Computing mad on each plane\")\n myvalue = []\n# for a in range(np.shape(pixels)[axis]):\n for a in range(nchan):\n if ((a+1)%100 == 0): \n print \"Done %d/%d\" % (a+1, nchan)\n# print \"Done %d/%d\" % (a+1, np.shape(pixels)[axis])\n # Extract this one channel\n if (axis == 2):\n if len(mask) > 0:\n mypixels = pixels[:,:,a,0]\n mymask = maskdata[:,:,a,0]\n if (singlePlaneUserMask):\n myusermask = usermaskdata[:,:,0,0]\n else:\n myusermask = usermaskdata[:,:,a,0]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n mymask = myia.getregion(region=myregion,getmask=True)\n elif (axis == 3):\n if (len(mask) > 0):\n mypixels = pixels[:,:,0,a]\n mymask = maskdata[:,:,0,a]\n if (singlePlaneUserMask):\n myusermask = usermaskdata[:,:,0,0]\n else:\n myusermask = usermaskdata[:,:,0,a]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n mymask = myia.getregion(region=myregion,getmask=True)\n \n if (len(mask) > 0):\n # user mask is 
typically a clean mask, so we want to use the region outside the\n # clean mask for computing the MAD, but also avoiding the masked edges of the image,\n # which are generally masked to False\n pixelsForStd = mypixels[np.where((myusermask<1) * (mymask==True))]\n else: \n # avoid the masked (typically outer) edges of the image using the built-in mask\n pixelsForStd = mypixels[np.where(mymask==True)]\n if (meanSpectrumMethod.find('OverMad') < 0):\n myvalue.append(np.std(pixelsForStd))\n else:\n myvalue.append(MAD(pixelsForStd))\n# print \"channel %4d: Using %d of %d pixels for MAD/std\" % (a,len(pixelsForStd),np.prod(np.shape(mypixels)))\n if (meanSpectrumMethod.find('OverMad') < 0):\n myrms = np.array(myvalue)\n else:\n mymad = np.array(myvalue)\n print \"Finished\"\n percentagePixelsNotMasked = 100\n if (meanSpectrumMethod.find('peakOver') == 0):\n # compute mymax (an array of channel maxima), then divide by either myrms or mymad array\n gaussianSigma = peakFilterFWHM/2.355\n myvalue = []\n casalogPost(\"B) Current memory usage: %.3f GB, resident: %.3f GB\" % (memoryUsage(), residentMemoryUsage()))\n casalogPost(\"Smoothing and computing peak on each plane.\")\n if (len(mask) > 0):\n pixels[np.where(usermaskdata==0)] = np.nan\n for a in range(nchan):\n if ((a+1)%100 == 0): \n print \"Done %d/%d\" % (a+1, nchan)\n if (axis == 2):\n if len(mask) > 0:\n mypixels = pixels[:,:,a,0]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n elif (axis == 3):\n if len(mask) > 0:\n mypixels = pixels[:,:,0,a]\n else:\n blc[axis] = a\n trc[axis] = a\n myregion = myrg.box(blc=blc,trc=trc)\n mypixels = myia.getregion(region=myregion)\n if (gaussianSigma > 1.1/2.355):\n if (len(mask) > 0):\n # taken from stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python\n V = mypixels.copy()\n V[mypixels!=mypixels] = 0\n VV = gaussian_filter(V,sigma=gaussianSigma)\n W = mypixels.copy()+1\n W[mypixels!=mypixels] = 0\n WW = gaussian_filter(W,sigma=gaussianSigma)\n mypixels = VV/WW\n myvalue.append(np.nanmax(mypixels))\n else:\n myvalue.append(np.nanmax(gaussian_filter(mypixels,sigma=gaussianSigma)))\n else:\n myvalue.append(np.nanmax(mypixels))\n print \"finished\"\n mymax = np.array(myvalue)\n if (meanSpectrumMethod == 'peakOverRms'):\n avgspectrum = mymax/myrms\n elif (meanSpectrumMethod == 'peakOverMad'):\n avgspectrum = mymax/mymad\n nansRemoved = removeNaNs(avgspectrum, verbose=True)\n threshold = 0\n edgesUsed = 0\n nansReplaced,nanmin = removeNaNs(avgspectrum, replaceWithMin=True, \n nanBufferChannels=nanBufferChannels, verbose=True)\n elif (meanSpectrumMethod.find('meanAboveThreshold') == 0):\n if (continuumThreshold is not None):\n belowThreshold = np.where(pixels < continuumThreshold)\n if verbose:\n print \"shape of belowThreshold = \", np.shape(belowThreshold)\n pixels[belowThreshold] = 0.0\n if (len(mask) > 0):\n pixelsWithinUserMask = len(np.where(usermaskdata<1)[0])\n pixelsWithinCubeMask = len(np.where(maskdata==1)[0])\n pixelsForMAD = pixels[np.where((maskdata==1) * (usermaskdata<1))]\n npixels = np.prod(np.shape(pixels))\n percent = 100.*len(pixelsForMAD) / npixels\n percent2 = 100.*pixelsWithinUserMask/npixels\n percent3 = 100.*pixelsWithinCubeMask/npixels\n casalogPost(\"Using %d of %d pixels (%.2f%%) for MAD: %d (%.2f%%) outside user mask, %d (%.2f%%) satisfy cube mask, i.e. 
minpb masking\" % (len(pixelsForMAD),npixels, percent, pixelsWithinUserMask, percent2, pixelsWithinCubeMask, percent3))\n else:\n pixelsForMAD = pixels[np.where(maskdata==1)] # ignore the outer mask edges of the cube\n casalogPost(\"Using %d of %d pixels for MAD\" % (len(pixelsForMAD),np.prod(np.shape(pixels))))\n# pixelsForMAD = pixels # previous method\n madTime = timeUtilities.time()\n std = MAD(pixelsForMAD, axis=None)\n endMadTime = timeUtilities.time()\n casalogPost(\"%.1f sec elapsed in computing MAD within meanSpectrum()\" % (endMadTime-madTime))\n if verbose: print \"MAD of cube = \", std\n naxes = len(np.shape(pixels))\n nchan = np.shape(pixels)[axis]\n\n if (baselineMode == 'edge'):\n # Method #1: Use the two edges of the spw to find the line-free rms of the spectrum\n nEdgeChannels = nBaselineChannels/2\n # lower edge\n blc = np.zeros(naxes)\n trc = [i-1 for i in list(np.shape(pixels))]\n trc[axis] = nEdgeChannels\n myrg = createCasaTool(rgtool)\n region = myrg.box(blc=blc, trc=trc)\n lowerEdgePixels = myia.getregion(region=region)\n # drop all floating point zeros (which will drop pixels outside the mosaic image mask)\n lowerEdgePixels = lowerEdgePixels[np.where(lowerEdgePixels!=0.0)]\n stdLowerEdge = MAD(lowerEdgePixels)\n medianLowerEdge = nanmedian(lowerEdgePixels)\n if verbose: print \"MAD of %d channels on lower edge = %f\" % (nBaselineChannels, stdLowerEdge)\n\n # upper edge\n blc = np.zeros(naxes)\n trc = [i-1 for i in list(np.shape(pixels))]\n blc[axis] = trc[axis] - nEdgeChannels\n region = myrg.box(blc=blc, trc=trc)\n upperEdgePixels = myia.getregion(region=region)\n# myrg.done()\n # drop all floating point zeros\n upperEdgePixels = upperEdgePixels[np.where(upperEdgePixels!=0.0)]\n stdUpperEdge = MAD(upperEdgePixels)\n medianUpperEdge = nanmedian(upperEdgePixels)\n casalogPost(\"meanSpectrum(): edge medians: lower=%.10f, upper=%.10f\" % (medianLowerEdge, medianUpperEdge))\n\n if verbose: \n print \"MAD of %d channels on upper edge = %f\" % (nEdgeChannels, stdUpperEdge)\n if (stdLowerEdge <= 0.0):\n edgesUsed = 1\n stdEdge = stdUpperEdge\n medianEdge = medianUpperEdge\n elif (stdUpperEdge <= 0.0):\n edgesUsed = 0\n stdEdge = stdLowerEdge\n medianEdge = medianLowerEdge\n else:\n edgesUsed = 2\n stdEdge = np.mean([stdLowerEdge,stdUpperEdge])\n medianEdge = np.mean([medianLowerEdge,medianUpperEdge])\n \n if (baselineMode != 'edge'):\n # Method #2: pick the N channels with the lowest absolute values (to avoid\n # confusion from absorption lines and negative bowls of missing flux)\n npixFraction = nBaselineChannels*1.0/nchan\n if (centralArcsec < 0):\n allPixels = myia.getregion()\n else:\n allPixels = pixels\n myia.close()\n # Convert all NaNs to zero\n allPixels[np.isnan(allPixels)] = 0\n # Drop all floating point zeros and internally-masked pixels from calculation\n if (mask == ''):\n allPixels = allPixels[np.where((allPixels != 0) * (maskdata==True))]\n else:\n # avoid identical zeros and clean mask when looking for lowest pixels\n allPixels = allPixels[np.where((allPixels != 0) * (maskdata==True) * (usermaskdata<1))]\n # Take absolute value\n absPixels = np.abs(allPixels)\n # Find the lowest pixel values\n percentileThreshold = scoreatpercentile(absPixels, percentile)\n idx = np.where(absPixels < percentileThreshold)\n # Take their statistics\n stdMin = MAD(allPixels[idx])\n medianMin = nanmedian(allPixels[idx])\n\n if (baselineMode == 'edge'):\n std = stdEdge\n median = medianEdge\n casalogPost(\"meanSpectrum(): edge mode: median=%f MAD=%f threshold=%f 
(edgesUsed=%d)\" % (medianEdge, stdEdge, medianEdge+stdEdge*sigmaCube, edgesUsed))\n else:\n std = stdMin\n median = medianMin\n edgesUsed = 0\n casalogPost(\"meanSpectrum(): min mode: median=%f MAD=%f threshold=%f\" % (medianMin, stdMin, medianMin+stdMin*sigmaCube))\n \n if (axis == 2 and naxes == 4):\n # drop the degenerate axis so that avgOverCube will work with nanmean(axis=0)\n pixels = pixels[:,:,:,0]\n if (len(mask) > 0):\n maskdata = propagateMaskToAllChannels(maskdata, axis)\n else:\n maskdata = ''\n avgspectrum, percentagePixelsNotMasked = avgOverCube(pixels, useAbsoluteValue, mask=maskdata, usermask=usermaskdata)\n if meanSpectrumMethod.find('OverRms') > 0:\n avgspectrum /= myrms\n elif meanSpectrumMethod.find('OverMad') > 0:\n avgspectrum /= mymad\n threshold = median + sigmaCube*std\n casalogPost(\"Using threshold above which to compute mean spectrum = %f\" % (threshold), verbose)\n pixels[np.where(pixels < threshold)] = 0.0\n casalogPost(\"Running avgOverCube\")\n avgspectrumAboveThreshold, percentagePixelsNotMasked = avgOverCube(pixels, useAbsoluteValue, threshold, mask=maskdata, usermask=usermaskdata)\n if meanSpectrumMethod.find('OverRms') > 0:\n avgspectrumAboveThreshold /= myrms\n elif meanSpectrumMethod.find('OverMad') > 0:\n avgspectrumAboveThreshold /= mymad\n if verbose: \n print \"Running removeNaNs (len(avgspectrumAboveThreshold)=%d)\" % (len(avgspectrumAboveThreshold))\n nansRemoved = removeNaNs(avgspectrumAboveThreshold)\n nansReplaced,nanmin = removeNaNs(avgspectrumAboveThreshold, replaceWithMin=True, \n nanBufferChannels=nanBufferChannels)\n nchan, firstFreq, lastFreq, channelWidth = chanInfo\n frequency = np.linspace(firstFreq, lastFreq, nchan)\n if verbose: \n print \"Running writeMeanSpectrum\"\n writeMeanSpectrum(meanSpectrumFile, frequency, avgspectrum, nansReplaced, threshold,\n edgesUsed, nchan, nanmin, centralArcsec, mask, iteration)\n if (myrg is not None): myrg.done()\n return(avgspectrum, nansRemoved, nansReplaced, threshold, \n edgesUsed, nchan, nanmin, percentagePixelsNotMasked)", "def preprocess_frame(self, frame):\n # Greyscale frame\n img = np.mean(frame,-1)\n\n # Remove black bar at the bottom\n cropped_img = img[:-12, :]\n\n # Normalize Pixel Values\n normalized_frame = cropped_img/255.0\n\n return normalized_frame", "def normalize_for_rgb(raw_frame):\n return tf.cast(raw_frame, tf.float32) / 255.0" ]
[ "0.67817515", "0.659468", "0.65413666", "0.64140654", "0.63425577", "0.6320953", "0.62316763", "0.6121077", "0.6084322", "0.5991751", "0.5955584", "0.5944211", "0.5944211", "0.5944211", "0.5944211", "0.5944211", "0.5944211", "0.58618677", "0.58269626", "0.5824946", "0.5823764", "0.5763152", "0.5756044", "0.57319933", "0.5706604", "0.5676468", "0.56562537", "0.56451505", "0.5640862", "0.5630638" ]
0.6885815
0
Convert cv2 image (BGR numpy array) to QPixelmap object to display it the GUI.
def convertToQPixelmap(self, imgToConvert): # Conversion en image QImage if ( len(imgToConvert.shape) == 3 ): img_qimg = QtGui.QImage(imgToConvert.data, imgToConvert.shape[1], imgToConvert.shape[0], imgToConvert.strides[0], QtGui.QImage.Format_RGB888) else: img_qimg = QtGui.QImage(imgToConvert.data, imgToConvert.shape[1], imgToConvert.shape[0], imgToConvert.strides[0], QtGui.QImage.Format_Indexed8) # Conversion en image QPixmap pour l'afficher return QtGui.QPixmap.fromImage(img_qimg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\r\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\r\n h, w, ch = rgb_image.shape\r\n bytes_per_line = ch * w\r\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\r\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, Qt.KeepAspectRatio)\r\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def cv_image_to_qt_image(cv_img):\n height, width, channels = cv_img.shape\n\n ##return QtGui.QImage(\n ##cv_img.data, width, height, QtGui.QImage.Format_ARGB32\n ##)\n \n qt_img = QtGui.QImage(width, height, QtGui.QImage.Format_RGB888)\n\n for i, line in enumerate(cv_img):\n for j, pix in enumerate(line):\n qt_img.setPixel(j, i, QtGui.qRgb(pix[2], pix[1], pix[0]))\n\n return qt_img", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(\n rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n display_width = 960\n display_height = 540\n p = convert_to_Qt_format.scaled(\n display_width, display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def Convert_CV_QT(self, cv_img):\n\t\trgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n\t\th, w, ch = 
rgb_image.shape\n\t\tbytes_per_line = ch * w\n\t\tconvert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n\t\tp = convert_to_Qt_format.scaled(self.camera.size().width(), self.camera.size().height(), Qt.IgnoreAspectRatio)\n\t\t\n\t\treturn QPixmap.fromImage(p)", "def cvimg2qpixmap(cvimg: np.ndarray):\n height, width, channel = cvimg.shape\n bytesPerLine = 3 * width\n qImg = QImage(cvimg.data, width, height, bytesPerLine, QImage.Format_RGB888)\n return QPixmap(qImg)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(225, 220) # , Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n\t\trgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n\t\th, w, ch = rgb_image.shape\n\t\tbytes_per_line = ch * w\n\t\tconvert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n\n\t\tif w > 1000:\n\t\t\trat = w / 1000\n\t\t\tw = 1000\n\t\t\th = h / rat\n\n\t\tp = convert_to_Qt_format.scaled(int(w), int(h), Qt.KeepAspectRatio)\n\t\treturn QPixmap.fromImage(p)", "def numpy_to_pixmap(img, width, height):\n img_resized = cv2.resize(img, (width, height), cv2.INTER_AREA)\n img_rgb = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)\n qt_img = QImage(img_rgb.data, img_rgb.shape[1], img_rgb.shape[0], QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(qt_img)\n return pixmap", "def to_qt_pixmap(self, scale=None):\n bytes_per_line = 3 * self.width\n img = self.to_color().img\n rgb = opencv.cvtColor(img, opencv.COLOR_BGR2RGB)\n q_img = QImage(rgb.data, self.width, self.height, bytes_per_line, QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(q_img)\n\n if scale is not None:\n pixmap = pixmap.scaled(scale, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\n\n return pixmap", "def GetPixelMapFromImage(image):\n image_pixmap = QPixmap()\n image_pixmap.convertFromImage(image)\n return image_pixmap", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.camera_feed.setPixmap(qt_img)", "def update_image(self, cv_img):\n\t\tqt_img = self.ImageEdits(cv_img)\n\t\tself.camera.setPixmap(qt_img)", "def update_image(self, cv_img):\n \n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)\n #pass", "def frame_to_pixmap(color_image): \r\n height, width, channel = color_image.shape\r\n bytes_per_line = channel * width\r\n\r\n q_img = QImage(color_image.data, width, height, bytes_per_line, QImage.Format_BGR888)\r\n return QPixmap.fromImage(q_img)", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)", "def cv2toQImage(cv2image):\n import numpy as np\n height, width = cv2image.shape[0:2]\n tmp = cv2image[:,:,0].copy()\n cv2image[:,:,0] = cv2image[:,:,2]\n cv2image[:,:,2] = tmp\n return QImage(cv2image.data, width, height, width*3, QImage.Format.Format_RGB888)", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n self.label.setPixmap(qt_img)", "def update_image(self, cv_img):\n\t\tqt_img = self.convert_cv_qt(cv_img)\n\t\tself.label.setPixmap(qt_img)\n\t\tself.display_info()", "def show_map_window(image):\n cv2.imshow(_WINDOW_NAME, image)", "def 
update_image(self, img):\r\n qt_img = self.convert_cv_qt(img)\r\n self.main.caption_feed.setPixmap(qt_img)", "def convert_image_to_QTformat(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n convertToQtFormat = QImage(image.data, image.shape[1], image.shape[0], QImage.Format_RGB888)\n qt_image = convertToQtFormat.scaled(500, 375, Qt.KeepAspectRatio)\n return qt_image", "def update_image(self, cv_img):\n self.updated_temp_goal_image = cv_img\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)", "def convertQtVideoFrame(self):\n\n try:\n frame = cv2.resize(self.VideoFrame, (640, 480))\n img = QImage(frame,\n frame.shape[1],\n frame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None" ]
[ "0.718548", "0.718548", "0.718548", "0.7123519", "0.71096885", "0.71096885", "0.7105883", "0.7094785", "0.7086501", "0.70823795", "0.70712984", "0.7009322", "0.68332046", "0.6717237", "0.66939294", "0.6532576", "0.65172887", "0.64648676", "0.6361938", "0.632177", "0.629296", "0.629296", "0.6291553", "0.62730473", "0.6147082", "0.60916775", "0.5938705", "0.5934626", "0.5897705", "0.57539153" ]
0.7442458
0
Upload a model checkpoint to the specified bucket in GCS.
def upload_checkpoint( bucket_namespace: str, bucket_name: str,prefix:str, checkpoint_filepath: Union[Path, str] ): bucket_prefix = prefix dst_path = f"{bucket_prefix}/{checkpoint_filepath}" # dst_path = f"{bucket_prefix}/{target_filepath}" print('Uploading {} => {}'.format(checkpoint_filepath,dst_path)) bucket = get_bucket(bucket_namespace, bucket_name) blob = bucket.blob(dst_path) blob.upload_from_filename(checkpoint_filepath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model_to_gcp(self, model_name):\n local_model_name = f'{model_name}.joblib'\n # saving the trained model to disk (which does not really make sense\n # if we are running this code on GCP, because then this file cannot be accessed once the code finished its execution)\n joblib.dump(self.model, local_model_name)\n print(\"saved model.joblib locally\")\n client = storage.Client().bucket(BUCKET_NAME)\n storage_location = f\"models/{local_model_name}\"\n blob = client.blob(storage_location)\n blob.upload_from_filename(local_model_name)\n print(\"uploaded model.joblib to gcp cloud storage under \\n => {}\".format(storage_location))", "def upload_to_gcs(file_name, tmp_obj_name, google_cloud_storage_conn_id, gcs_bucket):\n\n gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=google_cloud_storage_conn_id)\n gcs_hook.upload(bucket=gcs_bucket,\n object=file_name,\n filename=tmp_obj_name,\n gzip=True)\n logging.info(f'new file created {file_name}')", "def upload_to_gcs():\n client = storage.Client(project=\"filmreccommendations\")\n bucket = client.get_bucket(\"filmreccommendations.appspot.com\")\n blob = bucket.blob(os.path.basename(PICKLE_FILENAME))\n blob.upload_from_filename(PICKLE_FILENAME)", "def save_model(reg):\n\n # saving the trained model to disk is mandatory to then beeing able to upload it to storage\n # Implement here\n print(\"saved model.joblib locally\")\n\n # Implement here\n print(\"uploaded model.joblib to gcp cloud storage under \\n => {}\".format(storage_location))", "def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def write_to_S3(data_bucket, data_key, model_version, bucket_name):\n df = get_S3_df(data_bucket, data_key)\n X = df.drop(columns='target')\n y = df['target']\n fitted_model = fit(RF, X, y)\n\n key = f'model_{model_version}.joblib'\n\n with tempfile.TemporaryFile() as file:\n joblib.dump(fitted_model, file)\n file.seek(0)\n\n s3_resource = boto3.resource('s3')\n s3_resource.Object(bucket_name, key).put(Body=file.read())", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def save(self, fname, io=None):\n ckpt_path = self.manager.save()\n logging.info(f'Saved to {ckpt_path}')\n\n print_summary(self.model)\n\n if io is not None:\n io._upload_dir_to_bucket(self.save_path, self.save_path, ['ckpt', 'checkpoint'])", "def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return", "def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n 
file_name, bucket_name, file_name\n )\n )", "def _upload_to_bucket(self, filename, ext_filename):\n if ext_filename is None:\n return\n\n if self.s3:\n self.bucket.upload_file(filename, ext_filename)\n logging.info('Uploaded {} to S3 with name {}'.format(filename, ext_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.upload_from_filename(filename)\n logging.info('Uploaded to {}'.format(ext_filename))\n except:\n logging.warning('Uploading file to bucket failed')", "def __upload_to_gcp_bucket(df, fname):\n blob = BUCKET.blob(fname)\n json_str = df.to_json(orient='records')\n blob.upload_from_string(json_str)", "def _upload_to_gcs(self, file_to_upload):\n hook = GCSHook(\n gcp_conn_id=self.gcp_conn_id,\n impersonation_chain=self.impersonation_chain,\n )\n is_data_file = file_to_upload.get(\"file_name\") != self.schema_filename\n metadata = None\n if is_data_file and self.upload_metadata:\n metadata = {\"row_count\": file_to_upload[\"file_row_count\"]}\n\n object_name = file_to_upload.get(\"file_name\")\n if is_data_file and self.partition_columns:\n # Add partition column values to object_name\n partition_values = file_to_upload.get(\"partition_values\")\n head_path, tail_path = os.path.split(object_name)\n partition_subprefix = [\n f\"{col}={val}\" for col, val in zip(self.partition_columns, partition_values)\n ]\n object_name = os.path.join(head_path, *partition_subprefix, tail_path)\n\n hook.upload(\n self.bucket,\n object_name,\n file_to_upload.get(\"file_handle\").name,\n mime_type=file_to_upload.get(\"file_mime_type\"),\n gzip=self.gzip if is_data_file else False,\n metadata=metadata,\n )", "def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)", "def upload_blob(source_file_name, destination_blob_name, bucket_name=\"bts-ml-data\"):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n bucket_name = \"teststorechakra\"\n source_file_name = \"/Users/demo/Documents/learn/gcp/Setting_gcp_datalabs.sh\"\n destination_blob_name = \"testcloud sdk\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n storage_client = storage.Client.from_service_account_json(GCS_UPLOAD_KEY)\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)", "def upload(bucket, key, content, extra_agrs):\n # validate_content(content)\n validate_bucket_name(bucket)\n 
validate_key_name(key)\n client = get_client()\n if extra_agrs:\n client.put_object(Body=content, Bucket=bucket, Key=key, ContentType=extra_agrs['ContentType'])\n else:\n client.put_object(Body=content, Bucket=bucket, Key=key)", "def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None", "def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")", "def persist_model(self, model, trade_window):\n object_path = 'model_objects/'\n file_name = f'market_maker_model_{self.target_coin}_{trade_window}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(model, pickle.HIGHEST_PROTOCOL)\n )\n return", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def upload_blob(bucket_name, src_file, dst_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket('fansipan-website-290191')\n blob = bucket.blob('uploaded/'+dst_file_name)\n blob.upload_from_string(src_file, content_type='image/jpg')\n print('File uploaded to uploaded/{}.'.format(dst_file_name))", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))", "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )", "def upload_blob(bucket_name, data, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_string(data)", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def put_bucket_logging(Bucket=None, BucketLoggingStatus=None):\n pass" ]
[ "0.6675769", "0.6465549", "0.63573015", "0.6309598", "0.6173304", "0.5960331", "0.5890611", "0.58213955", "0.5812179", "0.5769049", "0.5753088", "0.5706468", "0.56932503", "0.56821054", "0.5661021", "0.5624393", "0.56197286", "0.55618644", "0.552092", "0.5520645", "0.5492323", "0.5472247", "0.54663026", "0.5439966", "0.5429463", "0.54174817", "0.54062855", "0.5399128", "0.53952456", "0.5358285" ]
0.72291005
0
Tests that finalize_fn is not run with multiple threads.
def test_finalize_fn_uses_single_thread(ray_start_regular_shared): block_refs_iter = itertools.starmap( lambda block, metadata: (ray.put(block), metadata), block_generator(num_blocks=20, num_rows=2), ) q = queue.Queue() semaphore = threading.Semaphore(value=1) def finalize_enforce_single_thread(batch): already_acquired = not semaphore.acquire(blocking=False) if already_acquired: e = AssertionError("finalize_fn is being run concurrently.") q.put(e, block=True) semaphore.release() return batch # Test that finalize_fn is called in a single thread, # even if prefetch_batches is set. output_batches = iter_batches( block_refs_iter, collate_fn=lambda batch: batch, finalize_fn=finalize_enforce_single_thread, prefetch_batches=4, ) # Force execution of the iterator. # This step should not raise an exception. list(output_batches) try: e = q.get(block=False, timeout=0.1) raise e except queue.Empty: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finalize():\n pass", "def finalize():\n pass", "def finalize() -> None:\n collective.finalize()", "def finalize():", "def finalize():", "def finalize():", "def finalize(self, interrupted=False):\n pass", "def finalize_worker():\n if SAMPLER_POOL is not None:\n for _ in range(NUM_SAMPLER_WORKERS):\n SAMPLER_POOL.apply_async(_exit)\n time.sleep(0.1) # This is necessary but I don't know why\n SAMPLER_POOL.close()", "def _finalize_manager(process, *args, **kwargs):\n\n def _join(functor, *args, **kwargs):\n timeout = kwargs.get('timeout')\n if not timeout is None and timeout < 1:\n kwargs['timeout'] = 1\n\n functor(*args, **kwargs)\n\n process.join = functools.partial(_join, process.join)\n SyncManager._finalize_manager(process, *args, **kwargs)", "def cleanup():\n for th in THREAD_REGISTER.values():\n th.exit()\n th.join(timeout=3)", "def test_cleanup(self):\n delivery_d = Deferred()\n worker_stop_d = Deferred()\n worker_helper = WorkerHelper(broker=FakeBroker(delivery_d=delivery_d))\n worker_helper._workers.append(FakeWorker(stop_d=worker_stop_d))\n d = worker_helper.cleanup()\n self.assertFalse(d.called)\n delivery_d.callback(None)\n self.assertFalse(d.called)\n worker_stop_d.callback(None)\n self.assertTrue(d.called)", "def ensure_finalized(self):\n with self.__finalize_lock:\n if not self.__finalized:\n self.finalize()", "def finalize(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def finalize(self):\n with self.__finalize_lock:\n assert self.__finalized is False\n if self.__finalize_called:\n # No recursive calls!\n return\n self.__finalize_called = True\n self._on_finalize()\n self.__finalized = True\n if not self.__api.is_production_mode():\n lock(self)", "def test_cleanup_sync(self):\n md_helper = MessageDispatchHelper(None, None)\n self.assertEqual(md_helper.cleanup(), None)", "def finalize(self, **kwargs: Any) -> None:\n pass", "def finalize(self):\n self.thread.quit()\n self.color.release()\n self.pos.release()\n\n if self.initCoordinates.f_timer is not None:\n for f_timer in self.initCoordinates.f_timer:\n self.timer.addFunctionTimer(f_timer)\n if self.numMethod.f_timer is not None:\n for f_timer in self.numMethod.f_timer:\n self.timer.addFunctionTimer(f_timer)", "def Cleanup(benchmark_spec):\n pass", "def test_cleanup_sync(self):\n msg_helper = MessageHelper()\n self.assertEqual(msg_helper.cleanup(), None)", "def final_cleanup(self):\n raise NotImplementedError()", "def test_cleanup_worker(self):\n worker_stop_d = Deferred()\n worker = FakeWorker(stop_d=worker_stop_d)\n worker_helper = WorkerHelper()\n worker_helper._workers.append(worker)\n d = worker_helper.cleanup_worker(worker)\n self.assertEqual(worker_helper._workers, [])\n self.assertEqual(d, worker_stop_d)", "def Finalize():\n pass", "def test_generator_cleanup():\n try:\n yield 1\n finally:\n print('cleanup')", "def is_finalizing(): # real signature unknown; restored from __doc__\n pass", "def finalize(self) -> None:\n pass", "def finalize(self):\n for p in self._processes:\n if p.join(30) is None and p.exitcode is None:\n p.kill()", "def finalize(self):", "def finalize(self):", "def tearDown(self):\n self._invoker = None\n self.implementation.destantiate(self._memo)\n self._digest_pool.shutdown(wait=True)", "def _finalize(cls, ret, counter, fill_value):\n pass" ]
[ "0.6559201", "0.6559201", "0.6393924", "0.6308009", "0.6308009", "0.6308009", "0.5953244", "0.58854395", "0.58499384", "0.5809321", "0.57099277", "0.5622887", "0.56003356", "0.5595621", "0.55558133", "0.55535084", "0.54888344", "0.5488354", "0.5486915", "0.5448982", "0.5404258", "0.5401435", "0.5384357", "0.53682834", "0.5362848", "0.536006", "0.5359178", "0.5359178", "0.53386915", "0.5330643" ]
0.66419667
0
given a list of URLs to JSON schema files return a SchemaTemplate object
def _load(self, list_of_schema_urls): for uri in list_of_schema_urls: with urllib.request.urlopen(uri) as url: data = {} try: data = json.loads(url.read().decode()) except: print("Failed to read schema from " + uri) self._parser._load_schema(data) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def load_schemas():\n schemas = {}\n for filename in os.listdir(get_abs_path('schemas')):\n path = get_abs_path('schemas') + '/' + filename\n file_raw = filename.replace('.json', '')\n with open(path) as file:\n schemas[file_raw] = Schema.from_dict(json.load(file))\n return schemas", "def generate_schema_list():\n src = os.path.join(os.path.dirname(__file__), '../schemas')\n for root, dirs, files in os.walk(src):\n for fname in files:\n if not fname.endswith('.yaml'):\n continue\n if os.path.splitext(fname)[0] in (\n 'draft-01', 'asdf-schema-1.0.0'):\n continue\n yield os.path.join(root, fname)", "def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema", "def _generate_schema_from_datafiles(datasets):\n\n schema = {}\n for dataset in datasets:\n schema.update(dataset.native_schema)\n\n return schema", "def load_remote_schema(template_url):\n response = requests.get(template_url)\n response.raise_for_status()\n tf = response.text\n schema = yaml.load(tf)\n assert isinstance(schema, dict)\n return schema", "def _load_schemas(self) -> None:\n schema_paths = self._root.rglob(\"*.json\")\n for schema_path in schema_paths:\n schema = json.loads(schema_path.read_text())\n\n if self._suffix:\n schema[\"name\"] = f'{schema[\"name\"]}{self._suffix}'\n\n fqn = get_avro_fqn(schema)\n self.schemas[fqn] = schema", "def get_schemas(source: Path) -> Dict[str, Optional[dict]]:\n schema_cache = {}\n ext = '.schema'\n\n for filename in config.schemas:\n if filename.endswith(ext):\n schema_name = filename.rsplit(ext, maxsplit=1)[0]\n schema = validate_json_file(source / filename)\n schema_cache[schema_name] = schema\n if not isinstance(schema, dict):\n msg = 'BAD SCHEMA. 
A JSON schema must be of type \"dict\" (Python) / \"object\" (Javascript)'\n logger.write(filename, msg)\n\n return schema_cache", "def parse_schema(schemaurl, schema_dir=None):\n if schema_dir:\n try:\n # attempts to open .schema file in directory schema_dir\n local_schema_path = schema_dir + \"/\" + \\\n schemaurl[schemaurl.rfind('/') + 1:-1] + \".schema\"\n print \"Looking for schema in file %s\" % (local_schema_path)\n schema = json.load(open(local_schema_path))\n except Exception as e:\n print \"Couldn't load schema %s from file %s\\n%s\" % (\n schemaurl, schema_dir, str(e))\n return None\n else:\n # load the schema directly from schemaurl, i.e., from the web\n try:\n schema = json.load(urllib2.urlopen(schemaurl))\n except Exception as e:\n print \"Couldn't load schema %s\\n%s\" % (schemaurl, str(e))\n return None\n\n if 'extends' in schema and '$ref' in schema['extends']:\n\n parent_schema = json.load(urllib2.urlopen(schema['extends']['$ref']))\n while (True): # exits loop when no additional extensions (break below)\n for key in sorted(parent_schema.keys()):\n if key not in schema:\n schema[key] = parent_schema[key]\n # need to merge these keys individually\n if key == 'properties':\n for key in sorted(parent_schema['properties'].keys()):\n if key not in schema['properties']:\n schema['properties'][key] = parent_schema[\n 'properties'][key]\n if 'extends' in parent_schema:\n parent_schema = json.load(\n urllib2.urlopen(parent_schema['extends']['$ref']))\n else:\n break\n # essentially a do while loop (exit condition)\n\n return schema", "def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)", "def __json_schema_generator(file):\n try:\n data = json.load(file)\n metadata_set = set()\n try:\n for datum in data['meta']['view']['columns']:\n metadata_set.add(datum['name'])\n except Exception as e:\n metadata_set.clear()\n for datum in data:\n if isinstance(datum, str):\n metadata_set.add(datum)\n else:\n for datum_property in datum:\n metadata_set.add(str(datum_property))\n\n metadata_list = list(metadata_set)\n # assumes list of objects with sparsse data\n # OR\n # for data_property in data[0]:\n # metadata_list.append(data_property)\n # assumes list of objects and that first entry has full list of properties\n\n return SchemaGenerator.__build_schema(metadata_list)\n except Exception as e:\n logging.error('Failed to parse json file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from json file.\")", "def parse_schemas_17(parser, xnat_serssion, extension_types=True):\n if extension_types:\n schemas_uri = '/xapi/schemas'\n try:\n schema_list = xnat_serssion.get_json(schemas_uri)\n except exceptions.XNATResponseError as exception:\n message = 'Problem retrieving schemas list: {}'.format(exception)\n xnat_serssion.logger.critical(message)\n raise ValueError(message)\n else:\n schema_list = DEFAULT_SCHEMAS\n\n for schema in schema_list:\n if extension_types or schema in ['xdat', 'xnat']:\n parser.parse_schema_uri(xnat_session=xnat_serssion,\n schema_uri='/xapi/schemas/{schema}'.format(schema=schema))", "def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path 
{relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)", "def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }", "def import_schemas_from_file():\n with open('./tblSchemas') as schemas_file:\n schemas = {}\n for line in schemas_file:\n line = line.split()\n if len(line) == 0: continue\n if line[0] == 'tblname':\n tbl_name = line[1]\n schemas[tbl_name] = []\n else:\n schemas[tbl_name].append(line)\n return schemas", "def extract_data(filename: str, schema_filename: str) -> DataFrame:\n data = []\n try:\n with open(schema_filename) as f:\n schema = json.load(f)\n with open(filename) as f:\n for line in f:\n json_doc = json.loads(line)\n if is_valid_data(json_doc, schema):\n data.append(json_doc)\n except ValueError as e:\n log.error(f\"Error parsing json: {e}\")\n except FileNotFoundError as e:\n log.error(f\"File not found error: {e}\")\n raise e\n except Exception as e:\n log.error(e)\n raise e\n return DataFrame(data)", "def create_scheme_file(\n mapping_list: List[MappingField], dataset_name, outputfile_schema\n) -> None:\n logger.info(\"creating modeling rules schema\")\n name_type_dict = {}\n for mapping_rule in mapping_list:\n for raw_event_data in mapping_rule.get_mapped_to_raw_list():\n keys_list = raw_event_data.field_path_raw.split(\".\")\n name = keys_list[0]\n if name not in name_type_dict:\n name_type_dict[name] = raw_event_data.create_schema_types()\n modeling_rules_json = {dataset_name: name_type_dict}\n\n with open(outputfile_schema, \"w\") as f:\n json.dump(modeling_rules_json, f, indent=4)\n logger.info(\"Finished creating modeling rules schema\\n\")", "def read_schemas(schemas_path):\r\n\r\n # Generate list of all schema files in directory\r\n p = Path(schemas_path).glob(\"*.schema\")\r\n # Check if each item in list is not directory\r\n schemas_list = [x for x in p if x.is_file()]\r\n return schemas_list", "def _get_schema_from_object(self, data):\n if \"items\" in data:\n return self._get_schema_from_object(data[\"items\"])\n\n url_key = None\n\n if '$id' in data:\n url_key = '$id'\n\n if 'id' in data:\n url_key = 'id'\n\n if url_key:\n url = data[url_key]\n schema = Schema().build()\n schema.domain_entity = self.get_domain_entity_from_url(url)\n schema.high_level_entity = self.get_high_level_entity_from_url(url)\n schema.module = self.get_module_from_url(url)\n schema.url = url\n return schema\n\n return None", "async def get_schema_list(controller):\n created_schemas = await controller.schema.get_created_schema()\n if not created_schemas:\n raise HTTPException(\n status_code=404, detail=\"Something went wrong. 
Could not obtain schema list\"\n )\n return created_schemas", "def generate_example_schemas():\n def find_examples_in_schema(path):\n \"\"\"Returns generator for all examples in schema at given path\"\"\"\n with open(path, 'rb') as fd:\n schema_tree = yaml.load(fd)\n\n for node in treeutil.iter_tree(schema_tree):\n if (isinstance(node, dict) and\n 'examples' in node and\n isinstance(node['examples'], list)):\n for desc, example in node['examples']:\n yield example\n\n for schema_path in generate_schema_list():\n for example in find_examples_in_schema(schema_path):\n yield (schema_path, example)", "def create_extract():\n with open(SCHEMA_FILE, \"r\") as f:\n SCHEMA = yaml.safe_load(f)\n\n with open(TOKEN_FILE, \"r\") as f:\n TOKEN = yaml.safe_load(f)\n\n hc = HyperCreator(SCHEMA, HYPER_FILE)\n ts = Tableau(TOKEN[\"server\"], TOKEN[\"site\"], TOKEN[\"name\"], TOKEN[\"value\"])\n\n for table in SCHEMA[\"tables\"]:\n with open(f\"{CONTENT_MANAGEMENT}/{table['query']}\", \"r\") as f:\n query = f.read()\n\n data = ts.query_metadata(query)\n data_map = getattr(GraphQL, table[\"name\"])(data)\n\n hc.populate_extract(table[\"name\"], data_map)", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def downloadSchemaFiles(self, outputFile, url):\n thingsFileFromUrl = urllib.request.urlopen(url)\n data = thingsFileFromUrl.read()\n with open(outputFile, 'w+') as output:\n output.write(data.decode('utf-8'))\n return outputFile", "def endpoint_schema(endpoint, extra_definitions={}):\n # load common schema template and update metadata\n schema = common.load_json(\"./templates/provider/endpoint.json\")\n schema[\"$id\"] = schema[\"$id\"].replace(\"endpoint.json\", f\"{endpoint}.json\")\n schema[\"title\"] = schema[\"title\"].replace(\"endpoint\", endpoint)\n\n # merge custom definitions with relevant common definitions\n definitions = common.load_definitions(\n \"string\",\n \"timestamp\",\n \"uuid\",\n \"version\",\n common.MDS_FEATURE_POINT\n )\n definitions.update(common.point_definition())\n definitions.update(extra_definitions)\n\n endpoint_schema = common.load_json(f\"./templates/provider/{endpoint}.json\")\n\n # for all but stops, merge standard vehicle info with items schema\n if endpoint not in [\"stops\"]:\n items = endpoint_schema[endpoint][\"items\"]\n vehicle = common.vehicle_definition()\n items[\"required\"] = vehicle[\"required\"] + items[\"required\"]\n items[\"properties\"] = { **vehicle[\"properties\"], **items[\"properties\"] }\n definitions.update(common.load_definitions(\"propulsion_type\", \"propulsion_types\", \"vehicle_type\"))\n\n # merge endpoint schema into the endpoint template\n data_schema = schema[\"properties\"][\"data\"]\n data_schema[\"required\"] = [endpoint]\n data_schema[\"properties\"] = 
endpoint_schema\n\n # insert definitions\n schema[\"definitions\"].update(definitions)\n\n return schema", "def sane_file_naming_schema(files):\n json_dict = {\"events\": []}\n for file in files:\n file_data = {}\n filename, extension = os.path.splitext(file)\n if not extension == \".done\":\n if extension == \".jpg\" or extension == \".png\":\n argument = Path(media_files_directory + file)\n if argument.is_file():\n width, height = read_image_size(argument)\n file_data['width'] = width\n file_data['height'] = height\n parts = filename.split(\"_\")\n if len(parts) == 1:\n # not a single field\n print(\"No valid scheme: \", file)\n return False\n if len(parts) > 1:\n # see if first field is a valid date\n try:\n file_data[\"datetime\"] = str(datetime.strptime(parts[0], \"%Y-%m-%d\"))\n except ValueError:\n print(\"Invalid scheme: \", file)\n return False\n if len(parts) == 2:\n # we have just to fields so the second is the title\n file_data[\"title\"] = parts[1]\n if len(parts) > 2:\n # three fields, so first and second field have to be date and time, third the title\n try:\n file_data[\"datetime\"] = str(datetime.strptime(str(parts[0] + \"_\" + parts[1]), \"%Y-%m-%d_%H-%M-%S\"))\n file_data[\"title\"] = parts[2]\n except ValueError:\n print(\"Invalid date/time: \", file)\n return False\n file_data[\"extension\"] = extension.strip(\".\")\n file_data[\"url\"] = file\n json_dict[\"events\"].append(file_data)\n\n return json_dict", "def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)", "def generate_schemas(self):\n # type: (Generator) -> str\n schemas = {}\n for name, definition in self.parser.specification.get(\"definitions\",\n {}).items():\n schema = copy.deepcopy(definition)\n self.resolve_schema_references(schema)\n schemas[name] = json.dumps(schema, indent=4, sort_keys=True)\n\n return render_to_string(\n self.backend, \"schemas.py\", {\n \"schemas\": schemas,\n \"module\": self.module_name\n })", "def parse_file(\n filename: str, yamldocs: List[Dict[str, Any]], scollection: db.Standard_collection\n) -> Optional[List[defs.Document]]:\n\n resulting_objects = []\n for contents in yamldocs:\n links = []\n\n document: defs.Document\n register_callback: Callable[[Any, Any], Any]\n\n if not isinstance(\n contents, dict\n ): # basic object matching, make sure we at least have an object, go has this build in :(\n logger.fatal(\"Malformed file %s, skipping\" % filename)\n\n return None\n\n if contents.get(\"links\"):\n links = contents.pop(\"links\")\n\n if contents.get(\"doctype\") == defs.Credoctypes.CRE.value:\n document = defs.CRE(**contents)\n register_callback = register_cre\n elif contents.get(\"doctype\") == defs.Credoctypes.Standard.value:\n document = defs.Standard(**contents)\n register_callback = register_standard\n\n for link in links:\n doclink = parse_file(\n filename=filename,\n yamldocs=[link.get(\"document\")],\n scollection=scollection,\n )\n\n if doclink:\n if len(doclink) > 1:\n logger.fatal(\n \"Parsing single document returned 2 results this is a bug\"\n )\n document.add_link(\n defs.Link(\n document=doclink[0],\n ltype=link.get(\"type\"),\n tags=link.get(\"tags\"),\n )\n )\n if register_callback:\n register_callback(document, collection=scollection) # type: ignore\n else:\n 
logger.warning(\"Callback to register Document is None, likely missing data\")\n\n resulting_objects.append(document)\n return resulting_objects", "def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema" ]
[ "0.641576", "0.63740885", "0.62452924", "0.61295396", "0.59485245", "0.59202313", "0.584993", "0.58236873", "0.57742834", "0.56165123", "0.5564679", "0.55167973", "0.54723084", "0.5468001", "0.541009", "0.5409211", "0.53917426", "0.53839236", "0.535626", "0.53263766", "0.5314644", "0.5280605", "0.5271791", "0.5270984", "0.52643186", "0.52612215", "0.52606463", "0.52594566", "0.5242185", "0.5223383" ]
0.7021566
0
load a JSON schema representation
def _load_schema(self, json_schema):
    # use jsonrefs to resolve all $refs in json
    data = jsonref.loads(json.dumps(json_schema))
    return self.__initialise_template(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = self.alter_key(concept.attrib['id'])\n self.add_item_to_field_order(concept_id)\n concept_name = concept.find('./Name').text\n concept_description = concept.find('./Description').text\n parent = concept.find('./Parent/Ref')\n key_parts = [concept_id, concept_id] if parent is None else [parent.attrib['id'], concept_id]\n translation_key = '.'.join(key_parts)\n jsonschema_field = {\n 'type': ['string', 'null'],\n 'title': concept_name,\n 'description': concept_description,\n 'translation_key': translation_key,\n }\n if self.scope is not None:\n jsonschema_field['scope'] = self.scope\n schema['properties'][concept_id] = jsonschema_field\n\n self.schema = schema", "def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema", "def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)", "def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema", "def load_schema(name):\r\n\r\n data = pkgutil.get_data(__package__, \"schemas/{0}.json\".format(name))\r\n return json.loads(data.decode(\"utf-8\"))", "def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)", "def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def load_schema(path, collection, readonly):\n return JSONStorage(path, collection, readonly)", "def read_json_schema(schema_file_path):\n with open(schema_file_path) as f:\n schema = json.load(f)\n return schema", "def get_schema(filename: str) -> dict:\n return _load_json_schema(filename)", "def load_validation_schema(self) -> t.Dict[str, t.Any]:\n if self._schema is None:\n try:\n self._schema = json.loads(self.schema())\n except KeyError:\n device_type_striped = self._device_type.lower().rstrip(string.digits)\n with open(_CT_FILES[device_type_striped], encoding=\"utf-8\") as file_:\n self._schema = json.load(file_)\n return 
self._schema # type: ignore", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def load_schema(schema_path):\n with open(schema_path) as schema_file:\n return Utils.parse(schema_file.read())", "def test_load_json():\n schema = pa.schema([\n pa.field(\"foo\", pa.int32()),\n pa.field(\"bar\", pa.int64())\n ])\n\n path = \"{}/tests/fixtures/simple_json.txt\".format(os.getcwd())\n\n converted_data = client.load_json(path, schema)\n assert converted_data.to_pydict() == {'foo': [1, 10], 'bar': [2, 20]}", "def schema_load(filename):\n print(uc.schema_load(filename))", "def to_json_schema(cls):\n return parsers.to_json_schema(cls)", "def load_schema(self, schema):\n if not self.default_schema_loaded:\n self.load_default_schema()\n # load JSON-LD file of user defined schema\n self.schema_extension_only = preprocess_schema(load_json_or_yaml(schema))\n if \"@context\" in self.schema_extension_only:\n self.context.update(self.schema_extension_only[\"@context\"])\n # convert user defined schema into a networkx DiGraph\n self.schema_extension_nx = load_schema_into_networkx(self.schema_extension_only)\n # update undefined classes/properties\n undefined_nodes = [node for node, attrdict in self.schema_extension_nx.node.items() if not attrdict]\n attr_dict = {}\n \n for _node in undefined_nodes:\n if _node in self.schemaorg_nx.nodes():\n attr_dict[_node] = self.schemaorg_nx.nodes[_node]\n nx.set_node_attributes(self.schema_extension_nx, attr_dict)\n # merge networkx graph of user-defined schema with networkx graph of schema defined by Schema.org\n #self.schema_nx = merge_schema_networkx(self.schemaorg_nx, self.schema_extension_nx)\n self.schema_nx = self.schema_extension_nx\t\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_full_schema()\n # merge together the given schema and the schema defined by schemaorg\n #self.schema = merge_schema(self.schema_extension_only, self.schemaorg_schema)\n self.schema = self.schemaorg_schema\n # split the schema networkx into individual ones\n isolates = list(nx.isolates(self.schema_nx))\n \n for node, attrdict in self.schema_extension_nx.node.items():\n if not 'type' in attrdict:\n self.schema_extension_nx.nodes[node][\"type\"] = \"Class\" \n for node, attrdict in self.schema_nx.node.items():\n if not 'type' in attrdict:\n self.schema_nx.nodes[node][\"type\"] = \"Class\" \n \n self.extended_class_only_graph = self.schema_extension_nx.subgraph([node for node, attrdict in self.schema_extension_nx.node.items() if attrdict['type'] == 'Class' and node not in isolates])\n self.full_class_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Class'])\n self.property_only_graph = self.schema_nx.subgraph([node for node, attrdict in self.schema_nx.node.items() if attrdict['type'] == 'Property'])\n # instantiate converters for classes and properties\n self._all_class_uris = [node for node,attrdict in self.schema_nx.node.items() if attrdict['type'] in ['Class', 'DataType']]\n self.cls_converter = CurieUriConverter(self.context,\n self._all_class_uris)\n self._all_prop_uris = list(self.property_only_graph.nodes())\n self.prop_converter = CurieUriConverter(self.context,\n self._all_prop_uris)", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with 
self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def schema2jsonschema(self, schema):\n fields = get_fields(schema)\n Meta = getattr(schema, \"Meta\", None)\n partial = getattr(schema, \"partial\", None)\n ordered = getattr(schema, \"ordered\", False)\n\n jsonschema = self.fields2jsonschema(fields, partial=partial, ordered=ordered)\n\n if hasattr(Meta, \"title\"):\n jsonschema[\"title\"] = Meta.title\n if hasattr(Meta, \"description\"):\n jsonschema[\"description\"] = Meta.description\n if hasattr(Meta, \"unknown\") and Meta.unknown != marshmallow.EXCLUDE:\n jsonschema[\"additionalProperties\"] = Meta.unknown == marshmallow.INCLUDE\n\n return jsonschema", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def json_schema(schema_file=None, output=\"-\"):\n schemas = read_yaml(schema_file)\n dump_yaml(output, JsonSchemaConverterFromAccessSchema.convert_schemas(schemas))", "def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):\r\n\r\n # Only one of file_name or schema_obj must be set\r\n assert bool(file_name) != bool(schema_obj)\r\n\r\n if path_prefix:\r\n spec_path = os.path.join(spec_path, \"APIs/schemas/\")\r\n base_path = os.path.abspath(spec_path)\r\n if not base_path.endswith(\"/\"):\r\n base_path = base_path + \"/\"\r\n if os.name == \"nt\":\r\n base_uri_path = \"file:///\" + base_path.replace('\\\\', '/')\r\n else:\r\n base_uri_path = \"file://\" + base_path\r\n\r\n loader = jsonref.JsonLoader(cache_results=False)\r\n\r\n if file_name:\r\n json_file = str(Path(base_path) / file_name)\r\n with open(json_file, \"r\") as f:\r\n schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n elif schema_obj:\r\n # Work around an exception when there's nothing to resolve using an object\r\n if \"$ref\" in schema_obj:\r\n schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)\r\n else:\r\n schema = schema_obj\r\n\r\n return schema", "def deserialize(self, data, schema, **kwargs):\n return self.serializer.load(data, schema, **kwargs)", "def load_yaml(fname, schema=None):\n with open(fname) as fh:\n data = yaml.safe_load(fh.read())\n if schema:\n import jsonschema\n jsonschema.validate(data, schema=schema)\n return data", "def read_json(json_file):\n with open(json_file) as schema:\n val = json.load(schema)\n\n return val", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)" ]
[ "0.7583523", "0.7520712", "0.74996924", "0.7430722", "0.7415444", "0.7349981", "0.7319478", "0.7192056", "0.7140964", "0.71265936", "0.7098515", "0.70178914", "0.6897158", "0.6762067", "0.67160565", "0.66375107", "0.6575512", "0.6442525", "0.6335049", "0.6334846", "0.63198113", "0.628617", "0.6270117", "0.6270117", "0.62475014", "0.6241082", "0.6216493", "0.62054175", "0.61938", "0.616733" ]
0.7622569
0
Read two files (networkr.csv, user_by_city) and create the Graph object. (user_ids will be reordered because the Graph object needs consecutive integers for its vertex indices.)
def read_file(network_filename, user_by_city_filename=None):
    graph = read_dictlist_from_file(network_filename)
    gg = Graph(directed=False)  # new Graph object
    user_id_map = {}  # storing new id info
    new_id = 0
    for user_id in graph:
        temp_users = []
        temp_users.append(user_id)
        for friend in graph[user_id]:
            temp_users.append(friend)
        for id1 in temp_users:
            if id1 not in user_id_map:
                user_id_map[id1] = new_id
                gg.add_vertex()  # index for this vertex will be new_id
                new_id += 1
            if id1 > user_id:
                gg.add_edge(gg.vertex(user_id_map[user_id]), gg.vertex(user_id_map[id1]))
    print "Done reading the graph."
    if user_by_city_filename is None:
        return (gg, None)
    if user_by_city_filename is not None:
        cities = read_dict_from_file(user_by_city_filename)
        # Adding vertex property as city
        city_prop = gg.new_vertex_property("int")
        for user_id in cities:
            city_prop[gg.vertex(user_id_map[user_id])] = cities[user_id]
        print "Done reading the city."
        return (gg, city_prop)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_graph_from_csvs(path, sep=\";,.\", num_files=None):\n assert type(path)==str, \"Path must be string type\"\n\n if path[-1] == '/':\n path = path[:-1]\n\n file_list = glob.glob(path+'/*.csv')\n\n if not num_files:\n num_files = len(file_list)\n\n graph = nx.DiGraph()\n\n for file in file_list[:num_files]:\n print(\"Opening \", file)\n with open(file, 'r') as file_to_be_read:\n for line in file_to_be_read:\n user1, user2 = line.split(sep)\n user1 = clean_user_name(user1)\n user2 = clean_user_name(user2)\n if user1 != \"NAN\" and user2 != \"NAN\":\n graph.add_edge(user1, user2)\n\n return graph", "def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)", "def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict", "def read_graph(filename):\n\n print(\"\\n\\n========== Loading graph: \" + filename + '==================')\n edges = {}\n\n inFile = open(filename)\n for line in inFile:\n roadInfo = line.split()\n\n # Skip blank lines, read in contents from non-empty lines.\n if (len(roadInfo) > 0):\n srcCity = roadInfo[0]\n destCity = roadInfo[1]\n\n if srcCity in edges:\n edges[srcCity] = edges[srcCity] + [destCity]\n else:\n edges[srcCity] = [destCity]\n\n if destCity in edges:\n edges[destCity] = edges[destCity] + [srcCity]\n else:\n edges[destCity] = [srcCity]\n\n print(\" done.\\n\")\n return edges", "def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph", "def users(city_file):\n # TODO: complete function\n users_breakdown = city_file.groupby('User Type').size()\n list_users_breakdown = users_breakdown.to_string(header=False).split()\n user_subscriber = 0\n 
user_customer = 0\n user_dependent = 0\n\n for i, e in enumerate(list_users_breakdown):\n if e == 'Subscriber' in list_users_breakdown:\n user_subscriber = list_users_breakdown[i + 1]\n elif e == 'Customer' in list_users_breakdown:\n user_customer = list_users_breakdown[i + 1]\n elif e == 'Dependent' in list_users_breakdown:\n user_dependent = list_users_breakdown[i + 1]\n\n return (user_subscriber, user_customer, user_dependent)", "def read (path):\n\n with open(path) as f:\n reader = csv.DictReader(f)\n edges, nodes = [], {}\n for row in reader:\n edges.append((row[\"NODE1\"], row[\"NODE2\"]))\n nodes[row[\"NODE1\"]] = [eval(row[\"LONG1\"]), eval(row[\"LAT1\"])]\n nodes[row[\"NODE2\"]] = [eval(row[\"LONG2\"]), eval(row[\"LAT2\"])]\n\n return nodes, edges", "def read_data(city='Chicago'):\n df = pd.read_csv('train.csv')\n df = df[df.City==city]\n\n # I will be using this dictionary to convert direction to degrees\n degrees = {'N':0, 'NE':45, 'E':90, 'SE':135, 'S':180, 'SW':225, 'W':270, 'NW':315}\n\n df[\"EntryHeading_deg\"] = df.EntryHeading.apply(lambda x:degrees[x])\n df[\"ExitHeading_deg\"] = df.ExitHeading.apply(lambda x:degrees[x])\n df[\"TurnDegree\"] = (df.EntryHeading_deg-df.ExitHeading_deg).apply(lambda x: x if abs(x) <=180 else (x+360 if x<0 else x-360))\n df[\"TurnDegree\"] = df.TurnDegree.apply(lambda x: x if x != -180 else x*-1)\n\n # Lets assign a number(StreetId) to each street\n all_streets = np.concatenate([df.ExitStreetName.reindex().values, df.EntryStreetName.reindex().values])\n # there are some nan values so lets just replace them with Unknown\n street_name_list = ['Unknown' if type(x)==type(0.0) else x for x in all_streets]\n street_names = {name: num for num, name in enumerate(street_name_list)}\n df[\"EntryStreetId\"] = np.array([street_names[x] if x in street_names else -999 for x in df.EntryStreetName])\n df[\"ExitStreetId\"] = np.array([street_names[x] if x in street_names else -999 for x in df.ExitStreetName])\n\n # we also want to categorize the street by its type (road, boulevard, ...)\n street_types = {n: i for i, n in enumerate(np.unique([x.split()[-1] for x in street_names.keys()]))}\n street_name_to_type = {}\n for name in street_names.keys():\n typ = name.split()[-1]\n street_name_to_type[name] = street_types[typ]\n df[\"EntryStreetType\"] = np.array([street_name_to_type[x] if x in street_names else -999 for x in df.EntryStreetName])\n df[\"ExitStreetType\"] = np.array([street_name_to_type[x] if x in street_names else -999 for x in df.ExitStreetName])\n\n df[\"EnterHighway\"] = np.array([1 if type(x)==type('') and x.split()[-1] in ['Broadway', 'Parkway', 'Expressway', 'Highway'] else 0 for x in df.EntryStreetName])\n df[\"ExitHighway\"] = np.array([1 if type(x)==type('') and x.split()[-1] in ['Broadway', 'Parkway', 'Expressway', 'Highway'] else 0 for x in df.ExitStreetName])\n df['Season'] = np.array([1 if month in (12,1,2) else 2 if month in (6,7,8) else 3 for month in df.Month.reindex().values])\n df['RushHour'] = np.array([1 if hour in (7,8,9) else 2 if hour in (16,17,18) else 3 if hour>=10 and hour<=15 else 4 for hour in df.Hour])\n return df", "def parse_users(filename):\n users = {}\n with open(filename, 'r') as fn:\n reader = csv.reader(fn)\n skip_rows(reader, 2)\n for row in reader:\n users[int(row[0])] = Point(float(row[1]), float(row[2]))\n \n return users", "def get_tw_nodelist(path_tw, path_connection, path_tw_nodelist, path_tw_core_nodelist, csv):\n names = pd.Series()\n core_names = []\n\n # iterate all .csv 'following' files, each file 
belongs to one core user\n for filename in os.listdir(path_tw):\n\n # append id to core user list\n core_names.append(clear_filename2(filename))\n\n # also append core user to complete ids\n names = names.append(pd.Series(clear_filename2(filename)))\n\n # read following info\n df = pd.read_csv(path_tw + filename, index_col=0)\n\n if not df.empty:\n # append friend (following) contacts to complete id series\n names = names.append(df['screen_name'])\n\n names = names.unique()\n\n # create nodelist\n nodelist = pd.DataFrame(columns=['id', 'label', 'timeset', 'relevant'])\n\n # fill complete names\n nodelist['id'] = names\n\n # read connection info\n connect = pd.read_csv(path_connection, index_col=0).drop(['twitterid'], axis=1)\n\n # label complete list as core or follow node\n nodelist.loc[nodelist['id'].isin(core_names), 'relevant'] = 'core'\n nodelist['relevant'].fillna('follow', inplace=True)\n nodelist['label'] = nodelist['relevant']\n\n # rename screen_name for merge\n connect.rename(columns={'twitterusername': 'id'}, inplace=True)\n\n # create core nodelist by merging complete nodelist with connection df\n core_nodelist = pd.merge(nodelist, connect, on='id')\n core_nodelist['label'] = core_nodelist['id']\n\n if csv:\n #nodelist.to_csv(path_tw_nodelist, index=False)\n core_nodelist.to_csv(path_tw_core_nodelist, index=False)", "def graph_from_file(self,\n filename,\n delimiter,\n source_label,\n target_label,\n data_source=None,\n source_attributes=[],\n target_attributes=[]):\n with open(filename) as f:\n reader = csv.DictReader(f, delimiter=delimiter)\n data = list(reader)\n return graph_from_dict(data, source_label, target_label, data_source,\n source_attributes, target_attributes)", "def read_file(path):\n\tG = nx.Graph()\n\n\twith open(path, 'r') as in_file:\n\t\tfor line in in_file:\n\t\t\tcontents = line.split(\" \")\n\t\t\tu = int(contents[0])\n\t\t\tv = int(contents[1])\n\t\t\tstreet_type = int(contents[2])\n\t\t\ttime = int(contents[3])\n\t\t\tlength = int(contents[4])\n\t\t\tcost = 1/float(length)\n\t\t\t\n\t\t\tG.add_node(u)\n\t\t\tG.add_node(v)\n\t\t\tif street_type is 1:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\telse:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\t\tG.add_edge(v, u, street_type=street_type, time=time, length=length, cost=cost)\n\n\treturn G", "def load_file(filename):\n # Create matrix from csv lines\n with open(filename) as f:\n m = [list(map(int, line.split(','))) for line in f]\n # Create digraph from matrix\n graph = utils.graph.DiGraph()\n ROWS = len(m)\n COLS = len(m[0])\n for r in range(ROWS):\n for c in range(COLS):\n u = (r, c)\n # Add add to node to the right\n if c+1 < COLS:\n v = (r, c+1)\n weight = m[r][c+1]\n graph.add_edge(u, v, weight)\n # Add add to node below\n if r+1 < ROWS:\n v = (r+1, c)\n weight = m[r+1][c]\n graph.add_edge(u, v, weight)\n # Add add to node above\n if 0 <= r-1:\n v = (r-1, c)\n weight = m[r-1][c]\n graph.add_edge(u, v, weight)\n # also add a start element and create edges to first column\n start_node = 'START'\n for row in range(ROWS):\n node = (row, 0)\n weight = m[row][0]\n graph.add_edge(start_node, node, weight)\n # also add an end element and create edges to the list column\n end_node = 'END'\n c = COLS-1\n for row in range(ROWS):\n node = (row, c)\n weight = 0 # Valid?\n graph.add_edge(node, end_node, weight)\n return graph, start_node, end_node", "def loadDataZachary(fileName):\n\n \"Initialize a graph\"\n G = 
nx.Graph()\n\n \"Open file\"\n f = open(fileName)\n\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n while line:\n if(line[0]!=\"%\"):\n ls =line.split(' ')\n num,nums=int(ls[0]),int(ls[1])\n G.add_edge(num,nums)\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n\n \"Closing the file\"\n f.close()\n\n return G, 'Zachary'", "def draw_network(graph, users, filename):\n ###TODO-- Completed\n candidate_names = [user['screen_name'] for user in users]\n plt.figure(figsize=(12,12))\n candidate_labels = {node: node if node in candidate_names else '' for node in graph.nodes_iter()}\n #print(candidate_labels)\n nx.draw_networkx(graph, labels=candidate_labels, alpha=0.5, node_color='r', node_size=100, width=0.1)\n #plt.show()\n plt.axis('off')\n plt.savefig(filename)\n #pass", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def process_input(input_path):\n\n # Parse lines from input file into list\n with open(input_path, 'r') as input_file:\n lines = input_file.readlines()\n\n # Declare component lists and helper variables\n vertex_map = {} # Mapping of named vertices to indices, handles duplicate connections\n idx = 0\n edges = [] # List of (src, dst) tuples\n weights = [] # Weight of each edge\n\n for line in lines:\n # Parse each line of csv or text file\n if input_path.endswith('.csv'):\n parts = line.split(',')\n else:\n parts = line.split()\n\n # Add source vertex to list of vertices\n src = parts[0]\n if src not in vertex_map:\n vertex_map[src] = idx\n idx += 1\n\n # Add destination vertex to list of vertices\n dst = parts[1]\n if dst not in vertex_map:\n vertex_map[dst] = idx\n idx += 1\n\n # Add integer representation of edges to list of connections\n edges.append((vertex_map[src], vertex_map[dst]))\n weights.append(parts[2])\n\n # Get definite list of vertices\n vertices = vertex_map.keys()\n\n # Print graph information\n vprint(str(len(vertices)) + ' vertices')\n vprint(str(len(edges)) + ' edges')\n\n # Build IGraph representation of network\n graph = ig.Graph(edges, directed=False)\n graph.es['weight'] = [weights[e] for e in range(len(graph.es))]\n\n return graph, vertices", "def load_graph(file_name):\r\n citizens = []\r\n f = open(file_name, 'r')\r\n number_citizens = int(f.readline())\r\n \r\n # creates the citizen's list.\r\n for i in range(number_citizens):\r\n # creates citizen object\r\n citizen = Citizen(i)\r\n citizens.append(citizen)\r\n\r\n # we need this second loop because we cannot create the list of friends \r\n # if we don't have the whole list of citizens in memory.\r\n for citizen in citizens:\r\n # loads basic infor\r\n inf_list = f.readline().split(';')\r\n citizen.location = int(inf_list[1])\r\n citizen.influence_level = int(inf_list[2])\r\n citizen.proactivity_level = inf_list[3]\r\n \r\n # loads opinions\r\n opinions_list = f.readline().split(';')\r\n opinions = {}\r\n \r\n for op in opinions_list[:-1]:\r\n cat_weight = op.split(':')\r\n cat = int(cat_weight[0])\r\n weight = float(cat_weight[1])\r\n idea 
= Idea(1,'',cat, weight)\r\n opinions[cat] = idea\r\n\r\n citizen.opinions = opinions\r\n \r\n # loads friends \r\n friends_ids_list = f.readline().split(';')\r\n friends = []\r\n for friend_id in friends_ids_list[:-1]:\r\n # note that we match the position of the citizen in the citizens list with its id.\r\n friends.append(citizens[int(friend_id)])\r\n \r\n citizen.friends = friends\r\n \r\n f.close()\r\n \r\n return citizens", "def __init__(self, filename=\"movie_data_small.txt\"):\r\n self.titles = set()\r\n self.actors = set()\r\n self.graph = nx.Graph()\r\n #read data from file\r\n with open(filename, encoding=\"utf8\") as myfile:\r\n content = myfile.readlines()\r\n for c in content:\r\n #split title and each name\r\n cline = c.strip().split('/')\r\n #add title to title set and graph\r\n self.titles.add(cline[0])\r\n self.graph.add_node(cline[0])\r\n #for each actor in movie\r\n for cl in cline[1:]:\r\n #if actor is not already in graph, add them\r\n if cl not in self.actors:\r\n self.actors.add(cl)\r\n self.graph.add_node(cl)\r\n #create edge between actor and movie in graph\r\n self.graph.add_edge(cline[0],cl)", "def flight_paths(city1, city2, data=data):\n cities_to_travel = Graph() # instantiate a new graph\n location_dict = {} # empty dictionary to hold city, location, and distances\n for city in data: # creates dictionary of key cities, values: lat and long\n try:\n location_dict[city['city']] # check if city is already in dictionary\n except KeyError:\n location_dict[city['city']] = city['lat_lon'] # add's city as key and it's lat/long as value\n for city in data: # adds distances between each connected city\n for destination in city['destination_cities']:\n try: # adding edge and weights (distances) between cities\n cities_to_travel.add_edge(city['city'], destination, calculate_distance(city['lat_lon'], location_dict[destination]))\n except KeyError: # edge case; if connection already exists or points to city that doesn't have a lat/long\n pass\n try:\n to_return = cities_to_travel.bellman_ford(city1, city2) # Bellman Ford shortest path through city\n if to_return[0] == float(\"inf\"):\n raise KeyError(\"City does not exist\")\n else:\n return to_return\n except KeyError:\n raise KeyError('City has no Lat or Long given, or does not exist')", "def _read_csv(file: str, semester: str, users: set, courses: set, users_to_courses: dict):\n\n with open(file, mode='r') as file:\n reader = csv.reader(file, delimiter=',', quotechar='\"')\n\n for row in reader:\n email = row[-1]\n users.add((email, None, None))\n\n course_parts = row[1].split()\n course_name = '{}{}'.format(course_parts[0], course_parts[1])\n course = (course_name, semester)\n courses.add(course)\n\n users_to_courses[email].append([course_name, semester])", "def graph_reader(path):\n graph = nx.from_edgelist(pd.read_csv(path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def construct_graph(social_edges, spatial_edges, output_path=None):\n G = nx.DiGraph()\n with open(social_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], USER_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n business_nodes = set([])\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n lat = float(edge[2])\n lng = float(edge[3])\n if edge[-2] not in business_nodes:\n G.add_node(BUSINESS_NODE_PREFIX + edge[-2], spatial={'lat': lat, 'lng': lng})\n business_nodes.add(edge[-2])\n\n with 
open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], BUSINESS_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n if output_path:\n pickle.dump(G, open(output_path, 'w'))\n return G", "def parse_file(filename):\n user_ratings = {}\n movie_ratings = {}\n \n import csv\n with open(filename, 'r') as f: \n reader = csv.reader(f) \n for row in reader:\n movie_id = int(row[0])\n user_id = int(row[1])\n rating = float(row[2]) \n user_ratings.setdefault(user_id, {}).update({movie_id: rating})\n movie_ratings.setdefault(movie_id,{}).update({user_id: rating})\n \n return user_ratings, movie_ratings", "def get_network(users_ids, as_edgelist=True):\r\n edges = []\r\n amount = 0\r\n for ind1 in range(len(users_ids)):\r\n for ind2 in range(len(users_ids)):\r\n try:\r\n if users_ids[ind2] in get_friends(users_ids[ind1])['response']['items']:\r\n edges += [(ind1, ind2)]\r\n amount += 1\r\n except KeyError:\r\n pass\r\n\r\n g = Graph(vertex_attrs={\"label\": users_ids},\r\n edges=edges, directed=False)\r\n\r\n plot_graph(g, amount)", "def data_for_app():\n \n import csv\n\n # data structures\n cities = []\n coordinates = {}\n speedlimits = {}\n adjlist = {} # adjacency list\n\n # read city data\n with open('src/data/cities.csv', 'r') as f:\n r = csv.reader(f)\n rows = [row for row in r]\n headers = rows[0]\n data = rows[1:]\n\n # process city data\n for row in data:\n cities.append(row[0])\n coordinates[row[0]] = (float(row[2]), float(row[1])) # (east coordinate, north coordinate) as in x-y coordinate system\n adjlist[row[0]] = []\n\n # read speed limit data\n with open('src/data/speedlimits.csv', 'r') as f:\n r = csv.reader(f)\n rows = [row for row in r]\n headers = rows[0]\n data = rows[1:]\n\n # process speed limit data\n for row in data:\n speedlimits[row[0]] = row[1]\n\n # read highway data\n with open('src/data/highways.csv', 'r') as f:\n r = csv.reader(f)\n rows = [row for row in r]\n headers = rows[0]\n data = rows[1:]\n\n # process highway data\n for row in data:\n start = row[1]\n end = row[2]\n duration = int(row[3]) / int(speedlimits[row[4]]) # duration from start city to end city in hours\n adjlist[start].append((end, duration))\n adjlist[end].append((start, duration))\n \n return (cities, coordinates, speedlimits, adjlist)", "def importer(self, nodesName = False, edgesName = False, mode = \"authorities\"):\n\n\t\tprint mode\n\t\tif nodesName:\n\t\t\tself.outputNodes = nodesName\n\t\tif edgesName:\n\t\t\tself.outputEdges = edgesName\n\t\t\n\t\tif mode == \"authorities\":\n\t\t\tids = {}\n\t\t\twith open(self.outputNodes, \"rt\") as nodes:\n\t\t\t\ti = 0\n\t\t\t\tfor line in nodes:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t#Index : id, label, item (0 = neo4j, 1 = authority), centrality\n\t\t\t\t\t\t\tif int(data[2]) == 1:\n\t\t\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.index[\"authorities\"].append(data[1])\n\t\t\t\t\t\t\t\tids[data[0]] = data[1]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint data\n\t\t\t\t\t\t\t#Index : id, label, item (0 = neo4j, 1 = authority), centrality\n\t\t\t\t\t\t\tif int(data[3]) == 1:\n\t\t\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.index[\"authorities\"].append(data[1])\n\t\t\t\t\t\t\t\tids[data[0]] = data[1]\n\t\t\t\t\ti += 1\n\n\t\t\tself.index[\"authorities\"] = set(self.index[\"authorities\"])\n\n\t\t\twith open(self.outputEdges, \"rt\") as 
edges:\n\t\t\t\ti = 0\n\t\t\t\tfor line in edges:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t#source;target\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\tif data[0] not in self.index[\"items\"]:\n\t\t\t\t\t\t\tself.index[\"items\"][data[0]] = []\n\t\t\t\t\t\tself.index[\"items\"][data[0]].append(ids[data[1].replace(\"\\n\", \"\")])\n\t\t\t\t\ti += 1\n\t\t\n\t\telif mode == \"cluster\":\n\t\t\tprint \"cluster mode\"\n\t\t\t#Nodes\n\t\t\twith open(self.outputNodes, \"rt\") as nodes:\n\t\t\t\ti = 0\n\t\t\t\tfor line in nodes:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\t#Index : id, label, centrality\n\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\t\t\t\t\ti += 1\n\t\t\t#Edges\n\t\t\twith open(self.outputEdges, \"rt\") as edges:\n\t\t\t\ti = 0\n\t\t\t\tfor line in edges:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t#source;target\n\t\t\t\t\t\tdata = line.replace(\"\\n\", \"\").split(\";\")\n\t\t\t\t\t\tif data[0] not in self.index[\"items\"]:\n\t\t\t\t\t\t\tself.index[\"items\"][data[0]] = []\n\t\t\t\t\t\tself.index[\"items\"][data[0]].append((data[1], float(1)/float(data[2])))\n\t\t\t\t\ti += 1\n\n\t\treturn True", "def populate_graph2(self, num_users, avg_friendships):\n # Reset graph\n self.reset()\n # random names to populate network with\n first_names = [\"John\", \"Frank\", \"George\", \"Jane\", \"Bruce\", \"Patrick\", \"James\",\n \"Superman\", \"Betty\", \"Wilma\", \"Michael\", \"Barney\", \"Spiderman\"]\n\n last_names = [\"Miller\", \"Wayne\", \"Rubble\", \"Doe\", \"McVey\",\n \"Gonzalez\", \"Replogle\", \"Franco\", \"Rae\", \"Espinosa\"]\n\n if num_users > avg_friendships:\n # Add users\n for _ in range(num_users):\n random_user_name = (random.choice(\n first_names) + \" \" + random.choice(last_names))\n\n self.add_user(random_user_name)\n\n # Create friendships\n for user_id in self.users:\n # repeat until user has at least avg_friendships\n while len(self.friendships[user_id]) < avg_friendships:\n random_id = random.randint(1, num_users)\n\n if random_id not in self.friendships[user_id] and user_id != random_id:\n self.add_friendship(user_id, random_id)\n else:\n print(\"Number of users must be larger that number of friendship links\")", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def csv_to_graph(csvfile):\r\n net_array = parse(csvfile)\r\n # Return a graph from numpy matrix\r\n net_G = nx.from_numpy_matrix(net_array)\r\n net_G = nx.convert_node_labels_to_integers(net_G, first_label=1)\r\n return net_G" ]
[ "0.6084426", "0.6013192", "0.5921239", "0.57427114", "0.57265186", "0.5637249", "0.5591132", "0.5589975", "0.5519558", "0.5494048", "0.54668725", "0.5440877", "0.54308814", "0.54280937", "0.5413682", "0.5358772", "0.53454447", "0.5342393", "0.5335665", "0.53317857", "0.53198475", "0.5299531", "0.5283537", "0.52718663", "0.52696896", "0.52506727", "0.52435726", "0.52345544", "0.523255", "0.52290034" ]
0.79204625
0
Returns the contents read from the CSV file filename. This function reads the contents of the file filename and returns the contents as a 2-dimensional list. Each element of the list is a row, with the first row being the header. Cells in each row are all interpreted as strings; it is up to the programmer to interpret this data, since CSV files contain no type information.
def read_csv(filename):
    # Implement this function
    file = open(filename)
    wrapper = csv.reader(file)
    result = []
    for rpos in wrapper:
        result = result + [rpos]
    file.close()
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(filename):\n with open(filename) as csv:\n return [csv_line.strip().split(',') for csv_line in csv]", "def import_csv(filename):\n with open(filename, newline='') as csvfile:\n reader_obj = csv.reader(csvfile, delimiter=',', quotechar='\"')\n data = list(reader_obj)\n return data", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def read_file(file):\n\tfile_contents = []\n\twith open(file) as csvfile:\n\t\treader = csv.reader(csvfile)\n\t\tfile_contents = list(reader)\n\treturn file_contents", "def readCSV(self):\n\n content = []\n with open(self.filename) as file:\n sn = csv.Sniffer()\n sn.preferred = [self.delimiter]\n try:\n dialect = sn.sniff(file.read(1024))\n except csv.Error:\n if not file.endswith(\"csv\"):\n self.delimiter = \"\\t\"\n file.seek(0)\n reader = csv.reader(file, delimiter=self.delimiter)\n dialect = reader.dialect\n file.seek(0)\n reader = csv.reader(file, dialect)\n rownr = 0\n\n for row in reader:\n\n if rownr == 0:\n header = row\n else:\n # print(row)\n content.append(row)\n rownr += 1\n\n file.close()\n\n return content.copy()", "def read_data(filename):\n print(\"Reading data from\", filename)\n with open(filename) as f:\n reader = csv.reader(f)\n data = []\n for row in reader:\n data.append(row)\n return data", "def read_csv(filename):\n\twith open(filename, newline = '') as filehandle:\n\t\t\n\t\t# create DictReader objects for inputs and read into memory\n\t\treader = csv.DictReader(filehandle, delimiter = '\\t')\n\t\tdata = []\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\t\t\t\n\treturn data", "def read_csv_file(file_name):\n table = []\n with open(file_name) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n for row in csvreader:\n table.append(row)\n return table", "def csvRowToStrings(filename):\n f = open(filename, 'r')\n line = f.readline()\n f.close()\n strings = line.split()\n return strings", "def readFile (filename):\n # some OSes need to know that the file might have some special characters\n f = open(filename)\n # convert reader to a list so we can close the file\n result = [ line.strip().split('\\t') for line in f if len(line) > 1 ]\n # close the file so we do not take up extra system resources\n f.close()\n # throw away the header row(s) of the data\n return result[1:]", "def _load_csv(filename):\n with open(filename) as f:\n csvreader = csv.reader(f, delimiter=str(u','),\n lineterminator='\\r\\n',\n quoting=csv.QUOTE_MINIMAL,\n quotechar=str(u'\"'))\n rows = [row for row in csvreader]\n return rows", "def read_csv_file(filename: str) -> any:\r\n with open(filename) as file:\r\n reader = csv.reader(file)\r\n next(reader)\r\n data = [process_row_p(row) for row in reader]\r\n\r\n return data", "def read_csv(csvfilename):\n rows = []\n with open(csvfilename, encoding='utf-8') as csvfile:\n file_reader = csv.reader(csvfile)\n for row in file_reader:\n rows.append(row)\n return rows", "def read_csv(file_name):\n final_list = []\n reader = csv.reader(open(file_name, 'rb'), delimiter=',')\n for x in reader:\n final_list.append(x)\n return final_list", "def read_txt(filename):\n result = []\n\n with open(filename) as input_file:\n for row in csv.reader(input_file):\n row = list(map(int, row))\n result.append(row)\n\n return result", "def read_data(filepath):\n data = []\n column_names = []\n\n with open(filepath, 'rt') as csvfile:\n data_reader = csv.reader(csvfile, delimiter=',')\n flag 
= False\n for row in data_reader:\n if not flag:\n column_names = row\n flag = True\n else:\n data.append(row)\n\n return column_names, np.array(data)", "def read_list(filename):\r\n listoutput = []\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n listoutput.append(item)\r\n return listoutput", "def csvread(file):\r\n thisfile = open(file)\r\n thisreader = csv.reader(thisfile)\r\n filelist = np.array(list(thisreader))\r\n return filelist", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def read_file(file):\n text = []\n with open(file, newline='') as f:\n reader = csv.reader(f)\n next(reader, None) # skip header row\n for row in reader:\n text.append(row)\n return text", "def readCSV(self, csvFileName):\n\tdata = []\n\twith open(csvFileName) as csvFile:\n\t\treader = csv.reader(csvFile)\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data", "def file_to_listrows(filename):\n rows = []\n with open(filename) as infile:\n content= infile.read()\n line = content.split('\\n')\n for cells in line:\n rows.append(cells.split('\\t'))\n return rows", "def read_csv(file_path, delimiter=\",\", quotechar='\"'):\n # Opening file\n with open(file_path, newline='') as csvfile:\n # Will be used to store content\n lsts = []\n\n # Loading and reading csv\n csv_data = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n # Adding data to container\n for row in csv_data:\n lsts.append(row)\n\n return lsts", "def read_csv_file(file_name):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n csv_table.append(row)\n return csv_table", "def read_csv(csvfilename):\n\trows = []\n\n\twith open(csvfilename, \"rU\") as csvfile:\n\t\tfile_reader = csv.reader(csvfile)\n\t\tfor row in file_reader:\n\t\t\trows.append(row)\n\treturn rows", "def read_csv(file_name, return_the_header=False, delimiter=',', quote_char='|'):\n with open(file_name, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quote_char)\n if return_the_header:\n next(reader) # Skip header row\n data = []\n for row in reader:\n data.append(row)\n return data", "def read_file_to_list(input_file):\n with open(input_file) as csvfile:\n csv_rows = csv.reader(csvfile)\n\n data = []\n for row in csv_rows:\n data.append(row)\n\n return data", "def open_csv(file):\n\n\ttmp = [] # initialise the list\n\twith open(file, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\ttmp.append(row) # add row to list\n\n\treturn tmp", "def csv_style_file_reader(file_path, delimiter=None, column_title=True):\n the_file = open(file_path,'r')\n\n # This will be return\n column_titles = []\n\n #Get information from the first line\n first_line = the_file.readline()\n split_line = first_line.split(delimiter)\n nb_columns = len(split_line);\n\n # This will be return\n columns = [ [] for i in range(nb_columns) ]\n # Explaination: https://stackoverflow.com/questions/12791501/python-initializing-a-list-of-lists\n\n # Transfer the line in column titles\n if column_title:\n column_titles = split_line\n else:\n column_titles = [\"Column \"+ str(i) for i in range(nb_columns)];\n for i in range (nb_columns):\n columns[i].append( float(split_line[i]))\n\n for line in the_file:\n split_line = line.split(delimiter)\n if len(split_line) != 0:\n for i in range 
(nb_columns):\n columns[i].append( float(split_line[i]))\n\n return column_titles, columns", "def read_specific_problem(filename):\r\n table = []\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.reader(csvfile, skipinitialspace=True, delimiter=',')\r\n for row in reader:\r\n table.append(row)\r\n return table" ]
[ "0.74909246", "0.7454272", "0.7397418", "0.737931", "0.73525816", "0.7320116", "0.72919524", "0.72789633", "0.7226385", "0.720839", "0.7207552", "0.7207479", "0.7180073", "0.71576065", "0.71297944", "0.7055001", "0.7043814", "0.7033059", "0.7027407", "0.7022255", "0.70219046", "0.7019354", "0.7012869", "0.69893306", "0.6954444", "0.6945633", "0.6942537", "0.69392127", "0.6865547", "0.68647647" ]
0.7575302
0
Returns true if the time takes place during the day. A time is during the day if it is after sunrise but before sunset, as indicated by the daycycle dictionary. A daycycle dictionary has keys for several years (as int). The value for each year is also a dictionary, taking strings of the form 'mm-dd'. The value for that key is a THIRD dictionary, with two keys "sunrise" and "sunset". The value for each of those two keys is a string in 24-hour time format.
def daytime(time,daycycle):
    # HINT: Use the code from the previous exercise to get sunset AND sunrise
    # Add a timezone to time if one is missing (the one from the daycycle)
    from dateutil.parser import parse
    dYear = str(time.strftime('%Y'))
    dMonDay = str(time.strftime('%m-%d'))
    mk = ''
    m = ''
    MD = ''
    sunset = ''
    sunrise = ''
    for k in daycycle:
        if dYear == k:
            mk = k
            sunrise = daycycle[k][dMonDay]['sunrise']
            sunset = daycycle[k][dMonDay]['sunset']
            for m in daycycle[k].keys():
                if m == dMonDay:
                    MD = m
            sunrisetz = str_to_time(mk+'-'+MD+' '+sunrise, daycycle["timezone"])
            sunsettz = str_to_time(mk+'-'+MD+' '+sunset, daycycle["timezone"])
            if time.tzinfo is None or time.tzinfo == None:
                time = time.replace(tzinfo=tz)
                if time > sunrisetz and time < sunsettz:
                    return True
                else:
                    return False
            else:
                if time > sunrisetz and time < sunsettz:
                    return True
                else:
                    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_time_type_state_is_afternoon(day):\n\n assert day_time_info(day.hours_0).is_afternoon is False\n assert day_time_info(day.hours_1).is_afternoon is False\n assert day_time_info(day.hours_2).is_afternoon is False\n assert day_time_info(day.hours_3).is_afternoon is False\n assert day_time_info(day.hours_4).is_afternoon is False\n assert day_time_info(day.hours_5).is_afternoon is False\n assert day_time_info(day.hours_6).is_afternoon is False\n assert day_time_info(day.hours_7).is_afternoon is False\n assert day_time_info(day.hours_8).is_afternoon is False\n assert day_time_info(day.hours_9).is_afternoon is False\n assert day_time_info(day.hours_10).is_afternoon is False\n assert day_time_info(day.hours_11).is_afternoon is False\n assert day_time_info(day.hours_12).is_afternoon is False\n assert day_time_info(day.hours_13).is_afternoon is True\n assert day_time_info(day.hours_14).is_afternoon is True\n assert day_time_info(day.hours_15).is_afternoon is True\n assert day_time_info(day.hours_16).is_afternoon is True\n assert day_time_info(day.hours_17).is_afternoon is True\n assert day_time_info(day.hours_18).is_afternoon is False\n assert day_time_info(day.hours_19).is_afternoon is False\n assert day_time_info(day.hours_20).is_afternoon is False\n assert day_time_info(day.hours_21).is_afternoon is False\n assert day_time_info(day.hours_22).is_afternoon is False\n assert day_time_info(day.hours_23).is_afternoon is False", "def test_time_type_state_is_evening(day):\n\n assert day_time_info(day.hours_0).is_evening is False\n assert day_time_info(day.hours_1).is_evening is False\n assert day_time_info(day.hours_2).is_evening is False\n assert day_time_info(day.hours_3).is_evening is False\n assert day_time_info(day.hours_4).is_evening is False\n assert day_time_info(day.hours_5).is_evening is False\n assert day_time_info(day.hours_6).is_evening is False\n assert day_time_info(day.hours_7).is_evening is False\n assert day_time_info(day.hours_8).is_evening is False\n assert day_time_info(day.hours_9).is_evening is False\n assert day_time_info(day.hours_10).is_evening is False\n assert day_time_info(day.hours_11).is_evening is False\n assert day_time_info(day.hours_12).is_evening is False\n assert day_time_info(day.hours_13).is_evening is False\n assert day_time_info(day.hours_14).is_evening is False\n assert day_time_info(day.hours_15).is_evening is False\n assert day_time_info(day.hours_16).is_evening is False\n assert day_time_info(day.hours_17).is_evening is False\n assert day_time_info(day.hours_18).is_evening is True\n assert day_time_info(day.hours_19).is_evening is True\n assert day_time_info(day.hours_20).is_evening is True\n assert day_time_info(day.hours_21).is_evening is True\n assert day_time_info(day.hours_22).is_evening is True\n assert day_time_info(day.hours_23).is_evening is False", "def test_time_type_state_is_morning(day):\n\n assert day_time_info(day.hours_0).is_morning is False\n assert day_time_info(day.hours_1).is_morning is False\n assert day_time_info(day.hours_2).is_morning is False\n assert day_time_info(day.hours_3).is_morning is False\n assert day_time_info(day.hours_4).is_morning is False\n assert day_time_info(day.hours_5).is_morning is True\n assert day_time_info(day.hours_6).is_morning is True\n assert day_time_info(day.hours_7).is_morning is True\n assert day_time_info(day.hours_8).is_morning is True\n assert day_time_info(day.hours_9).is_morning is True\n assert day_time_info(day.hours_10).is_morning is False\n assert 
day_time_info(day.hours_11).is_morning is False\n assert day_time_info(day.hours_12).is_morning is False\n assert day_time_info(day.hours_13).is_morning is False\n assert day_time_info(day.hours_14).is_morning is False\n assert day_time_info(day.hours_15).is_morning is False\n assert day_time_info(day.hours_16).is_morning is False\n assert day_time_info(day.hours_17).is_morning is False\n assert day_time_info(day.hours_18).is_morning is False\n assert day_time_info(day.hours_19).is_morning is False\n assert day_time_info(day.hours_20).is_morning is False\n assert day_time_info(day.hours_21).is_morning is False\n assert day_time_info(day.hours_22).is_morning is False\n assert day_time_info(day.hours_23).is_morning is False", "def test_time_type_state_is_night(day):\n\n assert day_time_info(day.hours_0).is_night is True\n assert day_time_info(day.hours_1).is_night is True\n assert day_time_info(day.hours_2).is_night is True\n assert day_time_info(day.hours_3).is_night is True\n assert day_time_info(day.hours_4).is_night is True\n assert day_time_info(day.hours_5).is_night is False\n assert day_time_info(day.hours_6).is_night is False\n assert day_time_info(day.hours_7).is_night is False\n assert day_time_info(day.hours_8).is_night is False\n assert day_time_info(day.hours_9).is_night is False\n assert day_time_info(day.hours_10).is_night is False\n assert day_time_info(day.hours_11).is_night is False\n assert day_time_info(day.hours_12).is_night is False\n assert day_time_info(day.hours_13).is_night is False\n assert day_time_info(day.hours_14).is_night is False\n assert day_time_info(day.hours_15).is_night is False\n assert day_time_info(day.hours_16).is_night is False\n assert day_time_info(day.hours_17).is_night is False\n assert day_time_info(day.hours_18).is_night is False\n assert day_time_info(day.hours_19).is_night is False\n assert day_time_info(day.hours_20).is_night is False\n assert day_time_info(day.hours_21).is_night is False\n assert day_time_info(day.hours_22).is_night is False\n assert day_time_info(day.hours_23).is_night is True", "def test_time_type_state_is_midmorning(day):\n\n assert day_time_info(day.hours_0).is_midmorning is False\n assert day_time_info(day.hours_1).is_midmorning is False\n assert day_time_info(day.hours_2).is_midmorning is False\n assert day_time_info(day.hours_3).is_midmorning is False\n assert day_time_info(day.hours_4).is_midmorning is False\n assert day_time_info(day.hours_5).is_midmorning is False\n assert day_time_info(day.hours_6).is_midmorning is False\n assert day_time_info(day.hours_7).is_midmorning is False\n assert day_time_info(day.hours_8).is_midmorning is False\n assert day_time_info(day.hours_9).is_midmorning is False\n assert day_time_info(day.hours_10).is_midmorning is True\n assert day_time_info(day.hours_11).is_midmorning is True\n assert day_time_info(day.hours_12).is_midmorning is False\n assert day_time_info(day.hours_13).is_midmorning is False\n assert day_time_info(day.hours_14).is_midmorning is False\n assert day_time_info(day.hours_15).is_midmorning is False\n assert day_time_info(day.hours_16).is_midmorning is False\n assert day_time_info(day.hours_17).is_midmorning is False\n assert day_time_info(day.hours_18).is_midmorning is False\n assert day_time_info(day.hours_19).is_midmorning is False\n assert day_time_info(day.hours_20).is_midmorning is False\n assert day_time_info(day.hours_21).is_midmorning is False\n assert day_time_info(day.hours_22).is_midmorning is False\n assert day_time_info(day.hours_23).is_midmorning is 
False", "def test_time_type_state_is_noon(day):\n\n assert day_time_info(day.hours_0).is_noon is False\n assert day_time_info(day.hours_1).is_noon is False\n assert day_time_info(day.hours_2).is_noon is False\n assert day_time_info(day.hours_3).is_noon is False\n assert day_time_info(day.hours_4).is_noon is False\n assert day_time_info(day.hours_5).is_noon is False\n assert day_time_info(day.hours_6).is_noon is False\n assert day_time_info(day.hours_7).is_noon is False\n assert day_time_info(day.hours_8).is_noon is False\n assert day_time_info(day.hours_9).is_noon is False\n assert day_time_info(day.hours_10).is_noon is False\n assert day_time_info(day.hours_11).is_noon is False\n assert day_time_info(day.hours_12).is_noon is True\n assert day_time_info(day.hours_13).is_noon is False\n assert day_time_info(day.hours_14).is_noon is False\n assert day_time_info(day.hours_15).is_noon is False\n assert day_time_info(day.hours_16).is_noon is False\n assert day_time_info(day.hours_17).is_noon is False\n assert day_time_info(day.hours_18).is_noon is False\n assert day_time_info(day.hours_19).is_noon is False\n assert day_time_info(day.hours_20).is_noon is False\n assert day_time_info(day.hours_21).is_noon is False\n assert day_time_info(day.hours_22).is_noon is False\n assert day_time_info(day.hours_23).is_noon is False", "def classify_time(hk_time):\n # datetime object\n date, time = get_date_and_time(hk_time)\n is_working_day = cal.is_working_day(date)\n if is_working_day:\n return (is_working_day, start < time < end)\n else:\n return (is_working_day, False)", "def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False", "def is_afternoon_hour(self, time_of_day):\n return (time_of_day >= self.constants.AFTERNOON_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.AFTERNOON_HOUR_END /\n self.constants.DURATION_MAX)", "def is_sanctioned_time(self, minute):\n return ((minute - self.day_start) % self.minutes_in_24h) < (self.hours_per_day * 60)", "def is_lunch_hour(self, time_of_day):\n return (time_of_day >= self.constants.LUNCH_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.LUNCH_HOUR_END /\n self.constants.DURATION_MAX)", "def is_dinner_hour(self, time_of_day):\n return (time_of_day >= self.constants.DINER_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.DINER_HOUR_END /\n self.constants.DURATION_MAX)", "def during_operating_hours(dry_run=False, starthour=None, endhour=None):\n if starthour is None:\n starthour = get_nightly_start_time()\n if endhour is None:\n endhour = get_nightly_end_time()\n ensure_tucson_time()\n hour = time.localtime().tm_hour\n\n if endhour < starthour:\n return dry_run or (hour < endhour) or (hour > starthour)\n else:\n return dry_run or ( (hour < endhour) and (hour > starthour) )", "def is_morning_hour(self, time_of_day):\n return (time_of_day >= self.constants.MORNING_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.MORNING_HOUR_END /\n self.constants.DURATION_MAX)", "def test_time_type_state_types(day):\n\n assert day_time_info(day.hours_0).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_1).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_2).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_3).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_4).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_5).types == {TimeType.MORNING}\n assert 
day_time_info(day.hours_6).types == {TimeType.MORNING}\n assert day_time_info(day.hours_7).types == {TimeType.MORNING}\n assert day_time_info(day.hours_8).types == {TimeType.MORNING}\n assert day_time_info(day.hours_9).types == {TimeType.MORNING}\n assert day_time_info(day.hours_10).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_11).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_12).types == {TimeType.NOON}\n assert day_time_info(day.hours_13).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_14).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_15).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_16).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_17).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_18).types == {TimeType.EVENING}\n assert day_time_info(day.hours_19).types == {TimeType.EVENING}\n assert day_time_info(day.hours_20).types == {TimeType.EVENING}\n assert day_time_info(day.hours_21).types == {TimeType.EVENING}\n assert day_time_info(day.hours_22).types == {TimeType.EVENING}\n assert day_time_info(day.hours_23).types == {TimeType.NIGHT}", "def is_night_hours(time):\n if time == datetime.time(22, 0, 0, 0):\n return True\n return time.hour in [22, 23, 0, 1, 2, 3, 4, 5]", "def time_category(dtObj):\n if 9 <= dtObj.hour <= 17:\n return \"daytime\"\n elif 5 <= dtObj.hour < 9:\n return \"morning\"\n elif 17 < dtObj.hour < 23:\n return \"evening\"\n else:\n return \"night\"", "def time_is_valid(request, day, time, name):\n\n\tif ((day != '0' and day != '6') and time.hour == 21) or time.minute != 0:\n\t\treturn False\n\n\t# George's time\n\tif name != \"George Yeh\" and day == '6' and time.hour >= 9 and time.hour < 12:\n\t\treturn False\n\n\treturn True", "def json_has_access_now(self, json_str):\n\n day2day = {'mon': 0,\n 'tues': 1,\n 'wed': 2,\n 'thurs': 3,\n 'fri': 4,\n 'sat': 5,\n 'sun': 6\n }\n\n try:\n today = datetime.date.today().weekday()\n cur_time = datetime.datetime.now().time()\n data = json.loads(str(json_str))\n #print(\"data = {}\\n\\n\".format(data))\n for day, times in data.items():\n #print(\"day = [{}] day2day: [{}]\".format(today, day2day[day]))\n if today == day2day[day]:\n start_t = datetime.datetime.strptime(times['start'], '%H:%M:%S').time()\n end_t = datetime.datetime.strptime(times['end'], '%H:%M:%S').time()\n #print(\"{} <= {} and {} >= {}\".format(start_t, cur_time, end_t, cur_time))\n if start_t <= cur_time and end_t >= cur_time:\n return True\n\n except ValueError:\n print(\"ValueError!!!one1! 
\\njson_str = {}\".format(json_str))\n return False\n\n return False", "def is_market_hours():\n now = datetime.datetime.now()\n day = now.weekday()\n time = now.hour * 100 + now.minute\n\n if day > 4:\n return False\n\n if 930 <= time < 1600:\n return True\n\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False", "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False", "def _is_eruption_in(self, days, from_time):\n for te in self.tes:\n if 0 < (te-from_time).total_seconds()/(3600*24) < days:\n return 1.\n return 0.", "def timer_lights_on_off_room2():\n localtime = time.localtime(time.time())[3] # Hour of the day\n day_number = days_since_start()\n if day_number < 30:\n return True # Lights On\n elif day_number >= 30 and day_number < 60:\n if localtime >= 10 and localtime < 16:\n return False # Lights Off\n else:\n return True # Lights On\n elif day_number >= 60 and day_number < 90:\n if localtime >= 6 and localtime < 18:\n return False # Lights Off\n else:\n return True # Lights On\n else:\n if localtime >= 0 and localtime < 6:\n return True # Lights On\n else:\n return False # Lights Off", "def is_current_time_in_schedule(frequency, hour_of_day, day_of_month=None, day_of_week=None):\n est_timezone = pytz.timezone('US/Eastern')\n current_est_time = datetime.datetime.now(est_timezone)\n current_hour_of_day = current_est_time.hour\n current_day_of_week = current_est_time.weekday()\n current_day_of_month = current_est_time.day\n\n # All configurations have an hour of the day, so the hour must always match in order to send a report.\n if hour_of_day == current_hour_of_day:\n # If reports should be sent monthly and today is the same as the day configured, return True\n if frequency == FREQUENCY_TYPE_MONTHLY and day_of_month == current_day_of_month:\n return True\n # If reports should be sent weekly and today is the same as the day configured, return True\n elif frequency == FREQUENCY_TYPE_WEEKLY and day_of_week == current_day_of_week:\n return True\n # If reports should be sent daily, return True\n elif frequency == FREQUENCY_TYPE_DAILY:\n return True\n\n return False", "def timer_lights_on_off_room1():\n localtime = time.localtime(time.time())[3] # Hour of the day\n day_number = days_since_start()\n if day_number < 30:\n return True # Lights On\n elif day_number >= 30 and day_number < 60:\n if localtime >= 10 and localtime < 16:\n return False # Lights Off\n else:\n return True # Lights On\n elif day_number >= 60 and day_number < 90:\n if localtime >= 6 and localtime < 18:\n return False # Lights Off\n else:\n return True # Lights On\n else:\n if localtime >= 0 and localtime < 6:\n return True # Lights On\n else:\n return False # Lights Off", "def test_parse_valid_time_of_day(self):\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_of_day({\n 'Hour': 23,\n 'Minute': 59\n })\n\n self.assertIsInstance(res, TimeOfDay)\n\n self.assertEqual(res.hour, 23)\n self.assertEqual(res.minute, 59)\n\n res2 = sf_c.parse_time_of_day({\n 'Hour': 0,\n 'Minute': 0\n })\n\n self.assertIsInstance(res2, TimeOfDay)\n\n self.assertEqual(res2.hour, 0)\n self.assertEqual(res2.minute, 0)", "def test_analyze_time(self):\n self.ph5validate.analyze_time()\n self.assertEqual(self.ph5validate.das_time.keys(), [('12183', 1, 500)])\n Dtime = self.ph5validate.das_time[('12183', 1, 500)]\n\n # 3 different deploy time\n self.assertEqual(len(Dtime['time_windows']), 
5)\n\n # station 9001\n self.assertEqual(Dtime['time_windows'][0],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][1],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][2],\n (1550849950, 1550850034, '9001'))\n # station 9002\n self.assertEqual(Dtime['time_windows'][3],\n (1550850043, 1550850093, '9002'))\n # station 9003\n self.assertEqual(Dtime['time_windows'][4],\n (1550850125, 1550850187, '9003'))\n\n self.assertEqual(Dtime['min_deploy_time'],\n [1550849950,\n 'Data exists before deploy time: 7 seconds.'])", "def isoweekday(self):\n return 0", "def isoweekday(self):\n return 0" ]
[ "0.6449394", "0.64007175", "0.6371306", "0.6301575", "0.6152317", "0.59922785", "0.590649", "0.57754403", "0.5772595", "0.57206595", "0.57047856", "0.5666967", "0.55771357", "0.5550245", "0.5542482", "0.54408264", "0.541284", "0.5396091", "0.53877", "0.53581625", "0.5350125", "0.53044623", "0.52918947", "0.52809656", "0.5269817", "0.52384186", "0.5218995", "0.5208475", "0.5194327", "0.5194327" ]
0.7860913
0
Returns (a copy of) a row of the table with the given id. Table is a two-dimensional list where the first element of each row is an identifier (string). This function searches table for the row with the matching identifier and returns a COPY of that row. If there is no match, this function returns None. This function is useful for extracting rows from a table of pilots, a table of instructors, or even a table of planes.
def get_for_id(id,table): # Implement this function for row in range(1, len(table)): for col in range(len(table[0])): if id in table[row][col]: return table[row]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_by_id(soup, id):\n # dont include .tbody after the find() for some reason\n html_table = soup.find(id=id)\n if html_table is None:\n return None\n rows = html_table.find_all('tr')[1:]\n return [row.contents for row in rows]", "def rpc_database_get_row_by_id(self, row_id):\n\t\ttable_name = self.path.split('/')[-2]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\t\tcolumns = DATABASE_TABLES[table_name]\n\t\tsession = db_manager.Session()\n\t\trow = db_manager.get_row_by_id(session, table, row_id)\n\t\tif row:\n\t\t\trow = dict(zip(columns, (getattr(row, c) for c in columns)))\n\t\tsession.close()\n\t\treturn row", "def get_title_by_id_from_table(table, id):\n\n for line in sales_data:\n if line[ID] == id:\n return line[TITLE]\n return None", "def GetTable(self, table_id):\n for table in self.tables:\n if table.table_id == table_id:\n return table\n\n return None", "def select_id(self, id):\n with self.conn:\n self.c.execute(\n \"\"\"SELECT * FROM %s WHERE id = ?\"\"\" % (TABLE), (id,)\n )\n return self.c.fetchone()", "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "def get_title_by_id_from_table(table, id):\n\n # your code", "def load(self, id):\n return self.getTable().get(id).run(self.r)", "def remove(table, id_):\n\n # your code\n for i, row in enumerate(table):\n if row[ID] == id_:\n table.pop(i)\n return table\n\n ui.print_error_message(\"Wrong ID!\")\n\n return table\n\n return table", "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []", "def get_book_by_id(self, id):\n\n try:\n cur = self._db.cursor()\n results = cur.execute('SELECT rowid, * FROM books WHERE rowid = ?', (id, ))\n book_row = results.fetchone()\n return self._row_to_book(book_row)\n except sqlite3.Error as e:\n raise BookError(f'Error getting book ID {id}') from e", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def get_row(self, business_id):\n return next(filter(lambda k: k['id'] == business_id, self.get_rows()))", "def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):\n tab = idl_.tables[table]\n for r in tab.rows.values():\n if getattr(r, column) == match:\n return r\n if default is not _NO_DEFAULT:\n return default\n raise None", "def get(table_name, record_id):\n with get_connection() as conn:\n return rethink.table(table_name).get(record_id).run(conn)", "def get_table(table_id: int) -> Table:\n table = Table.query.filter_by(id=table_id).first()\n return table", "async def find_by_id(self, _id: int) -> Record:\n conn: Connection\n async with self.db_pool.acquire() as conn:\n return await conn.fetchrow(\n f\"SELECT * FROM {self.table_name} WHERE {self.primary_key}=$1\",\n _id,\n )", "def GetRow(self, *args):\n return _table.Table_GetRow(self, *args)", "def row(self, row_id):\r\n return Row(self, row_id)", "def get_row(self, pk):\n ans = self.execute(self.commands.get_row(\n cols=self._join_cols(self.columns),\n table=self.name,\n pk_col=self.primary_key_column,\n pk=pk\n ))\n if not ans:\n return None\n return self._dictify(self.columns, ans[0])", "def get(self, unique_id: int) -> Optional[ModelledTable]:\n\n return self.model.get(self.cursor, unique_id)", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def 
remove(table, id_):\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise IndexError(\"The given ID not in the table.\")\n new_table = [item for item in table if item[ID] != id_]\n return new_table", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def __get_one_by_id(\n self, table_name: str, id_name: str, db_id: str\n ) -> Mapping[str, Any]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * FROM {table_name}\n WHERE ({id_name} = ?)\n \"\"\",\n (int(db_id),),\n )\n results = c.fetchall()\n if len(results) != 1:\n raise EntryDoesNotExistException(\n f\"Table {table_name} has no {id_name} {db_id}\"\n )\n return results[0]", "def find_row(self, row_id):\n #Calculate what line in the file the row_id will be found at\n looking_for_line = self.__row_id_in_file(row_id)\n\n #Initiate line-counter\n current_line = 1\n with open(self.__data_file_for_row_id(row_id), 'r') as f:\n for line in f:\n if current_line == looking_for_line:\n return json.loads(line)\n current_line += 1\n\n raise Exception('Could not find row_id ' + row_id)", "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from runs\n WHERE run_id = ?;\n \"\"\",\n (run_id,),\n )\n results = c.fetchall()\n return results[0]", "def copy_table(self, table: Table) -> Table:\n self._requires_table(table)\n return table.copy()", "def extract_row(self, column, identifier, df_input=None):\n\n try:\n if df_input is None:\n return self.df_input.loc[self.df_input[column] == identifier]\n else:\n return df_input.loc[df_input[column] == identifier] \n except Exception as e:\n print(e)", "def remove(table, id_):\n\n # your code\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise IndexError(\"The given ID not in the table.\")\n new_table = [item for item in table if item[ID] != id_]\n return new_table" ]
[ "0.64286244", "0.63104653", "0.6236567", "0.6154454", "0.61423177", "0.6114756", "0.595511", "0.5797238", "0.5750377", "0.57493246", "0.56794655", "0.56740063", "0.56564826", "0.5519462", "0.5502018", "0.54621154", "0.5460119", "0.5443191", "0.54360026", "0.54353404", "0.542247", "0.54106116", "0.54098815", "0.54076433", "0.5389928", "0.5382997", "0.5373133", "0.53438294", "0.5342308", "0.53190017" ]
0.71067375
0
Given a boolean specifying whether to use local disk or S3, setup filesystem
def setup_fs(s3, key="", secret="", endpoint="", region="",cert="", passwords={}): if s3: import s3fs block_size = 55 * 1024 * 1024 if "amazonaws" in endpoint: fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size) elif cert != "": fs = s3fs.S3FileSystem( key=key, secret=secret, client_kwargs={"endpoint_url": endpoint, "verify": cert, "region_name": region}, default_block_size=block_size, ) else: fs = s3fs.S3FileSystem( key=key, secret=secret, client_kwargs={"endpoint_url": endpoint, "region_name": region}, default_block_size=block_size, ) else: from pathlib import Path import canedge_browser base_path = Path(__file__).parent fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords) return fs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, source_path, output_path):\n self.storage_type = 's3' if 's3' in source_path else 'local'\n self.source_path = source_path\n self.output_path = output_path", "def get_filesystem(self, silent=True):\n if self._filesystem:\n return self._filesystem\n try:\n import s3fs # noqa\n except ImportError as exc:\n if not silent:\n raise ImportError(\n f\"AWS s3fs not installed, run pip install s3fs, {exc}\"\n )\n return None\n self._filesystem = fsspec.filesystem(\"s3\", **self.get_storage_options())\n return self._filesystem", "def set_file_storage(source='local', **kwargs):\n pass", "def setUp(self):\r\n \r\n factory.FileSystem._getConnectionPool = SimpleMock(SimpleMock(SimpleMock()))\r\n self._factory = factory.FileSystem(BaseConfiguration(\"http://s3.amazonaws.de/bucketname/keyname\"))", "def _swift_storage_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('storage')\n self._swift_install('storage')\n self._set_onhold('storage')\n self._final_install_touches('storage')", "def s3_to_local(s3_path: str,\n sync: Union[bool, None, Literal['if_not_exists']] = None,\n download_mode: DownloadMode = DownloadMode.SIZE_AND_TIMESTAMP,\n include_patterns: Optional[Sequence[str]] = None) -> str:\n if download_mode not in DownloadMode:\n raise ValueError(f'Wrong value for download_mode: {download_mode}')\n\n parsed = urlparse(s3_path)\n if parsed.scheme == '':\n if parsed.netloc != '':\n raise ValueError(f\"{s3_path} must be S3 path or local path\")\n return s3_path\n\n config = sriracha.main.get_config()\n local_sync_dir = config.local_sync_dir\n if local_sync_dir is None:\n raise ValueError(\"Please run 'sriracha configure' to configure \"\n \"local sync directory\")\n\n s3_relpath = Path(parsed.netloc, parsed.path.lstrip('/'))\n local_path = local_sync_dir / s3_relpath\n\n if sync:\n if sync is True:\n download_mode = DownloadMode.ALWAYS_DOWNLOAD\n elif sync == 'if_not_exists':\n download_mode = DownloadMode.FILE_DOES_NOT_EXIST\n elif sync is False:\n download_mode = DownloadMode.NEVER_DOWNLOAD\n else:\n raise ValueError(f'Wrong value for sync: {sync}.')\n logger.warning('The use of sync is deprecated and will be removed in '\n 'the future. Use download_mode instead.')\n\n if download_mode != DownloadMode.NEVER_DOWNLOAD:\n client = boto3.client(\"s3\")\n url = urlparse(s3_path)\n\n if url.scheme != 's3':\n raise InvalidS3Path(s3_path=s3_path,\n reason=InvalidS3Path.Reason.WRONG_SCHEME)\n\n if not url.netloc:\n raise InvalidS3Path(s3_path=s3_path,\n reason=InvalidS3Path.Reason.NO_BUCKET_NAME)\n bucket = url.netloc\n key = url.path.strip('/')\n\n if not key:\n # S3 path is just the bucket name (i.e. 
no key passed)\n _download_s3_dir(local_path, s3_path, download_mode,\n include_patterns)\n\n try:\n # Try to get object metadata - if successful we have a valid file.\n client.head_object(Bucket=bucket, Key=key)\n\n if include_patterns is not None:\n raise ValueError('include_patterns are only allowed for directories.') # noqa: E501\n\n _download_s3_file(local_path, s3_path, download_mode)\n except botocore.exceptions.ClientError as e:\n # If we get a 404 back then we're dealing with an S3 directory\n if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:\n _download_s3_dir(local_path, s3_path, download_mode,\n include_patterns)\n else:\n raise e\n\n return str(local_path)", "def _create_dir(self, stream_name:str=None, version:int=None, user_id:str=None):\n storage_path = self._get_storage_path(stream_name=stream_name, version=version, user_id=user_id)\n if self.nosql_store == \"hdfs\":\n if not self.fs.exists(storage_path):\n self.fs.mkdir(storage_path)\n return storage_path\n elif self.nosql_store==\"filesystem\":\n if not os.path.exists(storage_path):\n self.fs.makedirs(storage_path)\n return storage_path\n return None", "def __init__(\n self, filename, client_kwargs=None, cache_size=128 * (1024 ** 2)\n ):\n print(\"Establishing Connection, may take a minute ......\")\n\n if client_kwargs is None:\n s3 = s3fs.S3FileSystem()\n else:\n s3 = s3fs.S3FileSystem(client_kwargs=client_kwargs)\n \n store = s3fs.S3Map(root=filename, s3=s3, check=False)\n\n super().__init__(store, cache_size=cache_size)", "def copy_files(self):\n if settings.USE_S3_STORAGE:\n self.copy_to_s3()\n else:\n self.copy_to_local()", "def __init__(\n self, storage_path: str, block_size: Tuple[int, int, int], **kwargs\n ) -> None:\n self.name = \"FilesystemStorageManager\"\n if \"next_layer\" in kwargs:\n self._next = kwargs[\"next_layer\"]\n self.is_terminal = False\n else:\n self.is_terminal = True\n self.storage_path = storage_path\n self.block_size = block_size\n self._cache = kwargs.get(\"cache\", True)\n\n self.fs = ({\"h5\": H5FileInterface}.get(kwargs.get(\"preferred_format\", \"h5\")))(\n self.storage_path\n )", "def external_storage():\r\n\r\n folder_path = os.path.join('/media/pi/', args.storage_name, args.directory_name)\r\n if not os.path.exists(folder_path):\r\n os.makedirs(folder_path)\r\n return folder_path", "def __init__(__self__, *,\n mount_path: pulumi.Input[str],\n type: pulumi.Input[str],\n enable_sub_path: Optional[pulumi.Input[bool]] = None,\n mount_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n read_only: Optional[pulumi.Input[bool]] = None,\n share_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"mount_path\", mount_path)\n pulumi.set(__self__, \"type\", 'AzureFileVolume')\n if enable_sub_path is None:\n enable_sub_path = False\n if enable_sub_path is not None:\n pulumi.set(__self__, \"enable_sub_path\", enable_sub_path)\n if mount_options is not None:\n pulumi.set(__self__, \"mount_options\", mount_options)\n if read_only is not None:\n pulumi.set(__self__, \"read_only\", read_only)\n if share_name is not None:\n pulumi.set(__self__, \"share_name\", share_name)", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure 
credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def prepare_filesystem(self):\n if self._is_filesystem_prepared:\n return True\n ms = self.getModelObj()\n if ms is None:\n self.warning(\"Could not prepare local filesystem to store macros\")\n return False\n ms_name = ms.getSimpleName().replace('/', '_')\n self._tmp_dir = tempfile.mkdtemp(prefix=ms_name, dir=self._base_tmp_dir)\n 
self._is_filesystem_prepared = True\n return True", "def make_s3(sitename):\n return s3.S3(sitename)", "def _connect_s3(self):\n if self.sts:\n self.s3 = boto.connect_s3(aws_access_key_id=self.sts[\"access_key\"],\n aws_secret_access_key=self.sts[\"secret_key\"],\n security_token=self.sts[\"session_token\"])\n elif self.config.aws_profile:\n self.s3 = boto.connect_s3(profile_name=self.config.aws_profile)\n else:\n self.s3 = boto.connect_s3()", "def s3_store_data(self):\n\n USERHOMEDIR = os.path.expanduser('~')\n TESTFILEPATH = \"%s/3MBFILE\" % USERHOMEDIR\n if not os.path.exists(TESTFILEPATH):\n with open(TESTFILEPATH, \"wb\") as out:\n out.truncate(1024 * 1024 * 3)\n self.k.set_contents_from_filename(TESTFILEPATH)", "def create_sam_bucket():\n local(f\"aws s3 mb s3://{env.bucket_name} --region {env.aws_region}\")", "def store_to_s3():\n\n try:\n # establish aws/s3 connection\n s3 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY\n )\n logger.info(\"S3 connection established!\")\n except Exception as e:\n logger.error('Fail to connect to aws s3. Please check your credentials!')\n logger.error(e)\n else:\n try:\n # upload local file to S3 bucket\n logger.info(\"Uploading {} to {} bucket as {}\".format(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename))\n s3.upload_file(config.Local_File_To_Upload,\n config.Bucket_Name,\n config.S3_Filename)\n logger.info('File successfully uploaded to S3 bucket!')\n except FileNotFoundError:\n logger.error('File not found, pleas check the file path.')\n except Exception as e:\n logger.error(e)", "def __init__(self, debug=False):\n\n self.debug = debug\n self.default_path_aws = '/home/ubuntu/'", "def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None,\n host=None, debug=0, https_connection_factory=None,\n calling_format=None, path='/', provider='aws',\n bucket_class=None, mock_s3_fs=None):\n # use mock_s3_fs even if it's {}\n self.mock_s3_fs = combine_values({}, mock_s3_fs)\n self.endpoint = host or 's3.amazonaws.com'", "def connect_filesystem(self, *args, **kwargs):\n return self._get_storage().connect_filesystem(*args, **kwargs)", "def s3_server(request):\n return _s3_server(request)", "def s3_resource(self):\n return boto3.resource('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def __init__(self, resolver_context, path_spec):\n super(FATFileSystem, self).__init__(resolver_context, path_spec)\n self._file_object = None\n self._fsfat_volume = None\n self._root_directory_identifier = None", "def setup():\n processes = []\n try:\n s3.create_bucket(Bucket=BUCKET)\n jotfs_p = subprocess.Popen([\n \"./bin/jotfs\",\n \"-db\", DBNAME,\n \"-port\", str(PORT),\n \"-store_bucket\", BUCKET,\n \"-store_access_key\", STORE_ACCESS_KEY,\n \"-store_secret_key\", STORE_SECRET_KEY,\n \"-store_endpoint\", STORE_ENDPOINT,\n \"-tls_cert\", TLS_CERT,\n \"-tls_key\", TLS_KEY,\n \"-store_region\", \"us-east-1\",\n \"-debug\", \"-store_path_style\", \"-store_disable_ssl\"\n ])\n processes.append(jotfs_p)\n return processes\n except Exception as e:\n for p in processes:\n p.kill()\n raise e", "def _get_s3(key=None, username=None, secret=None, password=None, **kwargs):\n if username is not None:\n if key is not None:\n raise 
KeyError(\"S3 storage options got secrets argument \"\n \"collision. Please, use either `key` \"\n \"storage option or password field in URLpath, \"\n \"not both options together.\")\n key = username\n if key is not None:\n kwargs['key'] = key\n if password is not None:\n if secret is not None:\n raise KeyError(\"S3 storage options got secrets argument \"\n \"collision. Please, use either `secret` \"\n \"storage option or password field in URLpath, \"\n \"not both options together.\")\n secret = password\n if secret is not None:\n kwargs['secret'] = secret\n return S3FileSystem(**kwargs)", "def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename", "def _init_files_dirs(self):\n self.local.create_files_dirs()\n self.remote.create_files_dirs()", "def create_fs_on_disk(vm_name, disk_alias, executor=None):\n if ll_vms.get_vm_state(vm_name) == config.VM_DOWN:\n ll_vms.startVm(\n True, vm_name, wait_for_status=config.VM_UP,\n wait_for_ip=True\n )\n if not executor:\n executor = get_vm_executor(vm_name)\n\n logger.info(\n \"Find disk logical name for disk with alias %s on vm %s\",\n disk_alias, vm_name\n )\n disk_logical_volume_name = get_logical_name_by_vdsm_client(\n vm_name, disk_alias\n )\n if not disk_logical_volume_name:\n # This function is used to test whether logical volume was found,\n # raises an exception if it wasn't found\n message = \"Failed to get %s disk logical name\" % disk_alias\n logger.error(message)\n return False, message\n\n logger.info(\n \"The logical volume name for the requested disk is: '%s'\",\n disk_logical_volume_name\n )\n\n logger.info(\n \"Creating label: %s\", CREATE_DISK_LABEL_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_LABEL_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating disk label: %s\", out)\n if rc:\n return rc, out\n logger.info(\n \"Creating partition %s\",\n CREATE_DISK_PARTITION_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_PARTITION_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating partition: %s\", out)\n if rc:\n return rc, out\n # '1': create the fs as the first partition\n # '?': createFileSystem will return a random mount point\n logger.info(\"Creating a File-system on first partition\")\n mount_point = create_filesystem(\n vm_name=vm_name, device=disk_logical_volume_name, partition='1',\n fs=FILESYSTEM, executor=executor\n )\n return True, mount_point" ]
[ "0.6136107", "0.61083025", "0.5886789", "0.56999713", "0.55318826", "0.5386369", "0.5384897", "0.5347873", "0.5322004", "0.53185993", "0.52828926", "0.527598", "0.5212449", "0.5212169", "0.52068335", "0.5196238", "0.51807004", "0.5143956", "0.51013356", "0.50991315", "0.5097787", "0.5097443", "0.5093536", "0.5083578", "0.5041889", "0.5040653", "0.5028079", "0.5020888", "0.50091404", "0.5006" ]
0.70687497
0
Given a list of DBC file paths, create a list of conversion rule databases
def load_dbc_files(dbc_paths): import can_decoder from pathlib import Path db_list = [] for dbc in dbc_paths: db = can_decoder.load_dbc(Path(__file__).parent / dbc) db_list.append(db) return db_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_pdblist(pdblist, addext = 0):\n\n\t#Load the pdblist, and convert to a list.\n\tlistfile = open(pdblist, 'r')\n\tpdbs = listfile.readlines()\n\t\n\tfor pdb in pdbs:\n\t\tpdbname = pdb.strip()\n\t\tif (addext):\n\t\t\tpdbname = pdb.strip() + '.pdb'\n\t\t\n\t\tcmd.load(pdbname)", "def table_collector(path2mdbs):\n containing_folder = path2mdbs\n contained_files = os.listdir(containing_folder)\n table_list = []\n for mdb_path in contained_files:\n if os.path.splitext(mdb_path)[1]=='.mdb' or os.path.splitext(mdb_path)[1]=='.accdb':\n instance = arcno(os.path.join(containing_folder,mdb_path))\n for tablename, size in instance.actual_list.items():\n if tablename not in table_list:\n table_list.append(tablename)\n return table_list", "def makeDatabaseList():\n charList = []\n for ch in lower:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in numbers:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in special:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in other:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n if(caseSensitive):\n for ch in upper:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch, url)\n if(wildCards):\n for ch in wildCards:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch, url)\n return charList", "def get_sqls_from_dir(self, dir_path):\n sql_list = []\n files = os.listdir(dir_path)\n for create_file in files:\n if not create_file.startswith(\".\"):\n file_path = \"%s/%s\" % (dir_path, create_file)\n sql = self.get_sql_from_file(file_path)\n sql_dict = {}\n sql_dict[\"file_name\"] = create_file.split(\".\")[0]\n sql_dict[\"sql\"] = sql\n sql_list.append(sql_dict)\n return sql_list", "def get_files_based_on_identifiers(conn, identifiers_list):\n\n http_ptrn = config.get(\"paths\", \"http_path\")\n # Path to \"release/public-facing\" area\n release_dir_ptrn = os.path.join(config.get(\"paths\", \"release_dir\"), \"brain\")\n\n with open(identifiers_list) as ifh:\n idents = [line.rstrip() for line in ifh]\n\n # Right now I believe only derived identifiers are necessary but I would rather include other tables if specified.\n tabletypes = [tables.sequence, tables.alignment, tables.derived]\n desired_files = []\n for t in tabletypes:\n query = db.select([t.c.file_url]) \\\n .where(t.c.identifier.in_(idents))\n result_proxy = conn.execute(query)\n # Iterate through records and add files\n for row in result_proxy:\n # Replace HTTP URL with \"release/public-facing\" pathname\n desired_files.append(row[0].replace(http_ptrn, release_dir_ptrn))\n return desired_files", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def create_templates_database(dataset_path_list, db_file_path):\n paths_list = dataset_path_list\n\n templates = dict()\n for file in paths_list:\n\n image = face_recognition.load_image_file(file)\n tmp = face_recognition.face_encodings(image)\n if tmp:\n template = face_recognition.face_encodings(image)[0]\n if template.size != 0:\n templates[file] = template\n\n dump_dict_to_db(templates, db_file_path)", "def load_compares(compare_list):\n compare_insert = \"INSERT INTO git_compare VALUES\" \\\n \" (?, ?, ?, ?)\"\n dbutils.load_list(compare_insert, compare_list, DATABASE_FILE)", "def construct_bibfile_data(*paths):\n bibs = 
[reffile_factory(path) for path in paths]\n return bibs", "def parse_databases(default_dbname=\"cal_manager.db\"):\n db_list = []\n calconf = get_calconf()\n if not calconf:\n return db_list\n upload_cookie = calconf.get(\"upload_cookie\")\n # Allow old-format file to be read\n try:\n databases = calconf[\"databases\"]\n except KeyError:\n databases = calconf.get(\"database_dir\")\n if not databases:\n return db_list\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\"Use 'databases' instead of 'database_dir' in \"\n \"config file.\",\n DeprecationWarning\n )\n for line in databases.splitlines():\n if not line: # handle blank lines\n continue\n db, *flags = shlex.split(line)\n # \"get\" is default if there are no flags, but if any flags are\n # specified, then \"get\" must be there explicitly\n kwargs = {\"get_cal\": not bool(flags),\n \"store_cal\": False}\n for flag in flags:\n kwarg = f\"{flag}_cal\"\n if kwarg in kwargs:\n kwargs[kwarg] = True\n else:\n raise ValueError(\"{}: Unknown flag {!r}\".format(db, flag))\n\n expanded_db = path.expanduser(db)\n if path.isdir(expanded_db):\n db = path.join(db, default_dbname)\n cls = LocalDB\n elif path.isfile(expanded_db):\n cls = LocalDB\n elif \"/\" in expanded_db and \"//\" not in expanded_db:\n cls = LocalDB\n else: # does not check\n cls = RemoteDB\n kwargs[\"upload_cookie\"] = upload_cookie\n db_list.append((cls, db, kwargs))\n return db_list", "def get_rules(paths):\n raw_rules = []\n for path in paths:\n with open(path, \"r\", encoding=\"utf8\") as f:\n raw_rules += f.read().splitlines()\n \n return AdblockRules(raw_rules)", "def process(self, path):\n\n # Extract filtered content and build source databases to process\n for source in Execute.SOURCES:\n spath = os.path.join(path, source)\n\n # Extract Posts.xml from 7za file\n decompress = Decompress()\n decompress(spath)\n\n posts = os.path.join(spath, \"Posts.xml\")\n filtered = os.path.join(spath, \"Filtered.xml\")\n\n # Filter Posts.xml file for matching questions\n sift = Sift()\n sift(posts, filtered)\n\n dbfile = os.path.join(spath, f\"{source}.db\")\n\n # Convert filtered Posts.xml file to SQLite db file\n xml2db = XML2DB()\n xml2db(filtered, dbfile)\n\n # Get list of all databases to consolidate\n return [\n os.path.join(path, source, f\"{source}.db\") for source in Execute.SOURCES\n ]", "def setupDatabases(con, options, dbList):\n currentDatabases = dbGetFirstColumnAsMap(con, \"select datname from pg_database where datistemplate = false\")\n currentRolenames = dbGetFirstColumnAsMap(con, \"select rolname from pg_roles\")\n trace(\"currentDatabases = \" + str(currentDatabases))\n for dbName in dbList:\n trace(\"dbName='%s'\" % str(dbName))\n setupDatabase(con, options, currentDatabases, currentRolenames, dbName, dbList[dbName])", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def from_files(paths: list[str]) -> Catalog:\n cat = Catalog()\n for file in paths:\n with fsspec.open(file, mode=\"r\") as fh:\n new_cat = Catalog.from_str(fh.read())\n cat = cat.join(new_cat)\n return cat", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def _do_build ():\n if os.path.exists(\"./database\"):\n data_path = \"./database/\"\n elif os.path.exists(\"../database\"):\n data_path = \"../database/\"\n elif 
os.path.exists(\"../../database\"):\n data_path = \"../../database/\"\n else:\n data_path = \".\"\n\n dir_specs = {}\n databases = []\n\n # first pass over the databases to create complete tree:\n for dirpath, dirnames, filenames in os.walk(data_path):\n # all databases are stored\n for name in filenames:\n if name.endswith(\".db\"):\n databases.append(os.path.join(dirpath, name).replace(data_path, \"\"))\n # but we need to store specs here otherwise things could get a bit confusing\n elif name.endswith(\".spec\"):\n possible_dir = os.path.join(dirpath, name[:-5]+\".db\")\n if os.path.exists(possible_dir) and os.path.isdir(possible_dir):\n spec_name = possible_dir.replace(data_path, \"\")\n dir_specs[spec_name] = parse_spec(os.path.join(dirpath, name))\n\n # and we create DatabaseFolders for each subfolder\n for name in dirnames:\n if name.endswith(\".db\"):\n # dump the extension here too\n obj_name = name[:-3]\n this_folder = DatabaseFolder(obj_name)\n\n if dir_specs.has_key(name):\n this_folder.spec = dir_specs.pop(name)\n\n if dirpath != data_path:\n search = dirpath.replace(data_path, \"\").split(PATH_DELIM)\n try:\n top_folder = globals()[search[0]]\n except KeyError:\n raise DatabaseError, \"Subdirectory of a db folder without a DatabaseFolder?\"\n for p in search[1:]:\n if p == name:\n break\n try:\n top_folder = getattr(top_folder, p)\n except AttributeError:\n raise DatabaseError, \"Subdirectory of a db subfolder without a DatabaseFolder subfolder!\"\n top_folder.append(this_folder)\n else:\n globals()[obj_name] = this_folder\n\n for database in databases:\n build_from_file_name(database, data_path)", "def read_dmp_multiple(list_files, **kwargs):\n # Read files\n list_data = []\n for file in list_files:\n list_data.append(read_dmp(file, **kwargs))\n # Combine into single dataframe\n data = pd.concat(list_data,\n axis=0, join='outer', ignore_index=True)\n # Drop duplicates if multiple DMP files contain the same address\n data = data.drop_duplicates(subset=['address', 'city', 'zip'])\n return data", "def parse_database(db_files, key_list,\n parse_func=None,\n filter_func=lambda x: True):\n import shelve\n import utils.utilities as utils\n if parse_func is None:\n parse_func = utils.eat_keys\n assert not isinstance(parse_func, tuple)\n if not isinstance(db_files, list):\n temp = []\n temp.append(db_files)\n db_files = temp\n res = {}\n for k in key_list:\n res[k] = []\n for fil in db_files:\n # print(\"Opening {}...\".format(fil))\n tar = shelve.open(fil)\n keys = sorted(list(z for z in list(tar.keys()) if z !=\n 'current_response'))\n for ky in keys:\n if filter_func(tar[ky]) is True:\n for k in key_list:\n res[k].append(parse_func(tar[ky], k))\n # res[k].append(utils.eat_keys(tar[ky], k))\n tar.close()\n return res", "def generate_schema_list():\n src = os.path.join(os.path.dirname(__file__), '../schemas')\n for root, dirs, files in os.walk(src):\n for fname in files:\n if not fname.endswith('.yaml'):\n continue\n if os.path.splitext(fname)[0] in (\n 'draft-01', 'asdf-schema-1.0.0'):\n continue\n yield os.path.join(root, fname)", "def linear(files):\n return list(map(insert_to_mongo, files))", "def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):\n db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')\n db_names = [db.split('.sqlite3', 1)[0] for db in db_list]\n valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]\n invalid_db_names = set(db_names) - set(valid_db_names)\n if invalid_db_names:\n 
logging.warning(\"Ignoring databases in content database directory '{directory}' with invalid names: {names}\"\n .format(directory=content_database_dir, names=invalid_db_names))\n return valid_db_names", "def load_multiple_constraints(paths: list) -> list:\n constraints = list()\n for constraint_path in paths:\n current_constraints = load_constraints(constraint_path)\n constraints = constraints + current_constraints\n return unique(constraints)", "def build_tables(connection, filename):\n\n # create a table to identify the .db for later\n connection.execute('''CREATE TABLE format\n (\n schema INTEGER,\n basefile TEXT\n ) ''')\n\n connection.execute('INSERT INTO format VALUES (26, ?)', (filename,))\n\n # create the 'cards' table\n connection.execute('''CREATE TABLE cards\n (\n cardid INTEGER PRIMARY KEY AUTOINCREMENT,\n cardname TEXT,\n castcost TEXT,\n color TEXT,\n con_mana INTEGER,\n loyalty TEXT,\n type TEXT,\n power TEXT,\n toughness TEXT,\n v_hand TEXT,\n v_life TEXT,\n cn_position INTEGER,\n virtual TEXT,\n cardtext TEXT\n ) ''')\n\n # create a table for publication data\n connection.execute('''CREATE TABLE published\n (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n expansion TEXT,\n rarity TEXT,\n cnum INTEGER\n ) ''')\n\n # create a table for the setlist\n connection.execute('''CREATE TABLE sets\n (\n abbreviation TEXT,\n setname TEXT,\n released TEXT\n ) ''')\n\n # read a list of sets from setlist.txt\n for data in filtered_file('setlist.txt'):\n connection.execute('INSERT INTO sets VALUES (?, ?, ?)', \\\n (data[1], data[0], data[2]))\n\n # create a table for legal sets for given formats\n connection.execute('''CREATE TABLE legalsets\n (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n format TEXT,\n expansion TEXT\n )''')\n\n # read in formats.txt, using it to fill the legalsets table\n formats = open('formats.txt', 'r')\n\n for data in filtered_file('formats.txt'):\n for expansion in data[1].split(','):\n connection.execute('INSERT INTO legalsets (format, expansion) ' +\\\n 'VALUES (?, ?)', (data[0], expansion))\n\n # close the formats file\n formats.close()\n\n # create a table for banned/restricted cards\n connection.execute('''CREATE TABLE badcards\n (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n format TEXT,\n card TEXT,\n status TEXT\n ) ''')\n\n # read in a list of banned/restricted cards\n for data in filtered_file('bans.txt'):\n connection.execute('INSERT INTO badcards (format, status, card) ' +\\\n 'VALUES (?,?,?)', (data[0], data[1], data[2]))\n\n # create a table listing timeshifted cards\n connection.execute('''CREATE TABLE timeshifted\n (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n expansion TEXT,\n card TEXT\n ) ''')\n\n # add the 'timeshifted' set to the determine_cgroup function\n determine_cgroup.timeshifted = set()\n\n # read in the list of timeshifted cards and insert them\n for data in filtered_file('timeshifted.txt'):\n connection.execute('INSERT INTO timeshifted (expansion, card) ' +\\\n 'VALUES (?,?)', (data[0], data[1]))\n determine_cgroup.timeshifted.add(data[1])\n\n # commit the DB and we're done.\n connection.commit()", "def loading_data_to_sqlite(list_files):\n engine = connecting_database()\n if engine is None:\n return False\n\n print()\n print(\"-\".rjust(60, \"-\"))\n print(\"Loading data\".center(60))\n print(\"-\".rjust(60, \"-\"))\n\n for filename in list_files:\n name, ext = os.path.splitext(filename)\n if ext != '.csv':\n print(\">> WARNING: CSV file invalid!\")\n return False\n\n print(f\">> Populating the table: stg_{name}\")\n df = 
pd.read_csv(path + inputfile + filename, sep=',', header=0)\n df.to_sql(f\"stg_{name}\", con=engine, index=False, if_exists='replace')\n print(\"-\".rjust(60, \"-\"))\n\n return True", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def main(path_to_cdr_ids, path_to_db):\n from sqlalchemy import create_engine\n import pandas as pd\n\n cdr_ids_to_get = set(open(path_to_cdr_ids).readlines())\n\n cdr_ids_str = ','.join(['\"{}\"'.format(x) for x in cdr_ids_to_get])\n query_fmt = 'select * from cdr_id_to_homology where cdr_id in ({})'.format\n\n sql_con = create_engine('sqlite:///{}'.format(path_to_db))\n\n df = pd.read_sql(query_fmt(cdr_ids_str), sql_con)\n\n df = df.pivot(columns='homology').fillna(False)\n\n df.to_pickle('data/generated/homology_df.pkl')", "def load_cfm_results(result_dir, db_list, spec_type, cleanup=False):\n for spec_file in os.listdir(result_dir):\n if ('param' in spec_file) or (spec_file[-4:] != \".log\"):\n continue\n with open(os.path.join(result_dir, spec_file)) as infile:\n data = []\n for x in re.split(r\"energy\\d\\n\", infile.read())[1:]:\n split_data = []\n for y in x.strip().split('\\n'):\n split_data.append([float(z) for z in y.split()])\n data.append(split_data)\n try:\n for db in db_list:\n if spec_type == 'pos':\n db.compounds.update_many(\n {\"Inchikey\": {\"$regex\": \"^\" + spec_file[:-4]}},\n {\"$set\": {\"Pos_CFM_spectra\": {\"10 V\": data[0],\n \"20 V\": data[1],\n \"40 V\": data[2]}}})\n elif spec_type == 'neg':\n db.compounds.update_many(\n {\"Inchikey\": {\"$regex\": \"^\" + spec_file[:-4]}},\n {\"$set\": {\"Neg_CFM_spectra\": {\"10 V\": data[0],\n \"20 V\": data[1],\n \"40 V\": data[2]}}})\n elif spec_type == 'ei':\n db.compounds.update_many(\n {\"Inchikey\": {\"$regex\": \"^\" + spec_file[:-4]}},\n {\"$set\": {\"EI_CFM_spectra\": {\"70 V\": data[0]}}})\n else:\n raise ValueError('invalid spectrum spec_type')\n except IndexError:\n print(spec_file)\n if cleanup:\n os.remove(os.path.join(result_dir, spec_file))", "def get_clean_sets(clean_cycle_dict, file_name, database_name):\n clean_set_df = pd.DataFrame()\n #name = file_name.split('.')[0]\n #while '/' in file_name: \n while '/' in file_name:\n file_name = file_name.split('/', maxsplit = 1)[1]\n name = file_name.split('.')[0]\n \n for k, v in clean_cycle_dict.items():\n clean_set_df = clean_set_df.append(v, ignore_index = True)\n\n #clean_set_df = clean_set_df.sort_values(['Data_Point'], ascending = True)\n # clean_set_df.reset_index(drop = True)\n \n dbfs.update_database_newtable(clean_set_df, name + 'CleanSet', database_name)\n \n print('All clean cycles recombined and saved in database')\n return clean_set_df", "def makeDatabaseNamesList(n, ):" ]
[ "0.57001996", "0.5689761", "0.565364", "0.56203485", "0.55612475", "0.5559682", "0.5539606", "0.5530373", "0.5513971", "0.5467378", "0.54303205", "0.5418948", "0.5383131", "0.5382098", "0.5376149", "0.5364787", "0.5357825", "0.53471833", "0.53178245", "0.5315089", "0.5298326", "0.52945995", "0.5274347", "0.5273003", "0.5271351", "0.52469915", "0.5235636", "0.5228585", "0.52268624", "0.52091974" ]
0.72360516
0
Given a list of device paths, list log files from specified filesystem. Data is loaded based on the list of start datetimes
def list_log_files(fs, devices, start_times, verbose=True, passwords={}): import canedge_browser log_files = [] if len(start_times): for idx, device in enumerate(devices): start = start_times[idx] log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords) log_files.extend(log_files_device) if verbose: print(f"Found {len(log_files)} log files\n") return log_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))", "def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list", "def GetDeviceLogs(log_filenames, logger):\n device_logs = []\n\n for device, device_files in log_filenames.items():\n logger.debug('%s: %s', device, str(device_files))\n device_file_lines = []\n for cur_file in device_files:\n with open(cur_file) as f:\n device_file_lines += [(cur_file, f.read().splitlines())]\n combined_lines = CombineLogFiles(device_file_lines, logger)\n # Prepend each line with a short unique ID so it's easy to see\n # when the device changes. We don't use the start of the device\n # ID because it can be the same among devices. 
Example lines:\n # AB324: foo\n # AB324: blah\n device_logs += [('\\n' + device[-5:] + ': ').join(combined_lines)]\n return device_logs", "def getFileList(self):\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.sensor + sep + 'padhist'\n pattern = '*' + self.sensor + '_hstv*.mat'\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n uTime = stringTimeToUnix(name[0:13] + '_00_00.000')\n if ( self.uStart <= uTime <= self.uStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n fileList.sort()\n self.fileList = fileList", "def getFileList(self):\n print 'getting fileList ...',\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.subDir\n pattern = '*' + self.sensor\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n ufStart = stringTimeToUnix(name[0:23])\n ufStop = stringTimeToUnix(name[24:47])\n if ( ufStart <= self.uStart <= ufStop ) or ( self.uStart <= ufStart <= self.uStop ) or ( ufStart <= self.uStop <= ufStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n## else:\n## print 'OUT:\\n%s\\n%s\\n%s' % (unixTimeToString(ufStart),unixTimeToString(self.uStart),unixTimeToString(ufStop))\n fileList.sort()\n self.fileList = fileList\n print 'done'", "def FindLogFiles(base_dir):\n logcat_filter = re.compile(r'^logcat_(\\S+)_(\\d+)$')\n # list of tuples (<device_id>, <seq num>, <full file path>)\n filtered_list = []\n for cur_file in os.listdir(base_dir):\n matcher = logcat_filter.match(cur_file)\n if matcher:\n filtered_list += [(matcher.group(1), int(matcher.group(2)),\n os.path.join(base_dir, cur_file))]\n filtered_list.sort()\n file_map = {}\n for device_id, _, cur_file in filtered_list:\n if device_id not in file_map:\n file_map[device_id] = []\n\n file_map[device_id] += [cur_file]\n return file_map", "def getPadFiles(padPath, dateStart, dateStop, sensor, ext):\n if dateStart >= dateStop:\n raise 'why start after stop?'\n start = split(dateStart, '_')\n startS = float(start[-1])\n startY, startM, startD, startH, startN = map(int, start[:-1])\n stop = split(dateStop, '_')\n stopS = float(stop[-1])\n stopY, stopM, stopD, stopH, stopN = map(int, stop[:-1])\n y,m,d = prevDate(startY,startM,startD)\n result = ''\n #while y <= stopY and m <= stopM and d <= stopD: # does not handle begin month borders\n while (y,m,d) <= (stopY,stopM,stopD): \n # grab all sensor matching headers from each day ('ls' results are sorted)\n cmd = 'ls -1 %s/year%s/month%02d/day%02d/*/*%s%s' % (padPath, y, m, d, sensor, ext)#; print cmd\n cmdOutput = getoutput(cmd)\n if cmdOutput[-25:] != 'No such file or directory':\n result += cmdOutput + '\\n'#; print result\n y, m , d = nextDate(y, m , d)\n\n if result == '': return [],[],[] # no files to process\n\n # make sure all filenames are OK\n trimmed = split(result, '\\n')\n allLines = []\n for i in trimmed:\n if i != '':\n allLines.append(i)\n\n## print 'allLines[0] is ' + allLines[0]\n\n # keep files 
with data after dateStart & before dateStop\n padFiles = []\n for i in allLines:\n fname = split(i,'/')[-1] # toss path\n e = split(fname, '-')\n if len(e) == 1:\n e = split(fname, '+')\n if (e[1] >'%s.%s%s' % (dateStart, sensor, ext)) and (e[0] <= '%s.%s%s' % (dateStop, sensor, ext)):\n padFiles.append(i)\n \n # get number of dat columns\n dataColumns = 4 # default\n if sensor == u'oare' or sensor == u'ossraw':\n dataColumns = 6 # mams has temperature and status columns\n\n # get sample rate of first PAD header file\n if padFiles:\n if ext == '':\n sampleRate = float(parse(padFiles[0]+'.header').documentElement.getElementsByTagName('SampleRate')[0].childNodes[0].nodeValue)\n else:\n sampleRate = float(parse(padFiles[0]).documentElement.getElementsByTagName('SampleRate')[0].childNodes[0].nodeValue)\n return padFiles,sampleRate,dataColumns\n else:\n return [],[],[]", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def list_files(line_id, datetime_, len_):\n the_dir = pathlib.Path(data_dir())/str(line_id)\n format_spec= \"%Y-%m-%d %H:%M\"\n dt = datetime.strptime(datetime_, format_spec)\n format_spec2 = \"%Y/%m/%d\" # For path.\n date_path = dt.strftime(format_spec2)\n\n format_spec3 = \"%Y%m%d%H\" # For filename, excluding the minute part for matching all.\n filename_part = dt.strftime(format_spec3)\n\n leaf_dir = the_dir.joinpath(date_path)\n # sorted helps make sure the files are in ascending order.\n files_in_hour = sorted(leaf_dir.glob(\"*{}*.bin\".format(filename_part)))\n\n # Filter further to include only the files starting at the minute and the length.\n starting_minute = dt.time().minute\n ending_minute = starting_minute + len_\n ending_dt = dt + timedelta(minutes=len_)\n \n # define a filter function.\n def filter_by_minutes(file_: pathlib.Path):\n parts = file_.name.split('-')\n assert len(parts) == 3\n file_dt_minute = int(parts[1][10:12]) # Only the yyyymmddHHMM length is 12. \n return file_dt_minute >= starting_minute and file_dt_minute <= ending_minute\n \n\n result_files = list(filter(filter_by_minutes, files_in_hour))\n\n return result_files", "def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename", "def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname", "def CombineLogFiles(list_of_lists, logger):\n cur_device_log = ['']\n for cur_file, cur_file_lines in list_of_lists:\n # Ignore files with just the logcat header\n if len(cur_file_lines) < 2:\n continue\n common_index = 0\n # Skip this step if list just has empty string\n if len(cur_device_log) > 1:\n try:\n line = cur_device_log[-1]\n # Used to make sure we only splice on a timestamped line\n if re.match(r'^\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3} ', line):\n common_index = cur_file_lines.index(line)\n else:\n logger.warning('splice error - no timestamp in \"%s\"?', line.strip())\n except ValueError:\n # The last line was valid but wasn't found in the next file\n cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']\n logger.info('Unable to splice %s. 
Incomplete logcat?', cur_file)\n\n cur_device_log += ['*'*30 + ' %s' % cur_file]\n cur_device_log.extend(cur_file_lines[common_index:])\n\n return cur_device_log", "def filelist(basedir):\n day_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith(\".png\"):\n day_files.append(os.path.join(file))\n dates_files = []\n\n for i in day_files:\n year = i.split('_')[1]\n day = i.split('_')[2]\n mounth = i.split('_')[3]\n hour = i.split('_')[4]\n dates_files.append(UTCDateTime(year+'-'+mounth+'-'+day+'T'+hour)-3)\n return sorted(dates_files)", "def gen_paths():\n global log_dir, events_file, log_file, datastream_dir\n\n #The root log directory\n log_root_dir = os.path.join(root_dir, \"logs\")\n\n #Figure out what log file index we should use\n #The log file index is a 4-digit number corresponding to an unused log folder\n index = 0\n #If our base log_root_dir exists:\n if os.path.exists(log_root_dir):\n\n #Get existing folders, convert to string list, and sort\n folders = os.listdir(log_root_dir)\n ids = [int(f) for f in folders]\n ids.sort()\n\n #This algorithm determines the next sequential value for our log index, it scans through the existing numbers\n #until either it finds a missing number in sequence, or runs out of numbers to scan.\n\n #Set this to a high number to start with, as it will get set every loop iteration\n last_id = 10000\n for present_index in ids:\n #If we have a break in the number sequence, abort and use what we have\n if present_index > last_id + 1:\n break\n #If we have found a bigger number to use for index\n if present_index > index:\n index = present_index\n\n last_id = present_index\n\n #Convert from largest existing index to the index we should use!\n index += 1\n\n #Set the log_dir, which is the directory for storing all logs during this run session\n log_dir = os.path.join(log_root_dir, str(index).zfill(4))\n\n #Set the log_file, which is a dump of all console output\n log_file = os.path.join(log_dir, \"main.log\")\n\n #Set the events_file, which is where all events are recorded\n events_file = os.path.join(log_dir, \"events.rec\")\n\n #Set the datastream_dir, within which all datastreams are recorded\n datastream_dir = os.path.join(log_dir, \"datastreams\")", "def parse_logs(log_paths):\n from psclient.file_util import read as _read\n\n import datetime, json\n result = []\n for log_path in log_paths:\n data = _read(log_path).split('\\n')\n for row in data:\n if not row: continue\n timestamp, log = row.split(',', 1)\n result.append((\n datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S\"),\n json.loads(log)))\n\n # sort by timestamp\n result.sort()\n\n return result", "def get_access_logs(file_dir=log_dir):\n \n file_list = []\n for myfile in glob.glob1(file_dir, 'access_log*'):\n file_list.append('%s/%s' % (file_dir, myfile))\n# print file_list\n return file_list", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def datafiles(cam, date=None):\n files = [fn for fn in fullpathlist(DATAPATH) if cam in fn]\n if date:\n files = filter_filenames(files, [date])\n return files", "def blosxom_file_list_handler(args):\n request = args[\"request\"]\n\n data = request.getData()\n config = request.getConfiguration()\n\n if data['bl_type'] == 'dir':\n filelist = tools.Walk(request, data['root_datadir'], int(config['depth']))\n elif data['bl_type'] == 'file':\n filelist = [data['root_datadir']]\n else:\n filelist = []\n\n entrylist = []\n for ourfile in filelist:\n entry = 
FileEntry(request, ourfile, data['root_datadir'])\n entrylist.append((entry._mtime, entry))\n\n # this sorts entries by mtime in reverse order. entries that have\n # no mtime get sorted to the top.\n entrylist.sort()\n entrylist.reverse()\n entrylist = [x[1] for x in entrylist]\n \n # Match dates with files if applicable\n if data['pi_yr']:\n month = (data['pi_mo'] in tools.month2num.keys() and tools.month2num[data['pi_mo']] or data['pi_mo'])\n matchstr = \"^\" + data[\"pi_yr\"] + month + data[\"pi_da\"]\n valid_list = [x for x in entrylist if re.match(matchstr, x['fulltime'])]\n else:\n valid_list = entrylist\n\n return valid_list", "def get_file_paths(dir_path, start_date, end_date):\n # Path must end with \"/*\"\n if dir_path[-2:] != '/*':\n raise Exception('Path must end with \"/*\"')\n\n # Get list of files\n list_of_files = glob.glob(dir_path)\n\n # Get first and last files (acording to last modified time)\n first_file = min(list_of_files, key=os.path.getctime)\n latest_file = max(list_of_files, key=os.path.getctime)\n\n # Get timestamps of first and last files\n first_file_ts = get_timestamp_from_path(first_file)\n last_file_ts = get_timestamp_from_path(latest_file)\n\n # Check if we have enought files for period\n if first_file_ts > start_date:\n raise Exception('The provided start_date does not exist in files.')\n if last_file_ts < end_date:\n raise Exception('The provided end_date does not exist in files.')\n\n # Get list of file paths for period (and their timestamps)\n paths, stamps = [], []\n for file_path in list_of_files:\n timestamp = get_timestamp_from_path(file_path)\n # print(start_date/1000,timestamp/1000, end_date/1000)\n\n paths.append(file_path)\n stamps.append(timestamp)\n\n\n # if start_date >= timestamp <= end_date:\n # paths.append(file_path)\n # stamps.append(timestamp)\n # elif timestamp > end_date:\n # return paths, stamps\n return paths, stamps", "def filelist(basedir,interval_period_date,channel_list):\n files = []\n files_list = glob.glob(basedir+'/*')\n files_list_ch = []\n for s in files_list:\n if any(day_s in s for day_s in channel_list):\n files_list_ch.append(s)\n day_files = []\n for ch_folder in files_list_ch:\n files = glob.glob(ch_folder+'/*')\n date_file = [file for file in files if interval_period_date in file]\n if date_file != []:\n day_files.append(date_file[0])\n return sorted(day_files)", "def get_data_files(main_directory):\n print('************************************')\n print('Log data list')\n print('************************************')\n log_files_list = globlin(main_directory + '/*/*.json' , recursive=True)\n song_files_list = globlin(main_directory + '/*/*/*/*/*.json', recursive=True)\n print(log_files_list)\n print('************************************')\n print('Song data list')\n print('************************************')\n print(song_files_list)\n return log_files_list, song_files_list", "def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths", "def read_local_20Hz_files(**kwargs):\n 
pathlst = kwargs.get('pathlst')\n product = kwargs.get('product')\n varalias = kwargs.get('varalias')\n sdate = kwargs.get('sdate')\n edate = kwargs.get('edate')\n twin = kwargs.get('twin')\n\n # establish coords if defined in config file\n timestr = satellite_dict[product]['vardef']['time']\n lonstr = satellite_dict[product]['vardef']['lons']\n latstr = satellite_dict[product]['vardef']['lats']\n\n # adjust start and end\n sdate = sdate - timedelta(minutes=twin)\n edate = edate + timedelta(minutes=twin)\n # get meta data\n ncmeta = ncdumpMeta(pathlst[0])\n ncvar = get_filevarname(varalias, variable_info,\n satellite_dict[product], ncmeta)\n # retrieve sliced data\n ds = read_netcdfs(pathlst)\n ds_sort = ds.sortby(timestr)\n\n # get indices for included time period\n nptime = ds_sort[timestr].data\n print('here0')\n print(len(nptime))\n #dtime = [parse_date(str(nptime[i])) for i in range(len(nptime))]\n print('here1')\n #idx = find_included_times_pd(dtime, sdate=sdate, edate=edate)\n idx = find_included_times_pd(nptime, sdate=sdate, edate=edate)\n print(len(nptime[idx]))\n print('here2')\n dtime = [parse_date(str(nptime[idx][i])) for i in range(len(nptime[idx]))]\n print(dtime)\n print('here3')\n #dtime = list(np.array(dtime)[idx])\n lons = list(((ds_sort[lonstr].data[idx] - 180) % 360) - 180)\n lats = list(ds_sort[latstr].data[idx])\n\n unxt = (nptime[idx].astype(int) / 10**9)\n\n # make dict and start with stdvarname for varalias\n stdvarname = variable_info[varalias]['standard_name']\n vardict = {}\n vardict[stdvarname] = list(ds_sort[ncvar].data[idx])\n vardict['longitude'] = lons\n vardict['latitude'] = lats\n vardict['time'] = unxt\n vardict['datetime'] = dtime\n vardict['time_unit'] = variable_info['time']['units']\n print(vardict.keys())\n return vardict", "def testListDirectory(self):\n test_file_path = self._GetTestFilePath(['unified_logging'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n expected_directory_entries = [\n '0000000000000030.tracev3',\n '0000000000000f85.tracev3',\n 'timesync',\n 'uuidtext']\n\n directory_entries = sorted(test_helper.ListDirectory(test_file_path))\n self.assertEqual(directory_entries, expected_directory_entries)", "def load_logfiles(logfiles):\n headers = []\n dataframes = []\n\n for logfile in logfiles.value:\n header, dataframe = load_logfile(logfile)\n headers.append(header)\n dataframes.append(dataframe)\n\n return headers, dataframes", "def read_by_paths(path_list):\n # create empty df to concatenate to\n base_df = pd.DataFrame(data=None, columns=['timestamp', 'seq', 'accel_x', 'accel_y', 'accel_z', 'accel_magnitude',\n 'accel_pca', 'accel_x_smooth', 'accel_x_lp', 'accel_x_hp',\n 'accel_x_grad', 'accel_x_doublegrad', 'accel_y_smooth', 'accel_y_lp',\n 'accel_y_hp', 'accel_y_grad', 'accel_y_doublegrad', 'accel_z_smooth',\n 'accel_z_lp', 'accel_z_hp', 'accel_z_grad', 'accel_z_doublegrad',\n 'accel_magnitude_smooth', 'accel_magnitude_lp', 'accel_magnitude_hp',\n 'accel_magnitude_grad', 'accel_magnitude_doublegrad',\n 'accel_pca_smooth', 'accel_pca_lp', 'accel_pca_hp', 'accel_pca_grad',\n 'accel_pca_doublegrad', 'subject', 'activity', 'correctness'])\n\n activity_name_dict = get_activity_name_dict()\n\n for path in tqdm.tqdm(path_list):\n subject, activity_name, correctness, _ = path.split('/')[-1].split('_')\n\n df = 
read_single_path(path, keep_axes=True)\n df['subject'] = subject\n df['activity'] = activity_name_dict[activity_name]\n df['correctness'] = correctness.lower()\n\n # concatenate to base\n base_df = pd.concat([base_df, df])\n base_df.reset_index(drop=True, inplace=True)\n\n return base_df", "def list_files(tag=None, inst_id=None, data_path=None, format_str=None):\n if format_str is None:\n # user did not supply an alternative format template string\n format_str = '???c{day:03d}{hour:1d}.{year:02d}?.Z'\n # we use a pysat provided function to grab list of files from the\n # local file system that match the format defined above\n file_list = pysat.Files.from_os(data_path=data_path, format_str=format_str,\n two_digit_year_break=90)\n\n return file_list", "def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])", "def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)" ]
[ "0.6728006", "0.6695421", "0.6667964", "0.65992254", "0.6544616", "0.6534836", "0.6399142", "0.6344492", "0.6294777", "0.6196148", "0.6186986", "0.61749536", "0.6134531", "0.61090463", "0.5961263", "0.59397554", "0.5871865", "0.5811617", "0.58008575", "0.5787555", "0.57859534", "0.57759786", "0.5774384", "0.5767983", "0.5766848", "0.5755038", "0.5751603", "0.5743027", "0.56861013", "0.56741744" ]
0.76617414
0
Rename Signal names by prefixing the full CAN ID (in hex) and/or J1939 PGN
def add_signal_prefix(df_phys, can_id_prefix=False, pgn_prefix=False, bus_prefix=False):
    from J1939_PGN import J1939_PGN

    if df_phys.empty:
        return df_phys
    else:
        prefix = ""

        if bus_prefix:
            prefix += df_phys["BusChannel"].apply(lambda x: f"{x}.")

        if can_id_prefix:
            prefix += df_phys["CAN ID"].apply(lambda x: f"{hex(int(x))[2:].upper()}.")

        if pgn_prefix:
            prefix += df_phys["CAN ID"].apply(lambda x: f"{J1939_PGN(int(x)).pgn}.")

        df_phys["Signal"] = prefix + df_phys["Signal"]

        return df_phys
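A minimal usage sketch, assuming df_phys is a decoded-signal dataframe with the BusChannel, CAN ID, Signal and Physical Value columns referenced above, and that the J1939_PGN helper module imported inside the function is available:

import pandas as pd

# Hypothetical decoded-signal dataframe; column names follow the function above
df_phys = pd.DataFrame(
    {
        "BusChannel": [1, 1],
        "CAN ID": [0x0CF00400, 0x18FEF100],
        "Signal": ["EngineSpeed", "WheelBasedVehicleSpeed"],
        "Physical Value": [1450.5, 72.3],
    }
)

# Prefix each signal name with its CAN ID in hex, e.g. "CF00400.EngineSpeed"
# Note: the function imports J1939_PGN at call time, so that module must be importable
df_phys = add_signal_prefix(df_phys, can_id_prefix=True)
print(df_phys["Signal"].tolist())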
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mangle(signal):\n if type(signal) is list:\n return [mangle(s) for s in signal]\n else:\n (name, index) = signal\n return name + '_' + str.join('_', [str(x) for x in index])", "def _signal_to_common_name(signal: domain.Signal) -> str: # pragma: no cover\n stationb_map: domain.SIGNAL_MAP = {\n (550.0, 10.0, 610.0, 20.0): \"mRFP1\",\n (430.0, 10.0, 480.0, 10.0): \"ECFP\",\n (500.0, 10.0, 530.0, None): \"EYFP\",\n (485.0, 12.0, 520.0, None): \"GFP\",\n (485.0, 12.0, 530.0, None): \"GFP530\",\n 600.0: \"OD\",\n 700.0: \"OD700\",\n }\n return signal.to_label(stationb_map)", "def convert_barcode_id_to_name(multiplex, fc_name, fq):\n fqout = list([None, None])\n if multiplex is None:\n fqout[0] = fq[0]\n if not fq[1] == None:\n fqout[1] = fq[1]\n else:\n bcid2name = dict([(mp['barcode_id'], mp['name']) for mp in multiplex])\n for bcid in bcid2name.keys():\n mstr = \"%s_%s_\" % (fc_name, bcid) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, bcid)\n to_str = \"%s_%s_\" %(fc_name, bcid2name[bcid])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\"_fastq.txt\", \".fastq\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\"_fastq.txt\", \".fastq\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)", "def putconename(self,j_,name_):\n if isinstance(name_,unicode):\n name_ = name_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putconename(self.__nativep,j_,name_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def convert_name_to_barcode_id(multiplex, fc_name, fq):\n fqout = list([None, None])\n name2bcid = dict([(mp['name'], mp['barcode_id']) for mp in multiplex])\n for name in name2bcid.keys():\n mstr = \"%s_%s_\" % (fc_name, name) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, name)\n to_str = \"%s_%s_\" %(fc_name, name2bcid[name])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\".fastq\", \"_fastq.txt\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\".fastq\", \"_fastq.txt\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)", "def convertion_name(idn):\n inputn = 'f522_dh.trainingdata_in.lcv.'+idn+'.hdf5'\n outputn = 'jacobian_'+idn+'.npy'\n return(inputn, outputn)", "def rename(self):\n\n # Remove any zero-padding from single-digit parameter names\n # This reverses any change applied by one of the CUDA writers\n for i in range(self.parser.comp-1, len(self.parser.parsedModel.parameterId)):\n old_name = self.parser.parsedModel.parameterId[i]\n num = old_name[len('parameter'):]\n if len(num) > 1 and num[0] == '0':\n new_name = 'parameter' + str(num[1:])\n self.parser.parsedModel.parameterId[i] = new_name\n self.parser.rename_everywhere(old_name, new_name)\n\n # Remove any zero-padding from single-digit species names\n # This reverses any change applied by one of the CUDA writers\n for i in range(len(self.parser.parsedModel.speciesId)):\n old_name = self.parser.parsedModel.speciesId[i]\n num = old_name[len('species'):]\n if len(num) > 1 and num[0] == '0':\n new_name = 'species' + str(num[1:])\n self.parser.parsedModel.speciesId[i] = new_name\n self.parser.rename_everywhere(old_name, new_name)", "def _revert_encoded_reg_name(self, vdef):\n if vdef.find(\"%\") != -1:\n for (o_reg, re_reg) in 
self.arch.reg_rename_tbl.items():\n vdef = vdef.replace(re_reg, o_reg)\n return vdef", "def control_fastq_filename(demux_folder):\n pattern=re.compile(\"^(P[0-9]+)-([0-9]{3,4}).+fastq.*$\")\n for root, dirs, files in os.walk(demux_folder):\n for f in files:\n matches=pattern.search(f)\n if matches:\n new_name=f.replace(\"{}-{}\".format(matches.group(1), matches.group(2)), \"{}_{}\".format(matches.group(1), matches.group(2)))\n os.rename(os.path.join(root, f), os.path.join(root, new_name))", "def symbolize_sensorname(name):\n return name.lower().replace(\" \", \"_\")", "def seq_name(seq):\n if len(seq) == 1:\n return cp_name(seq[0])\n return 'u' + '_'.join('%04X' % cp for cp in seq)", "def reformat(self, seq_name, *, prefix=\"s\"):\n\t\treturn \"%s_%012u\" % (prefix, self.get_sid(seq_name))", "def rename_name_gene(listOfFile, PATH_FASTA_RENAME) :\n\n\tprint \"\\n#################\"\n\tprint \"# Rename protein\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_RENAME)\n\n\tnew_listOfFile=[]\n\n\tfor my_file in listOfFile :\n\t\tif os.stat(my_file).st_size != 0 :\n\t\t\tnew_listOfFile.append(my_file)\n\n\tseq_to_rename = find_rename_fasta(new_listOfFile)\n\tdict_count = dict([(sequence[1:].rstrip(\" \"), 0) for sequence in seq_to_rename])\n\tprogression=1\n\tnumber_of_file = len(new_listOfFile)\n\n\tfor my_file in new_listOfFile :\n\n\t\tfile_name = os.path.basename(my_file)\n\n\t\tsys.stdout.write(\"{:.2f}% : {}/{} files renamed\\r\".format(progression/float(number_of_file)*100, progression,number_of_file))\n\t\tsys.stdout.flush()\n\t\tprogression += 1\n\n\t\thandle = open(os.path.join(PATH_FASTA_RENAME, file_name), 'w')\n\t\tfasta_reading = SeqIO.parse(my_file, \"fasta\")\n\n\t\tfor seq in fasta_reading :\n\t\t\tif seq.id in dict_count :\n\t\t\t\tif dict_count[seq.id] == 0 :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\telse :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\t\tif \"NC_\" in seq.id :\n\t\t\t\t\t\t# NOTE New name : NC_XXXXXX[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_D_nomProteine\n\t\t\t\t\t\tseq.id = \"_\".join(seq.id.split(\"_\")[:2])+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[2:])\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# NOTE New name : NNNN[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_V_nomProteine\n\t\t\t\t\t\tseq.id = seq.id.split(\"_\")[0]+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[1:])\n\t\t\t\t\tseq.name = seq.id\n\t\t\t\t\tseq.description = \"\"\n\n\t\t\tSeqIO.write(seq, handle, \"fasta\")\n\n\t\thandle.close()\n\n\tprint\n\tprint \"Done!\"\n\treturn", "def create_motion_name(test_name, sensor_code, code_suffix=\"\"):\n return \"%s-%s-%s\" % (test_name, sensor_code, code_suffix)", "def rename_sequences(self, new_fasta, mapping):\n assert isinstance(new_fasta, FASTA)\n new_fasta.create()\n for seq in self:\n new_name = mapping[seq.id]\n nucleotides = str(seq.seq)\n new_fasta.add_str(nucleotides, new_name)\n new_fasta.close()", "def symbolize_sensorname_sysfs(name):\n return name.split(\"_\")[1] + \"_temp\"", "def unique_label(orig_label: str) -> str:\n return orig_label[0] + \"l\" + uuid4().hex\n # TODO: check for meteors.", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n 
os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def get_unique_name(self, prefix):\n\t\tident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n\t\treturn '%s_%d' % (prefix, ident)", "def unmangle_measurement_name(measurement_name):\n measurement_name = measurement_name.replace('_sp_', ' ')\n measurement_name = measurement_name.replace('_dsh_', '-')\n return measurement_name", "def mangle(raw_name: str) -> str:\n\n # Handle names with '.'.\n if '.' in raw_name:\n res = []\n for name in raw_name.split('.'):\n if invalid_identifier.search(name):\n res.append(mangle(name))\n else:\n res.append(name)\n return '.'.join(res)\n\n name = raw_name.lstrip('_')\n underscores = '_' * (len(raw_name) - len(name))\n return underscores + 'hyx_' + _mangle_re.sub(_match, name)", "def _compress_name(name):\n n = 0\n for c in name:\n n = (n * _P1 + ord(c)) % _P2 \n return '%09d' % n", "def rename(*args, ignoreShape: bool=True, uuid: bool=True, **kwargs)->AnyStr:\n pass", "def rename_regvar(*args):\n return _ida_frame.rename_regvar(*args)", "def generate_rename_direct(self, prefix):\n return \"#define %s%s %s\" % (prefix, self.__name, self.__rename)", "def adjust_event_name(event_name):\n pos=find_first_digit(event_name)\n return event_name[pos:]", "def _build_name(name_id):\n return \"xp_%08d\" % name_id", "def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )", "def dummy_junction12():\n return \"junction:chr1:176-224:+\"", "def changeName(name):\n\tif name in [\"<OPEN>\", \"<HIGH>\", \"<LOW>\", \"<CLOSE>\"]:\n\t\t# Frist charector is upper case\n\t\tname = name.replace('<', '').replace('>', '')\n\t\t#name = name[0] + name[1:].lower()\t\t\n\telif name in [\"<VOL>\"]:\n\t\t#name = name.replace(\"<VOL>\", \"Volume\")\n\t\tname = name.replace(\"<VOL>\", \"VOLUME\")\n\telif name in [\"<DTYYYYMMDD>\"]:\n\t\t#name = name.replace(\"<DTYYYYMMDD>\", \"Date\")\n\t\tname = name.replace(\"<DTYYYYMMDD>\", \"DATE\")\n\treturn name" ]
[ "0.6341284", "0.618658", "0.58944404", "0.5803514", "0.56855726", "0.5682436", "0.5643482", "0.5629958", "0.56288826", "0.5605033", "0.5587481", "0.5485764", "0.54565454", "0.5455336", "0.5408659", "0.54004014", "0.53873926", "0.53735286", "0.53688616", "0.5349927", "0.5348748", "0.5335352", "0.5324729", "0.53194356", "0.53170717", "0.5310112", "0.5305447", "0.5303086", "0.53014106", "0.5290801" ]
0.63069767
1
Illustrative example for how to extract a signal and evaluate statistical values vs. defined thresholds. The function can be easily modified for your needs.
def test_signal_threshold(df_phys, signal, threshold):
    df_signal = df_phys[df_phys["Signal"] == signal]["Physical Value"]
    stats = df_signal.agg(["count", "min", "max", "mean", "std"])
    delta = stats["max"] - stats["min"]

    if delta > threshold:
        print(f"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}")
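A minimal usage sketch with hypothetical values, showing how the threshold check can be exercised:

import pandas as pd

# Hypothetical decoded data for a single signal
df_phys = pd.DataFrame(
    {
        "Signal": ["EngineSpeed"] * 4,
        "Physical Value": [800.0, 950.0, 2100.0, 1200.0],
    }
)

# The 'max - min' delta is 1300, so this prints the threshold warning
test_signal_threshold(df_phys, signal="EngineSpeed", threshold=1000)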
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signal_significance(\n y_true, y_proba, sig2incl_ratio, threshold=None, sample_weight=None\n):\n fpr, tpr, thresholds = roc_curve(y_true, y_proba, sample_weight=sample_weight)\n n_bkg = (1 - sig2incl_ratio) * 100\n n_sig = sig2incl_ratio * 100\n B = n_bkg * fpr\n S = n_sig * tpr\n significances = S / np.sqrt(S + B)\n if threshold:\n for t, s in zip(thresholds, significances):\n if t >= threshold:\n return s, t\n else:\n return significances, thresholds", "def _check_threshold(threshold, value):\r\n return threshold[0](value, threshold[1])", "def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):\r\n \r\n muV0, DmuV0 = -60e-3,10e-3\r\n sV0, DsV0 =4e-3, 6e-3\r\n TvN0, DTvN0 = 0.5, 1.\r\n \r\n return P0+P1*(muV-muV0)/DmuV0+\\\r\n P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\\\r\n 0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\\\r\n P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\\\r\n P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\\\r\n P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\\\r\n P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def getstats(img, thresholds):\n number = np.zeros(img.shape, np.float64)\n ev = np.zeros(img.shape, np.float64)\n scatter = np.zeros(img.shape, np.float64)\n for n, s, low, high, evs in thresholds:\n for i in numba.prange(img.shape[0]):\n for j in numba.prange(img.shape[1]):\n if (low < img[i, j]) and (img[i, j] < high):\n scatter[i, j] = s\n number[i, j] = n\n ev[i, j] = img[i, j] - evs\n return ev, number, scatter", "def discrete_potential(function, threshold):\n\n return np.where(function >= threshold, 1, 0)", "def evaluate(self, threshold=0.5):\n pass", "def tryDifferentThreshold(data, device, threshold, isboxplot):\n\n signal = np.array(data.accel_energy_512)\n\n intensity_bool = ((signal >\n np.roll(signal, 1)) & (signal > np.roll(signal, -1)))\n intensity = signal[intensity_bool]\n res = quantileValues(intensity, device)\n nb_intensities = len(intensity)\n # plot all the peaks after filter by threshold\n # way2 try threshold===> need to plot many times\n peaks_boolean = (signal > threshold)\n peaks = signal[peaks_boolean]\n res = quantileValues(peaks, device)\n nb_peaks = len(peaks)\n # boxplot(peaks) # this will take long time\n if threshold == 0 and isboxplot == 0:\n # plot all the peaks ==> BLUE points in the picture\n plt.plot(signal) # same with s.plot()\n plt.plot(intensity_bool.nonzero()[0],\n signal[intensity_bool], 'ro', color='blue')\n nb_peaks = nb_intensities\n elif isboxplot == 0:\n plt.plot(signal)\n plt.plot(peaks_boolean.nonzero()[0],\n signal[peaks_boolean], 'ro', color='yellow')\n elif isboxplot == 1:\n boxplot(peaks)\n\n return res, nb_intensities, nb_peaks", "def signal_eff(y_true, y_proba, mistag_rate_thresh, sample_weight=None):\n\n if hasattr(mistag_rate_thresh, \"__iter__\"):\n effs = []\n for t in mistag_rate_thresh:\n eff = signal_eff(y_proba, y_true, t, sample_weight=sample_weight)\n effs.append(eff)\n return effs\n\n fpr, tpr, _ = roc_curve(y_true, y_proba, sample_weight=sample_weight)\n for b_tag_eff, mistag_rate in zip(tpr, fpr):\n if mistag_rate > mistag_rate_thresh:\n return b_tag_eff", "def overlay_thresholding_function(threshold, positive=True):\n # from the interface class definition above, there will be 3 values\n # for the thresh type: inactive, less than, greater than\n t = threshold[0]\n if threshold[-1] == 'inactive':\n if positive:\n return lambda x: np.ones(x.shape, 'B')\n return lambda x: np.zeros(x.shape, 'B')\n elif threshold[-1] == 'less than':\n if 
positive:\n return lambda x: np.less(x,t)\n return lambda x: np.greater_equal(x,t)\n elif threshold[-1] == 'greater than':\n if positive:\n return lambda x: np.greater(x,t)\n return lambda x: np.less_equal(x,t)\n else:\n print 'unrecognized thresholding parameters:', threshold", "def preprocessSignal(signal, coverage):\n rate = numpy.convolve(signal/coverage, avgWindow, \"same\")\n rate[numpy.any([numpy.isinf(rate), numpy.isnan(rate)], axis=0)] = 0\n mu = numpy.mean(rate)\n sd = numpy.std(rate)\n logging.info(\"RateMean %f -- RateStd %f\" % (mu, sd))\n return rate, mu, sd", "def test_basic(self):\n data = get()\n metrics = [verif.metric.Within(),\n verif.metric.A(), # Hit\n verif.metric.B(), # FA\n verif.metric.C(), # Miss\n verif.metric.D(), # Correct rejection\n verif.metric.Hit(),\n verif.metric.Threat(),\n verif.metric.Conditional(),\n verif.metric.XConditional(func=np.median),\n ]\n intervals = [verif.interval.Interval(-np.inf, 0, True, True), # [-inf, 0]\n verif.interval.Interval(-np.inf, 1, True, True),\n verif.interval.Interval(-np.inf, 2, True, True),\n ]\n obs = [0, 1.5, 2]\n fcst = [3.1, 1.1, -2.1]\n N = len(obs)*1.0\n\n # Each line is one metric (one number for each threshold)\n expected = [[0/N, 100/N, 100/N], # Within\n [0/N, 0/N, 2/N], # Hit\n [1/N, 1/N, 0/N], # FA\n [1/N, 1/N, 1/N], # Miss\n [1/N, 1/N, 0/N], # Correct rejection\n [0, 0, 2.0/3], # Hit rate\n [0, 0, 2.0/3], # Threat score\n [3.1, 3.1, 0.7], # Average fcst given obs in interval\n [0, 0, 1.5], # Average obs given obs in interval\n ]\n\n for m in range(len(metrics)):\n metric = metrics[m]\n for i in range(len(intervals)):\n value = metric.compute_from_obs_fcst(np.array(obs), np.array(fcst), intervals[i])\n ex = expected[m][i] * 1.0\n if np.isnan(value):\n self.assertTrue(np.isnan(ex))\n else:\n self.assertAlmostEqual(ex, value)", "def test_thresholding_args():\n from sleepecg._heartbeat_detection import _thresholding\n filtered_ecg = np.arange(100)\n integrated_ecg = np.arange(100)\n fs = 10\n\n _thresholding(filtered_ecg, integrated_ecg, fs)\n _thresholding(filtered_ecg, integrated_ecg, fs=fs)\n _thresholding(filtered_ecg, integrated_ecg=integrated_ecg, fs=fs)\n _thresholding(filtered_ecg=filtered_ecg, integrated_ecg=integrated_ecg, fs=fs)", "def testD():\n results = {}\n for threshold in ['volume', 25, 35,40, 20, ]: #adjust here\n print threshold\n results[threshold] = testC(threshold=threshold)\n return results", "def test_thresholds_main():\n\n # Parsing arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"Input image\")\n ap.add_argument(\"-o\", \"--out_dir\", required=True, help=\"Output directory\")\n args = ap.parse_args()\n\n # Reading image\n img = cv2.imread(args.image, 0)\n\n if img is None:\n print(\"Invalid input image\")\n return\n\n img_name = basename(args.image).split(\".\")[0]\n\n test_thresholds(img, args.out_dir, img_name)", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def analyze_thresholds(datapath, threshold_lt1, threshold_lt2, normalize = True, save = 1):\n print 'analyzing thresholds...' 
\n current_dir = os.getcwd()\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n \n CR_cts_after_seq_lt1 = data['cr_hist_LT1_first']\n CR_cts_after_seq_lt2 = data['cr_hist_LT2_first']\n\n nr_of_counts = arange(len(CR_cts_after_seq_lt1))\n\n CR_cts_total_lt1 = data['cr_hist_LT1_total']\n CR_cts_total_lt2 = data['cr_hist_LT2_total']\n \n if normalize:\n CR_cts_after_seq_lt2 = CR_cts_after_seq_lt2/float(sum(CR_cts_after_seq_lt2))\n CR_cts_total_lt2 = CR_cts_total_lt2/float(sum(CR_cts_total_lt2))\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()*100\n \n CR_cts_after_seq_lt1 = CR_cts_after_seq_lt1/float(sum(CR_cts_after_seq_lt1))\n CR_cts_total_lt1 = CR_cts_total_lt1/float(sum(CR_cts_total_lt1))\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100\n else:\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_after_seq_lt2.sum())*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_total_lt2.sum())*100\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_after_seq_lt1.sum())\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_total_lt1.sum())\n\n\n #print 'After sequence: LT2 percentage passed = ',num2str(sum(times_passed_after_seq_lt2),1),'%'\n #print 'and LT1 percentage passed = ',num2str(sum(times_passed_after_seq_lt1),1),'%'\n\n Log = False\n\n figure6 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(223)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt2,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,25)\n \n plt.subplot(224)\n plt.bar(nr_of_counts,CR_cts_total_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: all CR checks, passed threshold: '+num2str(times_passed_overall_lt2,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,25)\n\n plt.subplot(221)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt1,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,50)\n \n plt.subplot(222)\n plt.bar(nr_of_counts,CR_cts_total_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: all CR checks, passed threshold: '+num2str(times_passed_overall_lt1,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,50)\n \n if save:\n if normalize:\n figure6.savefig('CR_information_LT1_and_LT2_normalized.pdf')\n else:\n figure6.savefig('CR_information_LT1_and_LT2.pdf')\n\n\n return times_passed_overall_lt1, times_passed_after_seq_lt1, times_passed_overall_lt2, times_passed_after_seq_lt2", "def global_threshold(img, 
threshold_method):\n pass", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def plotThreshold(data, type_plot):\n signal = np.array(data.accel_energy_512)\n p = (signal > np.roll(signal, 1)) & (signal > np.roll(signal, -1))\n peaks = signal[p]\n\n if type_plot == 0:\n mean = peaks.mean()\n std = peaks.std()\n max_Peak = peaks.max()\n min_Peak = peaks.min()\n x = np.arange(min_Peak, max_Peak, 0.1)\n # y = normfun(x, mean, std)\n y = norm.pdf(x, mean, std)\n plt.plot(x, y, color='red')\n plt.hist(peaks, bins=500, color='steelblue', rwidth=0.9, normed=True)\n sns.distplot(peaks, kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n plt.title('Vibration Intensity Distribution')\n plt.xlabel('Vibration Intensity')\n plt.ylabel('Probability')\n elif type_plot == 1:\n # or using seaborn\n # sns.distplot(peaks, rug=True, hist=True)\n # ax = sns.distplot(peaks, rug=True, hist=False) # biger\n ax = sns.distplot(peaks, kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500) # faster\n ax.set(xlabel='Vibration Intensity', ylabel='Probability')", "def OF1_CalculateThresholdValues(param_list, classNum):\n thresholdValues = [(-1., -1.) for _ in range(classNum-1)] # np.arange(classNum - 1)\n #numRow = sp.math.factorial(classNum-1)\n #numCol = classNum-1\n #thresholdValues = np.arange(numCol*numRow).reshape(numRow, numCol)\n indexOrder = np.argsort(param_list[classNum:classNum * 2])\n\n P = [param_list[indexOrder[i]] for i in range(classNum)]\n my = np.sort(param_list[classNum:classNum * 2])\n sigma = [param_list[classNum * 2 + indexOrder[i]] for i in range(classNum)]\n\n for i in range(classNum - 1):\n a = sigma[i] ** 2 - sigma[i + 1] ** 2\n b = 2 * ( my[i] * ( sigma[i + 1] ** 2 ) - my[i + 1] * ( sigma[i] ** 2 ) )\n c = ( sigma[i] * my[i + 1] ) ** 2 - ( sigma[i + 1] * my[i] ) ** 2 + 2 * ( ( sigma[i] * sigma[i + 1] ) ** 2 ) * math.log(( ( sigma[i + 1] * P[i] ) / ( sigma[i] * P[i + 1] ) ))\n\n p = np.poly1d([a, b, c], False, \"T\")\n p_roots = np.roots(p)\n\n if p_roots.size == 1:\n thresholdValues[i] = (np.real(p_roots[0]), -1)\n else:\n r1 = np.real(p_roots[0])\n r2 = np.real(p_roots[1])\n if (r1 == r2) or (r2 < 0.) 
or (r2 > 255.):\n thresholdValues[i] = (r1, -1)\n elif (r1 < 0) or (r1 > 255):\n thresholdValues[i] = (r2, -1)\n else:\n thresholdValues[i] = (r1, r2)\n #r1 = np.amin(p_roots)\n #r2 = np.amax(p_roots)\n #if i > 0:\n #if r1 >= thresholdValues[i-1]:\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n #else:\n #if (r1 >= my[i]) and (r1 < my[i+1]):\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n\n return thresholdValues", "def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)", "def evaluation(model_path, threshold):\n classifier = joblib.load(model_path)\n\n positive = np.load(\"./processed_data/validation/positive.npy\")\n unlabeled = np.load(\"./processed_data/validation/unlabeled.npy\")\n\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(p_result, bins=300)\n plt.show()\n\n tp_rate = np.where(p_result >= threshold, 1, 0).sum() / p_result.shape[0]\n print(tp_rate)\n\n u_result = np.array(classifier.predict_proba(unlabeled[:, :-1])[:, 1])\n plt.hist(u_result, bins=300)\n plt.show()\n\n\n # the following steps aim to filter 'possible' negative instances in the evaluation-unlabeled set\n stageone_classifier = joblib.load(\"./solver_result/liblinear/0.01/logistic.pkl\")\n stgone_result = np.array(stageone_classifier.predict_proba(unlabeled[:,:-1])[:, 1])\n possibly_negative = unlabeled[np.where(stgone_result <= _negative_threshold)]\n print(positive.shape)\n print(unlabeled.shape)\n print(possibly_negative.shape)\n possi_ng_result = np.array(classifier.predict_proba(possibly_negative[:, :-1])[:, 1])\n fp_rate = np.where(possi_ng_result >= threshold, 1, 0).sum() / possi_ng_result.shape[0]\n plt.hist(possi_ng_result, bins=300)\n plt.show()\n\n print(fp_rate)\n print(\"TP: \" + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(math.sqrt(tp_rate * (1 - fp_rate))))", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n 
grid()", "def _threshold(data, sigma=2.0):\r\n return np.mean(data)-sigma*np.sqrt(np.var(data))", "def outlier_flag(data=0, tolerance=2.5, sig_change=0.7, how=1, demo=False, test=False):\n # test section start ------------------------\n if test:\n # build trace with 2 distributions\n N_pts = 2000\n s1 = 10\n u1 = 0\n N_otl = 200\n s2 = 10\n u2 = 100\n data = s1 * np.random.randn(N_pts, 1) + u1\n for _ in range(N_otl):\n randii = int((N_pts - 1) * np.random.rand(1, 1))\n data[randii] = s2 * np.random.randn(1, 1) + u2\n # test section stop ------------------------\n\n sig_ratio = 0\n sigma_nw = 1e20\n flags = np.full_like(data, 1)\n while sig_ratio < sig_change:\n sigma_old = sigma_nw\n inliers = data[flags == 1]\n outliers = data[flags == 0]\n av = np.median(inliers)\n sigma_nw = np.std(inliers)\n sig_ratio = sigma_nw / sigma_old\n\n if how == 1:\n flags = (data - av) < tolerance * sigma_nw\n elif how == 0:\n flags = abs(data - av) < tolerance * sigma_nw\n elif how == -1:\n flags = (data - av) > -tolerance * sigma_nw\n\n if demo:\n lo = np.min(inliers)\n hi = np.max(inliers)\n bins = np.linspace(lo, hi, 40)\n fig2, ax2 = plt.subplots(1, 1)\n ax2.hist(inliers, bins, histtype=\"bar\")\n plt.show()\n\n return inliers, outliers, flags", "def population_statistics(feature_description, data, treatment, target, threshold, is_above, statistic_functions):\n stat_func = statistic_functions\n recorded_values = []\n if is_above:\n for i in range(len(data[treatment])):\n if data[treatment][i] > threshold:\n recorded_values.append(data[target][i])\n else:\n for i in range(0, len(data[treatment])):\n if data[treatment][i] <= threshold:\n recorded_values.append(data[target][i])\n print(f\"{feature_description}:\\n{target}: {stat_func[1](recorded_values)}, {stat_func[2](recorded_values)}\")", "def runTest(exdeflike, indeflike):\n\n with open (\"../data/2016/data/test\", \"r\") as f:\n records = re.split(\"\\n\\n\", f.read().strip()) #separate by double new line\n\n threshold = [0.3, .1] #just a guess for now\n ev = defaultdict(lambda: [0,0,0,0])\n\n for record in records:\n data = [re.split(\"\\t\", d) for d in re.split(\"\\n\", record)]\n try:\n tokens, tags = zip(*data)\n except:\n print data\n pass\n\n for i, token, el, il in test(tokens, exdeflike, indeflike):\n for model in range(4):\n result = \"tn\"\n if decide(el, il, model, threshold):\n result = \"tp\" if tags[i][0] == \"B\" else \"fp\"\n elif tags[i][0] == \"B\":\n result = \"fn\"\n ev[result][model] += 1\n\n for model in range(4):\n ev[\"precision\"][model] = ev[\"tp\"][model] / (ev[\"tp\"][model] + ev[\"fp\"][model])\n ev[\"recall\"][model] = ev[\"tp\"][model] / (ev[\"tp\"][model] + ev[\"fn\"][model])\n ev[\"F1\"][model] = harmonic_mean([ev[\"precision\"][model], ev[\"recall\"][model]])\n\n return ev", "def recursive_threshold_search(\n metric_name, metric_val, y_proba, y_true, sample_weights=None, verbose=False\n):\n ts_next = np.linspace(0, 1, 11)\n prev_min = -1\n prev_max = 999\n ts_final = None\n n_points = 5\n it = 0\n eps_rel = 1e-3\n while True:\n it += 1\n ts, trps, fprs, purities = calc_metrics(\n ts_next, y_proba, y_true, sample_weights\n )\n\n if metric_name == \"score\" or metric_name == \"proba\":\n vals = ts\n elif metric_name == \"eff\":\n vals = trps\n elif metric_name == \"mistag_rate\":\n vals = fprs\n elif metric_name == \"purity\":\n vals = purities\n else:\n raise ValueError(f\"illegal value for `metric_name`: {metric_name}\")\n\n idx = np.argmin(abs(vals - metric_val))\n if abs(vals[idx] - metric_val) / max(metric_val, 
1e-10) < eps_rel:\n if verbose:\n print(f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}]\")\n break\n\n if it > 10:\n if verbose:\n print(\n f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}] [due to REP]\"\n )\n break\n\n prev_min = np.min(vals)\n prev_max = np.max(vals)\n\n if idx == 0:\n ts_next = np.linspace(ts[0], ts[1], n_points)\n continue\n if idx == len(ts) - 1:\n ts_next = np.linspace(ts[-2], ts[-1], n_points)\n continue\n\n if (vals[idx] - metric_val) * (vals[idx + 1] - metric_val) < 0:\n pair = ts[idx], ts[idx + 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n elif (vals[idx] - metric_val) * (vals[idx - 1] - metric_val) < 0:\n pair = ts[idx], ts[idx - 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n if abs(vals[idx] - metric_val) / max(metric_val, 1e-10) > 10 * eps_rel:\n print(\n f\"Warning: returning {vals[idx]} while target was {metric_val}, relative diff. = {abs(vals[idx]-metric_val) / max(metric_val, 1e-10)}\"\n )\n return ts[idx], vals[idx]", "def SampleConditions(testcase, delta):\n if testcase == 0:\n t0 = delta + 1\n t1 = 0\n elif testcase == 1:\n t0 = 0\n t1 = delta + 1\n else:\n t0 = delta + 1\n t1 = delta + 1\n\n # Sample a test case\n passed = False\n while not passed:\n x0 = _xm * rng.uniform(-1, 1)\n x1 = _xm * rng.uniform(-1, 1)\n vm = _vm * rng.uniform(0, 1)\n am = _am * rng.uniform(0, 1)\n v0 = vm * rng.uniform(-1, 1)\n v1 = vm * rng.uniform(-1, 1)\n\n curve1 = interpolator.Compute1DTrajectory(x0, x1, v0, v1, vm, am)\n if not (len(curve1) == 2):\n continue\n t0 = curve1[0].duration\n t1 = curve1[1].duration\n if testcase == 0:\n passed = t0 < delta and t1 >= delta\n elif testcase == 1:\n passed = t0 >= delta and t1 < delta\n else:\n passed = t0 < delta and t1 < delta\n \n return x0, x1, v0, v1, vm, am" ]
[ "0.61532384", "0.61456704", "0.6120584", "0.61185485", "0.60634893", "0.60165465", "0.59483373", "0.59300256", "0.5880095", "0.5865144", "0.58355635", "0.5829337", "0.5825964", "0.5804376", "0.57418394", "0.573643", "0.5662644", "0.56473243", "0.56338394", "0.5594367", "0.55928767", "0.5585554", "0.5583197", "0.55797535", "0.55774194", "0.5533549", "0.55291224", "0.5519757", "0.54837656", "0.5460387" ]
0.6491543
0
Helper function for calculating a new signal based on two signals and a function. Returns a dataframe with the new signal name and physical values
def add_custom_sig(df_phys, signal1, signal2, function, new_signal):
    import pandas as pd

    try:
        s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1)
        s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2)

        df_new_sig = pd.merge_ordered(
            s1, s2, on="TimeStamp", fill_method="ffill",
        ).set_index("TimeStamp")

        df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame()
        df_new_sig["Signal"] = new_signal

        df_phys = df_phys.append(df_new_sig)
    except:
        print(f"Warning: Custom signal {new_signal} not created\n")

    return df_phys
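A minimal call sketch with hypothetical signal names, assuming a decoded df_phys indexed by TimeStamp is already in scope and that an older pandas (pre-2.0, where DataFrame.append still exists) is in use:

# Hypothetical combination function: ratio of two physical values
def speed_ratio(v1, v2):
    return v2 / v1 if v1 else 0

# Derive a new "WheelSpeedRatio" signal from two decoded wheel-speed signals (names assumed)
df_phys = add_custom_sig(df_phys, "WheelSpeedFL", "WheelSpeedFR", speed_ratio, "WheelSpeedRatio")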
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_signals():\n x = np.linspace(390, 410, 200)\n doublet = [(399, 1), (401, 1)]\n y = add_signals(x, doublet, 1)\n X = np.array([x for x, _ in ADD_SIGNALS_DATASET])\n Y = np.array([y / 2 for _, y in ADD_SIGNALS_DATASET]) # scale to match\n print(y)\n print(Y)\n assert np.array_equal(x, X)\n assert np.array_equal(y, Y)", "def combine_signals(series1: pd.Series, series2: pd.Series) -> pd.Series:\n return ((np.sign(series1) == np.sign(series2)) * series1).astype(int, copy=False)", "def generate_signals(symbol, period=default_period, std=default_std, refresh=False, start_date=config.start_date, end_date=config.end_date):\n\n bb(symbol, period, std, refresh=False, start_date=start_date, end_date=end_date)\n df = pd.read_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol), index_col=\"Date\", parse_dates=[\"Date\"])[start_date:end_date]\n\n signal_column_name = get_signal_name(period=period, std=std)\n if signal_column_name not in df.columns:\n lower_column_name = \"Lower\"\n upper_column_name = \"Upper\"\n\n conditions = [\n ((df[\"Close\"].shift(1) > df[lower_column_name].shift(1)) & (df[\"Close\"] < df[lower_column_name])), # price crosses lower band; buy signal\n ((df[\"Close\"].shift(1) < df[upper_column_name].shift(1)) & (df[\"Close\"] > df[upper_column_name])), # price crosses upper band; sell signal\n False, # ((df[\"Close\"].shift(1) < df[\"Mid\"].shift(1)) & (df[\"Close\"] > df[\"Mid\"])) # bb breaches the mid line after a buy signal, soft sell\n False # ((df[\"Close\"].shift(1) > df[\"Mid\"].shift(1)) & (df[\"Close\"] < df[\"Mid\"])) # bb breaches the mid line after a sell signal, soft buy\n ]\n\n df[signal_column_name] = np.select(conditions, ta.signals, default=ta.default_signal)\n utils.debug(df[signal_column_name])\n df.to_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol))\n\n return df[signal_column_name]", "def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")", "def test_create_signal(self):\n self.df_stock = self.backtest.handle_data(self.backtest.df_stock, **self.hd_args)\n\n for side in ('buy', 'sell'):\n self.cs_args['side'] = side\n self.df_signal = self.backtest.create_signal(self.df_stock, **self.cs_args)\n\n print self.df_signal.to_string(line_width=400)\n\n columns = ('date0', 'date1', 'signal0', 'signal1',\n 'close0', 'close1', 'holding', 'pct_chg')\n\n for column in columns:\n self.assertIn(column, self.df_signal.columns)\n\n print 'sum:', self.df_signal['pct_chg'].sum()\n\n print '=' * 100", "def macd_signal(self, period_fast=12, period_slow=26, signal=9,\n column='adj_close'):\n\n ema_fast = pd.Series(\n self.ohlcv[column].ewm(ignore_na=False,\n min_periods=period_fast - 1,\n span=period_fast).mean(),\n name='EMA_fast')\n ema_slow = pd.Series(\n self.ohlcv[column].ewm(ignore_na=False,\n min_periods=period_slow - 1,\n span=period_slow).mean(),\n name='EMA_slow')\n macd_series = pd.Series(ema_fast - ema_slow, name='MACD')\n macd_signal_series = pd.Series(\n macd_series.ewm(ignore_na=False, span=signal).mean(),\n name='MACD_Signal')\n return pd.concat([macd_signal_series, macd_series], axis=1)", "def generate_signals(self):\n signals = {}\n \n\n # Create the set of short and long exponential moving averages over the \n # respective periods\n signals['short'] = self.bars.ewm(span = self.short_window , min_periods=self.long_window-1).mean()\n signals['long'] = self.bars.ewm(span = self.long_window , min_periods=self.long_window-1).mean()\n signals['MACD'] = 
signals['short'] - signals['long']\n signals['MACDsign'] = signals['MACD'].ewm(span = self.signal_window , min_periods=self.long_window-1).mean()\n signals['MACDdiff'] = signals['MACD'] - signals['MACDsign']\n\n \n return signals", "def MACD(DF,a,b,c):\n df = DF.copy()\n df[\"MA_Fast\"]=df[\"Adj Close\"].ewm(span=a,min_periods=a).mean()\n df[\"MA_Slow\"]=df[\"Adj Close\"].ewm(span=b,min_periods=b).mean()\n df[\"MACD\"]=df[\"MA_Fast\"]-df[\"MA_Slow\"]\n df[\"Signal\"]=df[\"MACD\"].ewm(span=c,min_periods=c).mean()\n df.dropna(inplace=True)\n return df", "def scikit_signal_factory(signal_function: callable):\n return FunctionTransformer(signal_function)", "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "def calc_signal(self,\n events,\n data: pd.DataFrame,\n idx: str,\n pf: Portfolio) -> None:\n self.pf = pf\n for key, item in self.id_weight.items():\n positions = self.pf.position_handler.positions\n # No positions in portfolio. Buy to match target weight.\n if not list(positions.items()):\n price = data[key].iloc[0]\n date = pf.current_date\n quantity = int(item * self.pf.total_market_value / price)\n trans = t(name=key,\n direction='B',\n quantity=quantity,\n price=price,\n commission_scheme=self.pf.commission,\n date=date)\n trans_ev = Transaction(date=pf.current_date,\n trans=trans)\n events.put(item=trans_ev)\n # Existing positions. Buy or sell to match target weight.\n else:\n pos_mv = pf.position_handler.positions[key].market_value\n pf_mv = pf.total_market_value\n pos_weight = pos_mv / pf_mv\n diff = pos_weight - item\n\n price = data[key].iloc[0]\n date = pf.current_date\n\n quantity = int(diff * pf_mv / price)\n\n if quantity > 0:\n # Sell excess weight.\n trans = t(name=key,\n direction='S',\n quantity=quantity,\n price=price,\n commission_scheme=self.pf.commission,\n date=date)\n else:\n # Buy the difference in weight.\n trans = t(name=key,\n direction='B',\n quantity=quantity * -1,\n price=price,\n commission_scheme=self.pf.commission,\n date=date)\n trans_ev = Transaction(date=pf.current_date,\n trans=trans)\n events.put(item=trans_ev)", "def getNewDF_X(self, originalDF):\n new_temps = [x for x in range(-10, 10, 1)]\n for unit in range(-10, 10, 1):\n new_temps[unit] = originalDF[['R1', 'G1', 'B1', 'R2', 'G2', 'B2', 'R3', 'G3', 'B3']].iloc[:] + unit\n new_temps[unit]['W1'] = originalDF['W1']\n new_temps[unit]['W2'] = originalDF['W2']\n new_temps[unit]['W3'] = originalDF['W3']\n returnVal = pd.concat(new_temps)\n return returnVal", "def MACD(prices, slow, fast, signal):\r\n emaslow = expMovingAverage(prices, slow)\r\n emafast = expMovingAverage(prices, fast)\r\n emasignal = expMovingAverage(prices, signal )\r\n return emaslow, emafast, emafast - emaslow, emasignal", "def __call__(self, inputs, states):\n new_h = Symmetric_MPS_wavefn(inputs,\n states,\n self.output_size,\n self._num_orders,\n self._virtual_dim,\n True)\n new_h = self._activation(new_h)\n return new_h, new_h", "def generate_signals(self):\n signals = {}\n\n # Create the set of short and long simple moving averages over the \n # respective periods\n signals['short_mavg'] = self.bars.rolling(window=self.short_window).mean()\n signals['long_mavg'] = self.bars.rolling(window=self.long_window).mean()\n\n\n return signals", "def technical_analysis(df, periods=ta_periods, macd_periods=periods_MACD):\n\n original_join_state = SETTINGS.join\n SETTINGS.join = False\n\n if 'Volume' not in df.columns and cols_in_df(df, ['Vol']) != []:\n df['Volume'] = df['Volume (BTC)'] # 
some TA functions need a 'Volume' column\n\n if cols_in_df(df, ['Vol']) != []:\n result = pd.concat([compute_function_different_periods(df, periods, ta.ATR),\n compute_function_different_periods(df, periods, pta.BBANDS),\n compute_function_different_periods(df, periods, pta.STO),\n compute_function_different_periods(df, periods, pta.TRIX),\n # Vortex is a FUCKIN SHIT that gives randomly high values. Fuck it\n # compute_function_different_periods(df, [period for period in periods if period > 6], pta.Vortex),\n compute_function_different_periods(df, periods, pta.RSI),\n # compute_function_different_periods(df, periods, pta.ACCDIST),\n compute_function_different_periods(df, periods, pta.MFI),\n compute_function_different_periods(df, periods, pta.OBV),\n compute_function_different_periods(df, periods, pta.FORCE),\n # compute_function_different_periods(df, periods, pta.EOM),\n compute_function_different_periods(df, periods, pta.CCI),\n compute_function_different_periods(df, periods, pta.COPP),\n compute_function_different_periods(df, periods, pta.KELCH),\n compute_function_different_periods(df, periods, pta.STDDEV),\n compute_function_different_periods(df, periods, pta.MA),\n compute_function_different_periods(df, periods, ta.MMed),\n compute_function_different_periods(df, periods, pta.EMA),\n # compute_function_different_periods(df, periods, pta.MOM),\n # compute_function_different_periods(df,periods, pta.ROC),\n # compute_function_different_periods(df, ROC, log=True),\n # pta.MACD(df, 10, 30),\n\n compute_MACD_different_periods(df, periods=macd_periods)\n # pta.PPSR(df)\n ], axis=1)\n\n else:\n result = pd.concat([compute_function_different_periods(df, periods, ta.ATR),\n compute_function_different_periods(df, periods, pta.BBANDS),\n compute_function_different_periods(df, periods, pta.STO),\n compute_function_different_periods(df, periods, pta.TRIX),\n compute_function_different_periods(df, periods, pta.RSI),\n compute_function_different_periods(df, periods, pta.CCI),\n compute_function_different_periods(df, periods, pta.COPP),\n compute_function_different_periods(df, periods, pta.KELCH),\n compute_function_different_periods(df, periods, pta.STDDEV),\n compute_function_different_periods(df, periods, pta.MA),\n compute_function_different_periods(df, periods, ta.MMed),\n compute_function_different_periods(df, periods, pta.EMA),\n compute_MACD_different_periods(df, periods=macd_periods)\n ], axis=1)\n\n\n # result = result.fillna(method='pad')\n SETTINGS.join = original_join_state\n return out(SETTINGS, df, result)", "def make_state_signal(rec, state_signals=['pupil'], permute_signals=[], generate_signals=[],\n new_signalname='state', sm_win_len=180):\n\n newrec = rec.copy()\n resp = newrec['resp'].rasterize()\n state_signals = state_signals.copy()\n permute_signals = permute_signals.copy()\n generate_signals = generate_signals.copy()\n\n # normalize mean/std of pupil trace if being used\n if any([s.startswith('pupil') for s in state_signals]):\n # save raw pupil trace\n # normalize min-max\n p_raw = newrec[\"pupil\"].as_continuous().copy()\n # p[p < np.nanmax(p)/5] = np.nanmax(p)/5\n # norm to mean 0, variance 1\n p = p_raw - np.nanmean(p_raw)\n p /= np.nanstd(p)\n # hack to make sure state signal matches size of resp\n if 'resp' in newrec.signals.keys():\n #import pdb;pdb.set_trace()\n if p.shape[1] > newrec['resp'].shape[1]:\n p = p[:, :newrec['resp'].shape[1]]\n p_raw = p_raw[:, :newrec['resp'].shape[1]]\n newrec[\"pupil\"] = newrec[\"pupil\"]._modified_copy(p)\n newrec[\"pupil_raw\"] = 
newrec[\"pupil\"]._modified_copy(p_raw)\n\n if any([s.startswith('pupil') for s in state_signals]):\n # save raw pupil trace\n # normalize min-max\n p_raw = newrec[\"pupil\"].as_continuous().copy()\n # p[p < np.nanmax(p)/5] = np.nanmax(p)/5\n # norm to mean 0, variance 1\n p = p_raw - np.nanmean(p_raw)\n p /= np.nanstd(p)\n # hack to make sure state signal matches size of resp\n if 'resp' in newrec.signals.keys():\n # import pdb;pdb.set_trace()\n if p.shape[1] > newrec['resp'].shape[1]:\n p = p[:, :newrec['resp'].shape[1]]\n p_raw = p_raw[:, :newrec['resp'].shape[1]]\n newrec[\"pupil\"] = newrec[\"pupil\"]._modified_copy(p)\n newrec[\"pupil_raw\"] = newrec[\"pupil\"]._modified_copy(p_raw)\n\n if 'pupiln' in state_signals:\n log.info('norm pupil min/max = 0/1')\n p_raw = newrec[\"pupil_raw\"].as_continuous().copy()\n p = p_raw - np.nanmin(p_raw)\n p /= np.nanmax(p)\n newrec[\"pupiln\"] = newrec[\"pupil\"]._modified_copy(p)\n\n for state_signal in [s for s in state_signals if s.startswith('pupil_r')]:\n # copy repetitions of pupil\n newrec[state_signal] = newrec[\"pupil\"]._modified_copy(newrec['pupil']._data)\n newrec[state_signal].chans = [state_signal]\n for state_signal in [s for s in state_signals if s.startswith('pupiln_r')]:\n # copy repetitions of pupil\n newrec[state_signal] = newrec[\"pupiln\"]._modified_copy(newrec['pupiln']._data)\n newrec[state_signal].chans = [state_signal]\n if ('pupil2') in state_signals:\n newrec[\"pupil2\"] = newrec[\"pupil\"]._modified_copy(p ** 2)\n newrec[\"pupil2\"].chans = ['pupil2']\n if ('pupil_dup') in state_signals:\n newrec['pupil_dup']=newrec[\"pupil\"].copy()\n newrec[\"pupil_dup\"].chans = ['pupil_dup']\n if ('pupil_dup2') in state_signals:\n newrec['pupil_dup2']=newrec[\"pupil\"].copy()\n newrec[\"pupil_dup2\"].chans = ['pupil_dup2']\n\n if ('pupil_psd') in state_signals:\n pup = newrec['pupil'].as_continuous().copy()\n fs = newrec['pupil'].fs\n # get spectrogram of pupil\n nperseg = int(60*fs)\n noverlap = nperseg-1\n f, time, Sxx = ss.spectrogram(pup.squeeze(), fs=fs, nperseg=nperseg,\n noverlap=noverlap)\n max_chan = 4 # (np.abs(f - 0.1)).argmin()\n # Keep only first five channels of spectrogram\n #f = interpolate.interp1d(np.arange(0, Sxx.shape[1]), Sxx[:max_chan, :], axis=1)\n #newspec = f(np.linspace(0, Sxx.shape[-1]-1, pup.shape[-1]))\n pad1 = np.ones((max_chan,int(nperseg/2)))*Sxx[:max_chan,[0]]\n pad2 = np.ones((max_chan,int(nperseg/2-1)))*Sxx[:max_chan,[-1]]\n newspec = np.concatenate((pad1,Sxx[:max_chan, :],pad2), axis=1)\n\n # = np.concatenate((Sxx[:max_chan, :], np.tile(Sxx[:max_chan,-1][:, np.newaxis], [1, noverlap])), axis=1)\n newspec -= np.nanmean(newspec, axis=1, keepdims=True)\n newspec /= np.nanstd(newspec, axis=1, keepdims=True)\n\n spec_signal = newrec['pupil']._modified_copy(newspec)\n spec_signal.name = 'pupil_psd'\n chan_names = []\n for chan in range(0, newspec.shape[0]):\n chan_names.append('puppsd{0}'.format(chan))\n spec_signal.chans = chan_names\n\n newrec.add_signal(spec_signal)\n\n if ('pupil_ev' in state_signals) or ('pupil_bs' in state_signals):\n # generate separate pupil baseline and evoked signals\n\n prestimsilence = newrec[\"pupil\"].extract_epoch('PreStimSilence')\n spont_bins = prestimsilence.shape[2]\n pupil_trial = newrec[\"pupil\"].extract_epoch('TRIAL')\n\n pupil_bs = np.zeros(pupil_trial.shape)\n for ii in range(pupil_trial.shape[0]):\n pupil_bs[ii, :, :] = np.mean(\n pupil_trial[ii, :, :spont_bins])\n pupil_ev = pupil_trial - pupil_bs\n\n newrec['pupil_ev'] = 
newrec[\"pupil\"].replace_epoch('TRIAL', pupil_ev)\n newrec['pupil_ev'].chans=['pupil_ev']\n newrec['pupil_bs'] = newrec[\"pupil\"].replace_epoch('TRIAL', pupil_bs)\n newrec['pupil_bs'].chans=['pupil_bs']\n\n # normalize mean/std of pupil trace if being used\n if any([s.startswith('facepca') for s in state_signals]):\n # save raw facepca trace\n # normalize min-max\n p_raw = newrec[\"facepca\"].as_continuous().copy()\n # p[p < np.nanmax(p)/5] = np.nanmax(p)/5\n # norm to mean 0, variance 1\n p = p_raw - np.nanmean(p_raw, axis=1, keepdims=True)\n p /= np.nanstd(p, axis=1, keepdims=True)\n # hack to make sure state signal matches size of resp\n newrec[\"facepca\"] = newrec[\"facepca\"]._modified_copy(p)\n newrec[\"facepca_raw\"] = newrec[\"facepca\"]._modified_copy(p_raw)\n\n if ('each_passive' in state_signals):\n file_epochs = ep.epoch_names_matching(resp.epochs, \"^FILE_\")\n pset = []\n found_passive1 = False\n for f in file_epochs:\n # test if passive expt\n epoch_indices = ep.epoch_intersection(\n resp.get_epoch_indices(f),\n resp.get_epoch_indices('PASSIVE_EXPERIMENT'))\n if epoch_indices.size:\n if not(found_passive1):\n # skip first passive\n found_passive1 = True\n else:\n pset.append(f)\n newrec[f] = resp.epoch_to_signal(f)\n state_signals.remove('each_passive')\n state_signals.extend(pset)\n if 'each_passive' in permute_signals:\n permute_signals.remove('each_passive')\n permute_signals.extend(pset)\n\n if ('each_file' in state_signals) or ('each_active' in state_signals):\n file_epochs = ep.epoch_names_matching(resp.epochs, \"^FILE_\")\n trial_indices = resp.get_epoch_indices('TRIAL')\n passive_indices = resp.get_epoch_indices('PASSIVE_EXPERIMENT')\n pset = []\n psetx = []\n pcount = 0\n acount = 0\n # pupil interactions\n\n for f in file_epochs:\n # test if passive expt\n f_indices = resp.get_epoch_indices(f, mask=newrec['mask'])\n\n epoch_indices = ep.epoch_intersection(f_indices, passive_indices)\n added_signal = False\n if not f_indices.size:\n log.info(\"Skipping file %s because empty after masking\", f)\n elif epoch_indices.size:\n # this is a passive file\n name1 = \"PASSIVE_{}\".format(pcount)\n pcount += 1\n if pcount == 1:\n acount = 1 # reset acount for actives after first passive\n else:\n # use first passive part A as baseline - don't model\n if ('each_file' in state_signals):\n pset.append(name1)\n newrec[name1] = resp.epoch_to_signal(name1, indices=f_indices)\n added_signal = True\n else:\n name1 = \"ACTIVE_{}\".format(acount)\n pset.append(name1)\n newrec[name1] = resp.epoch_to_signal(name1, indices=f_indices)\n added_signal = True\n if pcount == 0:\n acount -= 1\n else:\n acount += 1\n if ('p_x_f' in state_signals) and added_signal:\n if name1.startswith('ACTIVE') | ('each_file' in state_signals):\n p = newrec[\"pupil\"].as_continuous()\n a = newrec[name1].as_continuous()\n name1x = name1+'Xpup'\n newrec[name1x] = newrec[\"pupil\"]._modified_copy(p * a)\n newrec[name1x].chans = [name1x]\n psetx.append(name1x)\n\n # test if passive expt\n# epoch_indices = ep.epoch_intersection(\n# resp.get_epoch_indices(f),\n# resp.get_epoch_indices('PASSIVE_EXPERIMENT'))\n# if epoch_indices.size and not(found_passive1):\n# # skip first passive\n# found_passive1 = True\n# else:\n# pset.append(f)\n# newrec[f] = resp.epoch_to_signal(f)\n\n if 'each_file' in state_signals:\n state_signals.remove('each_file')\n state_signals.extend(pset)\n if 'each_active' in state_signals:\n state_signals.remove('each_active')\n state_signals.extend(pset)\n if 'each_file' in permute_signals:\n 
permute_signals.remove('each_file')\n permute_signals.extend(pset)\n if 'each_active' in permute_signals:\n permute_signals.remove('each_active')\n permute_signals.extend(pset)\n\n # add interactions to state list if specified\n if ('p_x_f' in state_signals):\n state_signals.remove('p_x_f')\n state_signals.extend(psetx)\n if 'p_x_f' in permute_signals:\n permute_signals.remove('p_x_f')\n permute_signals.extend(psetx)\n\n if ('each_half' in state_signals):\n file_epochs = ep.epoch_names_matching(resp.epochs, \"^FILE_\")\n trial_indices = resp.get_epoch_indices('TRIAL')\n passive_indices = resp.get_epoch_indices('PASSIVE_EXPERIMENT')\n pset = []\n pcount = 0\n acount = 0\n for f in file_epochs:\n # test if passive expt\n f_indices = resp.get_epoch_indices(f)\n epoch_indices = ep.epoch_intersection(f_indices, passive_indices)\n trial_intersect = ep.epoch_intersection(f_indices, trial_indices)\n #trial_count = trial_intersect.shape[0]\n #_split = int(trial_count/2)\n _t1=trial_intersect[0,0]\n _t2=trial_intersect[-1,1]\n _Xsplit = int((_t1+_t2)/2)\n epoch1 = np.array([[_t1,_split]])\n epoch2 = np.array([[_split,_t2]])\n\n if epoch_indices.size:\n # this is a passive file\n name1 = \"PASSIVE_{}_{}\".format(pcount, 'A')\n name2 = \"PASSIVE_{}_{}\".format(pcount, 'B')\n pcount += 1\n if pcount == 1:\n acount = 1 # reset acount for actives after first passive\n else:\n # don't model PASSIVE_0 A -- baseline\n pset.append(name1)\n newrec[name1] = resp.epoch_to_signal(name1, indices=epoch1)\n\n # do include part B\n pset.append(name2)\n newrec[name2] = resp.epoch_to_signal(name2, indices=epoch2)\n else:\n name1 = \"ACTIVE_{}_{}\".format(acount, 'A')\n name2 = \"ACTIVE_{}_{}\".format(acount, 'B')\n pset.append(name1)\n newrec[name1] = resp.epoch_to_signal(name1, indices=epoch1)\n pset.append(name2)\n newrec[name2] = resp.epoch_to_signal(name2, indices=epoch2)\n\n if pcount == 0:\n acount -= 1\n else:\n acount += 1\n\n state_signals.remove('each_half')\n state_signals.extend(pset)\n if 'each_half' in permute_signals:\n permute_signals.remove('each_half')\n permute_signals.extend(pset)\n\n # generate task state signals\n if 'pas' in state_signals:\n fpre = (resp.epochs['name'] == \"PRE_PASSIVE\")\n fpost = (resp.epochs['name'] == \"POST_PASSIVE\")\n INCLUDE_PRE_POST = (np.sum(fpre) > 0) & (np.sum(fpost) > 0)\n if INCLUDE_PRE_POST:\n # only include pre-passive if post-passive also exists\n # otherwise the regression gets screwed up\n newrec['pre_passive'] = resp.epoch_to_signal('PRE_PASSIVE')\n else:\n # place-holder, all zeros\n newrec['pre_passive'] = resp.epoch_to_signal('XXX')\n newrec['pre_passive'].chans = ['PRE_PASSIVE']\n if 'puretone_trials' in state_signals:\n newrec['puretone_trials'] = resp.epoch_to_signal('PURETONE_BEHAVIOR')\n newrec['puretone_trials'].chans = ['puretone_trials']\n if 'easy_trials' in state_signals:\n newrec['easy_trials'] = resp.epoch_to_signal('EASY_BEHAVIOR')\n newrec['easy_trials'].chans = ['easy_trials']\n if 'hard_trials' in state_signals:\n newrec['hard_trials'] = resp.epoch_to_signal('HARD_BEHAVIOR')\n newrec['hard_trials'].chans = ['hard_trials']\n if ('active' in state_signals) or ('far' in state_signals):\n newrec['active'] = resp.epoch_to_signal('ACTIVE_EXPERIMENT')\n newrec['active'].chans = ['active']\n if (('hit_trials' in state_signals) or ('miss_trials' in state_signals) or\n ('far' in state_signals) or ('hit' in state_signals)):\n newrec['hit_trials'] = resp.epoch_to_signal('HIT_TRIAL')\n newrec['miss_trials'] = resp.epoch_to_signal('MISS_TRIAL')\n 
newrec['fa_trials'] = resp.epoch_to_signal('FA_TRIAL')\n\n sm_len = int(sm_win_len * newrec['resp'].fs)\n if 'far' in state_signals:\n log.info('FAR: sm_win_len=%.0f sm_len=%d', sm_win_len, sm_len)\n a = newrec['active'].as_continuous()\n fa = newrec['fa_trials'].as_continuous().astype(float)\n #c = np.concatenate((np.zeros((1,sm_len)), np.ones((1,sm_len+1))),\n # axis=1)\n c = np.ones((1,sm_len))/sm_len\n\n fa = convolve2d(fa, c, mode='same')\n fa[a] -= 0.25 # np.nanmean(fa[a])\n fa[np.logical_not(a)] = 0\n\n s = newrec['fa_trials']._modified_copy(fa)\n s.chans = ['far']\n s.name='far'\n newrec.add_signal(s)\n\n if 'hit' in state_signals:\n a = newrec['active'].as_continuous()\n hr = newrec['hit_trials'].as_continuous().astype(float)\n ms = newrec['miss_trials'].as_continuous().astype(float)\n ht = hr-ms\n\n c = np.ones((1,sm_len))/sm_len\n\n ht = convolve2d(ht, c, mode='same')\n ht[a] -= 0.1 # np.nanmean(ht[a])\n ht[np.logical_not(a)] = 0\n\n s = newrec['hit_trials']._modified_copy(ht)\n s.chans = ['hit']\n s.name='hit'\n newrec.add_signal(s)\n\n if 'lick' in state_signals:\n newrec['lick'] = resp.epoch_to_signal('LICK')\n\n # pupil interactions\n if ('p_x_a' in state_signals):\n # normalize min-max\n p = newrec[\"pupil\"].as_continuous()\n a = newrec[\"active\"].as_continuous()\n newrec[\"p_x_a\"] = newrec[\"pupil\"]._modified_copy(p * a)\n newrec[\"p_x_a\"].chans = [\"p_x_a\"]\n\n if ('pupil_x_population' in state_signals):\n # normalize min-max\n p = newrec[\"pupil\"].as_continuous().copy()\n p -= np.mean(p, axis=1, keepdims=True)\n a = newrec[\"population\"].as_continuous().copy()\n a -= np.mean(a, axis=1, keepdims=True)\n newrec[\"pupil_x_population\"] = newrec[\"population\"]._modified_copy(p * a)\n newrec[\"pupil_x_population\"].chans = [\"px\"+c for c in newrec[\"pupil_x_population\"].chans]\n\n if ('active_x_population' in state_signals):\n # normalize min-max\n a = newrec[\"active\"].as_continuous().astype(float)\n a -= np.mean(a, axis=1, keepdims=True)\n p = newrec[\"population\"].as_continuous().copy()\n p -= np.mean(p, axis=1, keepdims=True)\n newrec[\"active_x_population\"] = newrec[\"population\"]._modified_copy(p * a)\n newrec[\"active_x_population\"].chans = [\"ax\"+c for c in newrec[\"active_x_population\"].chans]\n\n if ('prw' in state_signals):\n # add channel two of the resp to state and delete it from resp\n if len(rec['resp'].chans) != 2:\n raise ValueError(\"this is for pairwise fitting\")\n else:\n ch2 = rec['resp'].chans[1]\n ch1 = rec['resp'].chans[0]\n\n newrec['prw'] = newrec['resp'].extract_channels([ch2]).rasterize()\n newrec['resp'] = newrec['resp'].extract_channels([ch1]).rasterize()\n\n if ('pup_x_prw' in state_signals):\n # interaction term between pupil and the other cell\n if 'prw' not in newrec.signals.keys():\n raise ValueError(\"Must include prw alone before using interaction\")\n\n else:\n pup = newrec['pupil']._data\n prw = newrec['prw']._data\n sig = newrec['pupil']._modified_copy(pup * prw)\n sig.name = 'pup_x_prw'\n sig.chans = ['pup_x_prw']\n newrec.add_signal(sig)\n\n if 'drift' in state_signals:\n resp_len = rec['resp'].shape[1]\n drift = np.reshape(np.linspace(0,1,resp_len), (1, -1))\n _s = nems0.signal.RasterizedSignal(fs=rec['resp'].fs, data=drift, name=\"drift\",\n recording=rec['resp'].recording, chans=[\"drift\"], epochs=rec['resp'].epochs)\n newrec.add_signal(_s)\n\n # delete any pre-existing state signal. 
Is this a good idea??\n if new_signalname in newrec.signals.keys():\n log.info(\"Deleting existing %s signal before generating new one\", new_signalname)\n del newrec.signals[new_signalname]\n\n for i, x in enumerate(state_signals):\n if x.startswith(\"dummy\"):\n s = rec['resp'].shape[1]\n d_data = np.random.uniform(size=(1, s))\n _s = nems0.signal.RasterizedSignal(fs=rec['resp'].fs, data=d_data, name=x,\n recording=rec['resp'].recording, chans=[x],\n epochs=rec['resp'].epochs)\n newrec.add_signal(_s)\n\n if x in permute_signals:\n # kludge: fix random seed to index of state signal in list\n # this avoids using the same seed for each shuffled signal\n # but also makes shuffling reproducible\n newrec = concatenate_state_channel(\n newrec, newrec[x].shuffle_time(rand_seed=i,\n mask=newrec['mask']),\n state_signal_name=new_signalname)\n elif x in generate_signals:\n # fit a gaussian process to the signal, then generate a new signal using the fit\n newrec = concatenate_state_channel(\n newrec, _generate_gp(newrec[x], rand_seed=i), \n state_signal_name=new_signalname)\n else:\n newrec = concatenate_state_channel(\n newrec, newrec[x], state_signal_name=new_signalname)\n\n newrec = concatenate_state_channel(\n newrec, newrec[x], state_signal_name=new_signalname+\"_raw\")\n \n return newrec", "def calculate_signals(self):\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)", "def signal_to_training( # pylint: disable=too-many-locals\n self,\n signal: Union[Dict, List[Dict]]\n ) -> Tuple[np.ndarray, Tuple[np.ndarray, ...], np.ndarray, Dict[str, Any]]:\n dict_list = list(signal) if isinstance(signal, list) else list((signal, ))\n\n # Initialize the return values\n time_length = len(dict_list[0]['signal']['time']['data']) # type: ignore\n length = int(time_length / 2)\n signals = np.zeros((0, time_length))\n result_r = np.zeros((0, length))\n result_b = np.zeros((0, length))\n result_h = np.zeros((0, length))\n result_m = np.zeros((0, length))\n result_p = np.zeros((0, length))\n answer = np.zeros((0, length))\n config = {\n 'SNR': [],\n 'count': [],\n 'frequencies': [],\n 'amplitudes': [],\n 'minamplitude': [],\n 'mindist': []\n } # type: Dict[str, Any]\n\n # Calculate window functions\n window_bartlett = np.bartlett(time_length)\n window_hanning = np.hanning(time_length)\n window_meyer = self._meyer_wavelet(time_length)\n window_poisson = exponential(time_length, sym=True, tau=(time_length/2)*(8.69/60.0))\n\n # Loop all data entries\n for data in dict_list:\n time = np.asarray(data['signal']['time']['data'])\n signals = np.concatenate((signals, np.reshape(time, (1,) + time.shape)))\n config['SNR'].append(data['signal']['SNR'])\n\n # Assemble the FFTs\n fft = np.fft.fft(time)[:length] / time_length\n result_r = np.concatenate((result_r, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_bartlett)[:length] / time_length\n result_b = np.concatenate((result_b, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_hanning)[:length] / time_length\n result_h = np.concatenate((result_h, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_meyer)[:length] / time_length\n result_m = np.concatenate((result_m, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_poisson)[:length] / time_length\n result_p = np.concatenate((result_p, np.reshape(fft, (1,) + fft.shape)))\n\n # Assemble all the frequencies and amplitudes\n count = 0\n freqs = 
[]\n ampls = []\n counting = np.zeros((1, length))\n for subsig in data['signal']['parts']:\n if subsig['signal']['type'] == 'SingleOscillation':\n count += 1\n freq = subsig['signal']['frequency']\n counting[0, int(max(0, min(length - 1, round(freq))))] += 1\n freqs.append(freq)\n ampls.append(subsig['signal']['amplitude'])\n config['count'].append(count)\n\n # Sort frequencies and amplitudes by frequency\n np_freqs = np.asarray(freqs)\n sorting = np.unravel_index(np.argsort(np_freqs), np_freqs.shape)\n np_freqs = np_freqs[sorting]\n np_ampls = np.asarray(ampls)[sorting]\n\n # Assemble some statistics\n config['mindist'].append(999999. if len(np_freqs) < 2 else np.min(np.diff(np_freqs)))\n config['minamplitude'].append(np.min(np_ampls) if len(np_ampls) > 0 else 999999.)\n config['frequencies'].append(np_freqs)\n config['amplitudes'].append(np_ampls)\n answer = np.concatenate((answer, counting))\n\n # Assemble results\n ffts = (result_r, result_b, result_h, result_m, result_p)\n return signals, ffts, answer, config", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def analytic(self):\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n a_signal =\\\r\n ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape,\r\n dtype='D'), sampling_rate=sampling_rate)\r\n if self.freqs.ndim == 0:\r\n w = self.wavelet(self.freqs, self.sd,\r\n sampling_rate=sampling_rate, ns=5,\r\n normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[...] 
= (np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n else:\r\n for i, (f, sd) in enumerate(zip(self.freqs, self.sd)):\r\n w = self.wavelet(f, sd, sampling_rate=sampling_rate,\r\n ns=5, normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[i, ...] = (\r\n np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n\r\n return a_signal", "def sigmoid_critical_df1(sigma_df,hvals,svals):\n return pd.DataFrame([{'h':h,'s':s,'b_to_c_star':critical_benefit_to_cost1(sigma_df,Z,sigmoid_benefit,h,s)} \n for h in hvals for s in svals])", "def synchronise_signals(in_signal_1, in_signal_2, time_interval = -1, fs = 100):\n\n # signal segmentation\n in_signal_1 = in_signal_1[:time_interval*fs]\n in_signal_2 = in_signal_2[:time_interval*fs]\n\n #in_signal_2 = in_signal_2 - gravitational_filter(in_signal_2, fs)\n in_signal_1 = in_signal_1 * (-1)\n\n #in_signal_1[time_array[0] * fs:time_array[1] * fs] = in_signal_1[time_array[0] * fs:time_array[1] * fs] + 200\n #in_signal_2[time_array[4] * fs:time_array[5] * fs] = in_signal_2[time_array[4] * fs:time_array[5] * fs] + 200\n #in_signal_1[time_array[2] * fs:time_array[3] * fs] = in_signal_1[time_array[2] * fs:time_array[3] * fs] + 200\n #in_signal_2[time_array[6] * fs:time_array[7] * fs] = in_signal_2[time_array[6] * fs:time_array[7] * fs] + 200\n\n\n # signal normalisation\n mean_1, std_1, mean_2, std_2 = [np.mean(in_signal_1), np.std(in_signal_1), np.mean(in_signal_2),\n np.std(in_signal_2)]\n signal_1 = in_signal_1 - mean_1\n signal_1 /= std_1\n signal_2 = in_signal_2 - mean_2\n signal_2 /= std_2\n\n\n # zero padding signals so that they are of same length, this facilitates the calculation because\n # then the delay between both signals can be directly calculated\n # zero padding only if needed\n #if (len(signal_1) != len(signal_2)):\n\n # check which signal has to be zero padded\n # if (len(signal_1) < len(signal_2)):\n\n # pad first signal\n # signal_1 = np.append(signal_1, np.zeros(len(signal_2) - len(signal_1)))\n\n # else:\n\n # pad second signal\n # signal_2 = np.append(signal_2, np.zeros(len(signal_1) - len(signal_2)))\n\n\n N = len(signal_1) + len(signal_2) - 1\n # Calculate the cross-correlation between the two signals.\n #correlation = np.correlate(signal_1, signal_2, 'full')\n f1 = fft(signal_1, N)\n f2 = np.conj(fft(signal_2, N))\n correlation = np.real(ifft(f1 * f2))\n #correlation = fftshift(cc)\n\n\n # calculate tau / shift between both signals\n #tau = int(np.argmax(correlation) - (len(correlation)) / 2)\n tau = np.argmax(correlation)\n print(tau)\n if tau > len(correlation) // 2:\n tau = np.argmax(correlation) - len(correlation)\n print(tau)\n\n # crop signals to original length (removing zero padding)\n #signal_1 = signal_1[:len(in_signal_1)]\n #signal_2 = signal_2[:len(in_signal_2)]\n\n\n # check which signal has to be sliced\n if (tau < 0):\n # tau negative --> second signal lags\n signal_2 = signal_2[np.abs(tau):]\n\n elif (tau > 0):\n # tau positive ---> firs signal lags\n signal_1 = signal_1[np.abs(tau):]\n\n\n # revert signals to orignal scale\n result_signal_1 = signal_1 * std_1 + mean_1\n result_signal_2 = signal_2 * std_2 + mean_2\n\n return tau, result_signal_1, result_signal_2", "def createSignalModelLinear(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', 
tau=sigToTau(.05))\n \n lin_scale = Uniform('lin_scale', lower=0, upper=.01)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, scale=lin_scale):\n out = np.zeros(len(data))\n out[s:] = scale * (timestamp[s:] - s)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()", "def _get_new_lambda(df_intermediate, spark):\n df_intermediate.createOrReplaceTempView(\"df_intermediate\")\n sql = _sql_gen_new_lambda(table_name = \"df_intermediate\")\n\n new_lambda = spark.sql(sql).collect()[0][0]\n logger.debug(_format_sql(sql))\n return new_lambda", "def addMACDBuySignals(self):\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'macd' or not 'signal' in self.df.columns:\n self.addMomentumIndicators()\n\n # true if MACD is above the Signal\n self.df['macdgtsignal'] = self.df.macd > self.df.signal\n # true if the current frame is where MACD crosses over above\n self.df['macdgtsignalco'] = self.df.macdgtsignal.ne(self.df.macdgtsignal.shift())\n self.df.loc[self.df['macdgtsignal'] == False, 'macdgtsignalco'] = False\n\n # true if the MACD is below the Signal\n self.df['macdltsignal'] = self.df.macd < self.df.signal\n # true if the current frame is where MACD crosses over below\n self.df['macdltsignalco'] = self.df.macdltsignal.ne(self.df.macdltsignal.shift())\n self.df.loc[self.df['macdltsignal'] == False, 'macdltsignalco'] = False\n\n # true if OBV is greater than 2%\n self.df['obvsignal'] = self.df.obv_pc > 2", "def __call__(self, dt, **kwargs):\n return self.signal_weights", "def sigmoid_payoffs(df,hvals,svals,bvals,c=1):\n f_jk = get_f_jkAB(get_coop_coop_neighbour_dist(df),get_degree_distribution(df))\n df = pd.concat([mean_payoffs(f_jk,sigmoid_benefit,b,c,h,s).assign(h=h,s=s,b=b)\n for h,s,b in product(hvals,svals,bvals)]).reset_index() \n df = df.set_index(['n','h','s','b'])\n return pd.concat([df['A_pay'].rename('payoff').to_frame().assign(type='A'),df['B_pay'].rename('payoff').to_frame().assign(type='B')]).reset_index()", "def filter_signals(self, df_phys):\n if not df_phys.empty and len(self.signals):\n df_phys = df_phys[df_phys[\"Signal\"].isin(self.signals)]\n\n return df_phys", "def perform_spectral_interpolation(gaussian_data):\n\n dframe = pd.DataFrame()\n wavelength1 = gaussian_data[:, -1]\n\n sampled_wavelength1 = np.arange(min(wavelength1), max(wavelength1), 2)\n wavelength2 = gaussian_data[:, -1]\n sampled_wavelength2 = np.arange(min(wavelength2), max(wavelength2), 2)\n a1_val = gaussian_data[:, 0]\n a2_val = gaussian_data[:, 1]\n sigma1 = gaussian_data[:, 2]\n sigma2 = gaussian_data[:, 3]\n\n # A1 first\n 
fit_params_a1 = interp1d(wavelength1, a1_val, kind='linear')\n fitted_val_a1 = fit_params_a1(sampled_wavelength1)\n # Now A2\n fit_params_a2 = interp1d(wavelength2, a2_val, kind='linear')\n fitted_val_a2 = fit_params_a2(sampled_wavelength2)\n\n # Now Sigma1\n fit_params_sigma1 = interp1d(wavelength1, sigma1, kind='linear')\n fitted_val_sigma1 = fit_params_sigma1(sampled_wavelength1)\n\n # Now Sigma2\n fit_params_sigma2 = interp1d(wavelength2, sigma2, kind='slinear')\n fitted_val_sigma2 = fit_params_sigma2(sampled_wavelength2)\n\n\n# plt.plot(wavelength1, Sigma1, 'bo')\n# plt.plot(sampled_wavelength1, fitted_val_Sigma1, 'ro--', markersize=3)\n# plt.grid(True, linestyle=':')\n# plt.show()\n dframe = pd.DataFrame({'W1' : sampled_wavelength1,\n 'W2' : sampled_wavelength2,\n 'A1' : fitted_val_a1,\n 'A2' : fitted_val_a2,\n 'Sigma1' : fitted_val_sigma1,\n 'Sigma2' : fitted_val_sigma2,\n })\n\n return dframe.round(3)" ]
[ "0.55726427", "0.55375326", "0.54029214", "0.5381461", "0.53808427", "0.53580076", "0.5323321", "0.52420646", "0.5232329", "0.5229146", "0.5227639", "0.5204138", "0.5184703", "0.51758593", "0.51096946", "0.5088613", "0.5079434", "0.50712466", "0.5067344", "0.5062894", "0.50036097", "0.4982948", "0.49708784", "0.49599844", "0.4949273", "0.49163514", "0.49161124", "0.48961288", "0.48896575", "0.48775086" ]
0.76016694
0
Given df of raw data and list of decoding databases, create new df with physical values (no duplicate signals and optionally filtered/rebaselined)
def extract_phys(self, df_raw):
    import can_decoder
    import pandas as pd

    df_phys = pd.DataFrame()
    df_phys_temp = []
    for db in self.db_list:
        df_decoder = can_decoder.DataFrameDecoder(db)

        for bus, bus_group in df_raw.groupby("BusChannel"):
            for length, group in bus_group.groupby("DataLength"):
                df_phys_group = df_decoder.decode_frame(group)
                if not df_phys_group.empty:
                    df_phys_group["BusChannel"] = bus
                    df_phys_temp.append(df_phys_group)

    df_phys = pd.concat(df_phys_temp, ignore_index=False).sort_index()

    # remove duplicates in case multiple DBC files contain identical signals
    df_phys["datetime"] = df_phys.index
    df_phys = df_phys.drop_duplicates(keep="first")
    df_phys = df_phys.drop(labels="datetime", axis=1)

    # optionally filter and rebaseline the data
    df_phys = self.filter_signals(df_phys)

    if not df_phys.empty and type(self.days_offset) == int:
        df_phys = self.rebaseline_data(df_phys)

    return df_phys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_FEMA_P58_bldg_redtag_db(\n source_file,\n target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # take another pass with booleans because the first does not always work\n for true_str in (\"YES\", \"Yes\", \"yes\"):\n df.replace(true_str, True, inplace=True)\n\n for false_str in (\"NO\", \"No\", \"no\"):\n df.replace(false_str, False, inplace=True)\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f'DS {DS_i}, Unsafe Placard Trigger Flag',\n f'DS {DS_i}, Unsafe Placard Damage Median',\n f'DS {DS_i}, Unsafe Placard Damage Dispersion'\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\"\n ]\n\n # create the database index\n comps = df_db_source.index.values\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=comps,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n pass\n # Note that we are assuming that components with simultaneous\n # damage states do not have damage that would trigger a red tag.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n redtag_flag = getattr(\n cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag')\n\n if redtag_flag is True:\n\n theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1\n\n if (pd.isna(theta_0) or pd.isna(theta_1)):\n\n incomplete = True\n\n if ~np.isnan(redtag_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the red tag consequence data from FEMA \"\n \"P58\")", "def update_db(db_name=_db_indicators, start=1950, end=dt.datetime.now().year, write_db=True):\n def 
read_indicators(pdfI=None, coutries=[], ctry_chunksize=50, write_db=True):\n print('UPDATE IMF: Start reading {0} indicators'.format(pdfI.shape[0]))\n #dct_not_data=dict()\n lst_ret=[]\n for k, v in pdfI.iterrows():\n\n lst_pdf=list()\n lst_not_country=list()\n tbl_name=k #'{0}_{1}'.format(k, freq)\n print('UPDATE IMF ({2}-{3}): reading {0}, tDS={1}\\t'.format(k, v['Dataset'], start, end), end='... ')\n for cs in cmm.iterate_group(coutries, ctry_chunksize):\n\n try:\n pdf = pds.read_imf(strDataSetID=v['Dataset'], indiID=k, countryCode=cs,\n frequency=v['Freq'], startDate=start, endDate=end)\n\n lst_pdf.append(pdf)\n lst_not_country+=pdf.not_country\n #print(pdf.name, pdf.shape, len(pdf.not_country))\n except ValueError as e:\n lst_not_country += cs\n\n #print(e, k, 0, 50)\n try:\n pdfC=pds.DataFrameDATA(pd.concat([ppdf for ppdf in lst_pdf if not ppdf.empty]))\n pdfC.name=tbl_name\n #dct_not_data.update({'IND_NOT':tbl_name, 'NOT_DATA':lst_not_country})\n print('read {name},\\tlen {len_df},\\tnot data countries - {nc}'.format(name=pdfC.name,\n len_df=pdfC.shape[0],\n nc=len(lst_not_country)), end='... ')\n if write_db:\n print('write to DB...', end='')\n\n lstWrite=[c for c in pdfC.columns.tolist() if c !='mult']\n\n pdfC[lstWrite].to_sql(pdfC.name, coni, if_exists='upsert')\n cmm.write_status(db_name, k, pdfC.shape[0], mult=pdfC['mult'].unique()[0])\n\n print('done', end='\\n')\n pdfC['INDI']=k\n lst_ret.append(pdfC)\n #print(dct_not_data)\n except ValueError as e:\n print(e, 'not data for ', k, v['Dataset'], len(cs))\n\n return pd.concat(lst_ret)\n\n coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))\n # pdfIndi=pd.read_sql('select * from INDICATORS where LastUpdateDateA is NULL', coni, index_col='Code')\n pdfIndi = pd.read_sql('select * from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), coni, index_col='Code')#.iloc[:40]\n pdfCountry = pd.read_sql('select * from {COUNTRY_NAME}'.format(COUNTRY_NAME=cmm.strCOUNTRY_db_name), coni, index_col='id')\n country_list = pdfCountry.index.tolist()\n print('UPDATE IMF: reading {0} countries'.format(len(country_list)))\n\n pdfQ=read_indicators(pdfI=pdfIndi.sort_index(), coutries=country_list, write_db=write_db)\n print('=' * 50)\n\n print('UPDATE IMF: all done')\n return pdfQ", "def get_dataframe_from_db_with_aux( input_file, conn, sources: list=None, image_min: int=128 ):\n\n if sources is None and input_file is None:\n names = \"\"\n elif input_file is not None:\n if isinstance(input_file, str):\n input_file = [input_file]\n filelist = preprocessFileList( input_file )\n names = [ f'\"{Path(f).name}\"' for f in filelist if '\"' not in f ]\n names = f'AND Sources.name in ({\", \".join(names)})'\n else:\n names = [ f'\"{s}\"' for s in sources ]\n names = f'AND Sources.name in ({\", \".join(names)})'\n\n if image_min is not None:\n images = f\"\"\"AND Sources.image_width >= {image_min}\n AND Sources.image_height >= {image_min}\"\"\"\n else:\n images = \"\"\n\n# Edited values for angle and throttle override all others.\n# Otherwise user overrides pilot. 
But there's no way to know if the user overrode the pilot if the user value is zero.\n# select t.source_id, pos_cte, sm.value, tr.track_id from TubRecords t, Sources s, SourceMeta sm, Tracks tr where t.source_id = s.source_id and s.source_id = sm.source_id and sm.key=\"DONKEY_GYM_ENV_NAME\" AND sm.value=tr.gym_name ORDER BY RANDOM() LIMIT 10;\n\n sql=f\"\"\"SELECT Sources.full_path || '/' || '{Tub.images()}' || '/' || TubRecords.image_path as \"cam/image_array\",\n case when edit_angle is not null then edit_angle\n when pilot_angle is not null and user_angle == 0.0 then pilot_angle\n else user_angle end as \"user/angle\",\n case when edit_throttle is not null then edit_throttle\n when pilot_throttle is not null and user_throttle == 0.0 then pilot_throttle\n else user_throttle end as \"user/throttle\",\n TubRecords.pos_cte,\n Tracks.track_id\n FROM TubRecords, Sources, SourceMeta, Tracks\n WHERE TubRecords.source_id = Sources.source_id\n AND Sources.source_id = SourceMeta.source_id\n AND SourceMeta.key = \"DONKEY_GYM_ENV_NAME\"\n AND SourceMeta.value = Tracks.gym_name\n AND TubRecords.pos_cte is not null\n {names}\n {images}\nAND TubRecords.deleted = 0;\"\"\"\n\n df = pd.read_sql_query(sql, conn)\n df['user/angle'] = df['user/angle'].astype(np.float32)\n df['user/throttle'] = df['user/throttle'].astype(np.float32)\n df['pos_cte'] = df['pos_cte'].astype(np.float32)\n df['track_id'] = df['track_id'].astype(np.int64)\n\n return df", "def col_sqlite(path,name,list_bd_drop,pathlist_names_feature):\n \n \n dfnames=pd.read_csv(pathlist_names_feature,sep=',', header=None) \n df1=dfnames.T\n df1.columns=[\"band_name\"]\n colnames=list(df1.band_name.apply(lambda s: s[2:-1]))\n \n if \".csv\" in path:\n df=pd.read_csv(path)\n globals()[\"%s\"% name ]=df.groupby(\"originfid\").mean()\n labcroirr=globals()[\"df%s\"% name ].labcroirr\n globals()[\"df%s\"% name ].drop(columns=list_bd_drop,inplace=True)\n globals()[\"df%s\"% name ]=globals()[\"df%s\"% name ].T\n\n globals()[\"df%s\"% name ][\"band_names\"]=colnames\n globals()[\"df%s\"% name ][\"date\"] = globals()[\"%s\"% name ].band_names.apply(lambda s: s[-8:])\n globals()[\"df%s\"% name ].set_index(\"band_names\",inplace=True)\n globals()[\"df%s\"% name ]=globals()[\"%s\"% name ].T\n globals()[\"df%s\"% name ][\"labcroirr\"]= labcroirr\n else:\n sql=sqlite3.connect(path)\n df=pd.read_sql_query(\"SELECT * FROM output\", sql)\n globals()[\"df%s\"%name]=df.groupby(\"originfid\").mean()\n labcroirr=globals()[\"df%s\"%name][\"labcroirr\"]\n globals()[\"df%s\"%name].drop(columns=list_bd_drop,inplace=True)\n globals()[\"df%s\"% name ]=globals()[\"df%s\"%name].T\n globals()[\"df%s\"% name ][\"band_names\"]=colnames\n globals()[\"df%s\"% name ][\"date\"] = globals()[\"df%s\"% name ].band_names.apply(lambda s: s[-8:])\n globals()[\"df%s\"% name ].set_index(\"band_names\",inplace=True)\n globals()[\"df%s\"% name ]=globals()[\"df%s\"% name ].T\n globals()[\"df%s\"% name ][\"labcroirr\"]= labcroirr\n return globals()[\"df%s\"% name ]", "def mergeDatabase(df):\n\n\tabsPath = os.path.abspath(__file__)\n\tabsPath = os.path.split(absPath)[0]\n\tabsPath = ''\n\n\tdataPath = os.path.join(absPath, 'dual-data')\n\n\tdfFinal = None\n\n\t# 20210122__0002_lcrPicker has high-freq ap, no lcr b/w spikes\n\trejectionList = ['dual-data/20210122/20210122__0002_lcrPicker.csv']\n\n\tprint('dataPath:', dataPath)\n\tnumFiles = 0\n\tfor obj in os.listdir(dataPath):\n\t\tfolderPath = os.path.join(dataPath, obj)\n\t\tif os.path.isdir(folderPath):\n\t\t\tprint('folderPath:', 
folderPath)\n\t\t\tfor file in os.listdir(folderPath):\n\t\t\t\tif file.startswith('.'):\n\t\t\t\t\tcontinue\n\t\t\t\tif file.endswith('_lcrPicker.csv'):\n\t\t\t\t\tcsvPath = os.path.join(folderPath, file)\n\t\t\t\t\tprint(' csvPath:', csvPath)\n\n\t\t\t\t\tif csvPath in rejectionList:\n\t\t\t\t\t\tprint('!!! rejecting csvPath:', csvPath)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnumFiles += 1\n\t\t\t\t\tif dfFinal is None:\n\t\t\t\t\t\tdfFinal = pd.read_csv(csvPath, header=0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdf0 = pd.read_csv(csvPath, header=0)\n\t\t\t\t\t\tdfFinal = dfFinal.append(df0)\n\t\t\t\t\t\tdfFinal.reset_index(drop=True)\n\t\t\t\t\t\t# todo: should be\n\t\t\t\t\t\t#dfFinal = dfFinal.reset_index(drop=True)\n\n\t#\n\t# add new column for time of lcr before spike\n\t# todo: make new col to get rid of lcr where lcrPreSpikeSec < 0.1 sec\n\tif 1:\n\t\tdfFinal['lcrPreSpikeSec'] = dfFinal['spikeSec'] - dfFinal['lcrSec']\n\n\t\t#print(dfFinal[ np.isnan(dfFinal['lcrPreSpikeSec']) ] )\n\n\t\t# remove lcr (rows) that are close to before the spike, lcrPreSpikeSec<0.1\n\t\t# important: we need second or np.isnan() to KEEP lcrPicker with no spike detecct\n\t\tlcrNoCloserThanSec = 0.15\n\t\tprint('num lcr before removing lcr close to spike:', len(dfFinal))\n\t\tdfFinal = dfFinal[ (dfFinal['lcrPreSpikeSec'] > lcrNoCloserThanSec) | (np.isnan(dfFinal['lcrPreSpikeSec']) ) ]\n\t\tprint(' after removing lcr close to spike:', len(dfFinal))\n\n\t#\n\t# save merged ccsv\n\tmasterCsv = 'lcrPicker-db.csv'\n\tprint('mergeDatabase() merged:', numFiles, '...saving masterCsv:', masterCsv)\n\tdfFinal.to_csv(masterCsv)", "def _build_variable_mapping_df(magnet_strings, length_constants):\n LOG.debug(\" Building Dataframe Mapping\")\n var_to_mag = {}\n for magnet in magnet_strings:\n for order, value_string in magnet_strings[magnet].iteritems():\n if order not in var_to_mag:\n var_to_mag[order] = tfs.TfsDataFrame()\n\n k_dict = _eval_magnet_strength(value_string, length_constants)\n var_to_mag[order] = var_to_mag[order].append(\n tfs.TfsDataFrame([k_dict.values()],\n index=[magnet],\n columns=k_dict.keys()\n )).fillna(0)\n return var_to_mag", "def df_2_dict(df,band_list,lens_model_list,source_model_list,lens_light_model_list):\n \n import re\n from lenstronomy.Util.param_util import ellipticity2phi_q\n from lenstronomy.Util.param_util import shear_cartesian2polar\n \n import pandas as pd\n \n\n model_kwarg_names = get_kwarg_names(lens_model_list,source_model_list,\n lens_light_model_list,None)\n \n IDs = df.loc[:,'ID']\n chi_sq = df.loc[:,'reduced chi^2']\n \n lens_dict = {}\n \n for i,prof in enumerate(lens_model_list):\n lens_dict[prof] = {}\n for param in model_kwarg_names['kwargs_lens'][i]:\n col = df.loc[:,'{}_lens.{}'.format(prof,param)]\n col_array = col.values\n lens_dict[prof][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['q'] = np.array([])\n lens_dict[prof]['phi'] = np.array([]) \n for j in range(len(lens_dict[prof]['e1'])):\n phi,q = ellipticity2phi_q(lens_dict[prof]['e1'][j],lens_dict[prof]['e2'][j])\n lens_dict[prof]['q'] = np.append(lens_dict[prof]['q'],q)\n lens_dict[prof]['phi'] = np.append(lens_dict[prof]['phi'],phi)\n elif 'gamma1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['gamma'] = np.array([])\n lens_dict[prof]['theta'] = np.array([])\n for j in range(len(lens_dict[prof]['gamma1'])):\n theta,gamma = shear_cartesian2polar(lens_dict[prof]['gamma1'][j],lens_dict[prof]['gamma2'][j])\n lens_dict[prof]['gamma'] = 
np.append(lens_dict[prof]['gamma'],gamma)\n lens_dict[prof]['theta'] = np.append(lens_dict[prof]['theta'],theta)\n \n \n source_dict = {}\n lens_light_dict = {}\n \n for i,band in enumerate(band_list):\n for j,prof in enumerate(source_model_list):\n key = '{} Band: {}'.format(band,prof)\n source_dict[key] = {}\n for param in model_kwarg_names['kwargs_source'][j]:\n col = df.loc[:,'{} Band: {}_source.{}'.format(band,prof,param)]\n col_array = col.values\n source_dict[key][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_source'][j]:\n source_dict[key]['q'] = np.array([])\n source_dict[key]['phi'] = np.array([]) \n for k in range(len(source_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(source_dict[key]['e1'][k],source_dict[key]['e2'][k])\n source_dict[key]['q'] = np.append(source_dict[key]['q'],q)\n source_dict[key]['phi'] = np.append(source_dict[key]['phi'],phi)\n \n for j,prof in enumerate(lens_light_model_list):\n key = '{} Band: {}'.format(band,prof)\n lens_light_dict[key] = {}\n for param in model_kwarg_names['kwargs_lens_light'][j]:\n col = df.loc[:,'{} Band: {}_lens_light.{}'.format(band,prof,param)]\n col_array = col.values\n lens_light_dict[key][param] = col_array\n\n if 'e1' in model_kwarg_names['kwargs_lens_light'][j]:\n lens_light_dict[key]['q'] = np.array([])\n lens_light_dict[key]['phi'] = np.array([]) \n for k in range(len(lens_light_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(lens_light_dict[key]['e1'][k],lens_light_dict[key]['e2'][k])\n lens_light_dict[key]['q'] = np.append(lens_light_dict[key]['q'],q)\n lens_light_dict[key]['phi'] = np.append(lens_light_dict[key]['phi'],phi)\n \n params_dict = {'Object IDs': IDs.values,'Reduced Chi^2': chi_sq.values,\n 'lens': lens_dict, 'source': source_dict, 'lens_light': lens_light_dict}\n \n return params_dict", "def get_dataframe_from_db( input_file, conn, sources: list=None, image_min: int=128 ):\n\n if sources is None and input_file is None:\n names = \"\"\n elif input_file is not None:\n if isinstance(input_file, str):\n input_file = [input_file]\n filelist = preprocessFileList( input_file )\n names = [ f'\"{Path(f).name}\"' for f in filelist if '\"' not in f ]\n names = f'AND Sources.name in ({\", \".join(names)})'\n else:\n names = [ f'\"{s}\"' for s in sources ]\n names = f'AND Sources.name in ({\", \".join(names)})'\n\n if image_min is not None:\n images = f\"\"\"AND Sources.image_width >= {image_min}\n AND Sources.image_height >= {image_min}\"\"\"\n else:\n images = \"\"\n\n# Edited values for angle and throttle override all others.\n# Otherwise user overrides pilot. 
But there's no way to know if the user overrode the pilot if the user value is zero.\n sql=f\"\"\"SELECT Sources.full_path || '/' || '{Tub.images()}' || '/' || TubRecords.image_path as \"cam/image_array\",\n-- edit > user > pilot\ncase when edit_angle is not null then edit_angle\n when pilot_angle is not null and user_angle == 0.0 then pilot_angle\n else user_angle end as \"user/angle\",\ncase when edit_throttle is not null then edit_throttle\n when pilot_throttle is not null and user_throttle == 0.0 then pilot_throttle\n else user_throttle end as \"user/throttle\"\n FROM TubRecords, Sources\n WHERE TubRecords.source_id = Sources.source_id\n{names}\n{images}\nAND TubRecords.deleted = 0;\"\"\"\n\n df = pd.read_sql_query(sql, conn)\n\n return df", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def create_Hazus_EQ_bldg_injury_db(source_file,\n target_data_file='bldg_injury_DB_Hazus_EQ.csv',\n target_meta_file='bldg_injury_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'LF']\n comps = [f'{cmp_type}.{bt}'\n for cmp_type in cmp_types for bt in building_types]\n DVs = ['S1', 'S2', 'S3', 'S4']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i-1])\n\n # Second, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i - 1])\n\n # remove empty rows\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 
0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[:, 'DV-Unit'] = \"injury_rate\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from Hazus \"\n \"EQ\")", "def create_FEMA_P58_bldg_injury_db(\n source_file,\n target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n\n f'DS {DS_i}, Potential non-collapse casualty?',\n f'DS {DS_i} - Casualty Affected Area',\n f'DS {DS_i} Serious Injury Rate - Median',\n f'DS {DS_i} Serious Injury Rate - Dispersion',\n f'DS {DS_i} Loss of Life Rate - Median',\n f'DS {DS_i} Loss of Life Rate - Dispersion',\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-AffectedArea\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['S1', 'S2']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = 
f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_S1 = False\n incomplete_S2 = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = \"persons\"\n df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = \"persons\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n inj_data = {}\n ds_tot = 0\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n casualty_model = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_model is True:\n\n inj_data.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion')\n ])})\n ds_tot += 1\n\n elif casualty_model is False:\n ds_tot += 1\n\n # only continue if there is injury data\n if len(inj_data) == 0:\n continue\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = ds_tot\n ds_count = 2 ** (sim_ds_count) - 1\n\n # Here we take advantage of knowing that for every component with\n # simultaneous damage states, only one of the DSs has injury\n # consequences.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n ds_trig = list(inj_data.keys())[0]\n inj_data = inj_data[ds_trig]\n ds_trig = int(ds_trig[2:])\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n if ds_map[-ds_trig] == '1':\n\n # store the consequence data\n for severity in ('S1', 'S2'):\n\n A_affected = 
inj_data[0]\n\n if severity == 'S1':\n theta_0 = inj_data[1]\n theta_1 = inj_data[2]\n elif severity == 'S2':\n theta_0 = inj_data[3]\n theta_1 = inj_data[4]\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n # store the metadata\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[\n f\"DS_{ds_pure_id}_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo)\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n casualty_flag = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_flag is True:\n\n A_affected = getattr(cmp,\n f'DS_{DS_i}___Casualty_Affected_Area')\n\n for severity in ('S1', 'S2'):\n\n if severity == 'S1':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Dispersion')\n elif severity == 'S2':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n if (pd.isna(theta_0) or pd.isna(\n theta_1) or pd.isna(A_affected)):\n\n if severity == 'S1':\n incomplete_S1 = True\n else:\n incomplete_S2 = True\n\n if ~np.isnan(casualty_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1)\n df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 16):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully 
parsed and saved the injury consequence data from FEMA \"\n \"P58\")", "def main(path_to_cdr_ids, path_to_db):\n from sqlalchemy import create_engine\n import pandas as pd\n\n cdr_ids_to_get = set(open(path_to_cdr_ids).readlines())\n\n cdr_ids_str = ','.join(['\"{}\"'.format(x) for x in cdr_ids_to_get])\n query_fmt = 'select * from cdr_id_to_homology where cdr_id in ({})'.format\n\n sql_con = create_engine('sqlite:///{}'.format(path_to_db))\n\n df = pd.read_sql(query_fmt(cdr_ids_str), sql_con)\n\n df = df.pivot(columns='homology').fillna(False)\n\n df.to_pickle('data/generated/homology_df.pkl')", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = 
data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def process_database(self):\n self.DBDict = self.data_df.to_dict(orient=\"index\")\n\n # calculate weight ratio\n self.DBDict = {k: self.calc_compound_weight_ratio(\n self.DBDict[k]) for k in self.DBDict}\n\n # export as dataframe\n self.converted_df = pd.DataFrame(self.DBDict).T\n\n unnest_list = [\"SMILES_wt_list\", \"structureList\",\n \"wt_ratio\", \"fp_list\", \"MWList\"]\n self.converted_df = unnest_dataframe(\n self.converted_df, unnest_list, axis=0)\n\n # unnest FP\n unNest_FP_list = list(self.converted_df.columns[[True if re.match(\n \"fp_list\", i) else False for i in self.converted_df.columns]])\n rename_dict = {k: k+\"_\" for k in unNest_FP_list}\n self.converted_df = self.converted_df.rename(columns=rename_dict)\n\n self.converted_df = unnest_dataframe(\n self.converted_df, rename_dict.values(), axis=0)", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the 
z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def pre_generate(df_dic):\n file_paths = df_dic.keys()\n main_table_id = {}\n for file in file_paths:\n if find_main_id(ent_dic[file]):\n main_table_id[file] = find_main_id(ent_dic[file])\n for file in main_table_id:\n for other_file in main_table_id:\n fake_col = \"fake_\" + main_table_id[other_file]\n if (other_file != file) and list(set(main_table_id[other_file]) - set(df_dic[file].columns)) \\\n and list(set(main_table_id[other_file]) & set(df_dic[file].columns)):\n\n col_use = list(main_table_id[file]) + list(main_table_id[other_file])\n df_other_fake = df_dic[other_file][col_use]\n df_other_fake[fake_col] = df_other_fake[main_table_id[other_file]]\n del df_other_fake[main_table_id[other_file]]\n df_dic[file] = pd.merge(df_dic[file], df_other_fake, on=[main_table_id[file]],\n how='left').drop_duplicates()\n return df_dic", "def data_prep(df, params, if_resample=False):\n\n if if_resample and (params['balanced'] in ['Bootstrap', 'Handsample']):\n if params['balanced'] == 'Bootstrap':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n elif params['balanced'] == 'Handsample':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n\n if params['classnum'] == 6:\n df.drop(df[df['label']=='PTSD'].index, axis=0, inplace=True)\n\n data = list(df.dialog)\n label_encode = LabelEncoder()\n output = dict()\n output['data'] = data\n output['encoded_label'] = label_encode.fit_transform(df.label)\n output['binary_label'] = label_binarize(y=output['encoded_label'], classes=np.arange(params['classnum']))\n return output, label_encode", "def create_FEMA_P58_fragility_db(source_file,\n target_data_file='fragility_DB_FEMA_P58_2nd.csv',\n target_meta_file='fragility_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, 
index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove the empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns that we need for the fragility database\n cols_to_db = [\n \"Demand Parameter (value):\",\n \"Demand Parameter (unit):\",\n \"Demand Location (use floor above? Yes/No)\",\n \"Directional?\",\n \"DS Hierarchy\",\n \"DS 1, Probability\",\n \"DS 1, Median Demand\",\n \"DS 1, Total Dispersion (Beta)\",\n \"DS 2, Probability\",\n \"DS 2, Median Demand\",\n \"DS 2, Total Dispersion (Beta)\",\n \"DS 3, Probability\",\n \"DS 3, Median Demand\",\n \"DS 3, Total Dispersion (Beta)\",\n \"DS 4, Probability\",\n \"DS 4, Median Demand\",\n \"DS 4, Total Dispersion (Beta)\",\n \"DS 5, Probability\",\n \"DS 5, Median Demand\",\n \"DS 5, Total Dispersion (Beta)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n # replace missing values with an empty string\n df_meta.fillna('', inplace=True)\n # the metadata shall be stored in strings\n df_meta = df_meta.astype(str)\n\n # initialize the output fragility table\n df_db = pd.DataFrame(\n columns=[\n \"Index\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=df_db_source.index,\n dtype=float\n )\n\n # initialize the dictionary that stores the fragility metadata\n meta_dict = {}\n\n # conversion dictionary for demand types\n convert_demand_type = {\n 'Story Drift Ratio': \"Peak Interstory Drift Ratio\",\n 'Link Rotation Angle': \"Peak Link Rotation Angle\",\n 'Effective Drift': \"Peak Effective Drift Ratio\",\n 'Link Beam Chord Rotation': \"Peak Link Beam Chord Rotation\",\n 'Peak Floor Acceleration': \"Peak Floor Acceleration\",\n 'Peak Floor Velocity': \"Peak Floor Velocity\"\n }\n\n # conversion dictionary for demand unit names\n convert_demand_unit = {\n 'Unit less': 'unitless',\n 'Radians': 'rad',\n 'g': 'g',\n 'meter/sec': 'mps'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in 
df_db_source.itertuples():\n\n # create a dotted component index\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # store demand specifications\n df_db.loc[cmp.Index, 'Demand-Type'] = (\n convert_demand_type[cmp.Demand_Parameter_value])\n df_db.loc[cmp.Index, 'Demand-Unit'] = (\n convert_demand_unit[cmp.Demand_Parameter_unit])\n df_db.loc[cmp.Index, 'Demand-Offset'] = (\n int(cmp.Demand_Location_use_floor_above_YesNo))\n df_db.loc[cmp.Index, 'Demand-Directional'] = (\n int(cmp.Directional))\n\n # parse the damage state hierarchy\n DS_setup = parse_DS_Hierarchy(cmp.DS_Hierarchy)\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"LimitStates\": {}\n }\n\n # now look at each Limit State\n for LS_i, LS_contents in enumerate(DS_setup):\n\n LS_i = LS_i + 1\n LS_contents = np.atleast_1d(LS_contents)\n\n ls_meta = {}\n\n # start with the special cases with multiple DSs in an LS\n if LS_contents[0] in {'MutEx', 'Simul'}:\n\n # collect the fragility data for the member DSs\n median_demands = []\n dispersions = []\n weights = []\n for ds in LS_contents[1:]:\n median_demands.append(\n getattr(cmp, f\"DS_{ds[2]}_Median_Demand\"))\n\n dispersions.append(\n getattr(cmp, f\"DS_{ds[2]}_Total_Dispersion_Beta\"))\n\n weights.append(getattr(cmp, f\"DS_{ds[2]}_Probability\"))\n\n # make sure the specified distribution parameters are appropriate\n if ((np.unique(median_demands).size != 1) or (\n np.unique(dispersions).size != 1)):\n raise ValueError(f\"Incorrect mutually exclusive DS \"\n f\"definition in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n if LS_contents[0] == 'MutEx':\n\n # in mutually exclusive cases, make sure the specified DS\n # weights sum up to one\n np.testing.assert_allclose(\n np.sum(np.array(weights, dtype=float)), 1.0,\n err_msg=f\"Mutually exclusive Damage State weights do \"\n f\"not sum to 1.0 in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n # and save all DS metadata under this Limit State\n for ds in LS_contents[1:]:\n ds_id = ds[2]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[\n 
f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n else:\n # in simultaneous cases, convert simultaneous weights into\n # mutexc weights\n sim_ds_count = len(LS_contents) - 1\n ds_count = 2 ** (sim_ds_count) - 1\n\n sim_weights = []\n\n for ds_id in range(1, ds_count + 1):\n ds_map = format(ds_id, f'0{sim_ds_count}b')\n\n sim_weights.append(np.product(\n [weights[ds_i]\n if ds_map[-ds_i - 1] == '1' else 1.0-weights[ds_i]\n for ds_i in range(sim_ds_count)]))\n\n # save ds metadata - we need to be clever here\n # the original metadata is saved for the pure cases\n # when only one DS is triggered\n # all other DSs store information about which\n # combination of pure DSs they represent\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # adjust weights to respect the assumption that at least\n # one DS will occur (i.e., the case with all DSs returning\n # False is not part of the event space)\n sim_weights_array = np.array(sim_weights) / np.sum(sim_weights)\n\n weights = sim_weights_array\n\n theta_0 = median_demands[0]\n theta_1 = dispersions[0]\n weights_str = ' | '.join([f\"{w:.6f}\" for w in weights])\n\n df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights_str\n\n # then look at the sequential DS cases\n elif LS_contents[0].startswith('DS'):\n\n # this is straightforward, store the data in the table and dict\n ds_id = LS_contents[0][2]\n\n theta_0 = getattr(cmp, f\"DS_{ds_id}_Median_Demand\")\n theta_1 = getattr(cmp, f\"DS_{ds_id}_Total_Dispersion_Beta\")\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n # FEMA P58 assumes lognormal distribution for every fragility\n df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal'\n\n # identify incomplete cases...\n\n # where theta is missing\n if theta_0 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0\n else:\n incomplete = True\n\n # where beta is missing\n if theta_1 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1\n else:\n incomplete = True\n\n # store the collected metadata for this limit state\n meta_data['LimitStates'].update({f\"LS{LS_i}\": ls_meta})\n\n # store the incomplete flag for this component\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the fragility data from FEMA P58\")", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = 
df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def build_df(path_orig = r'.\\chest_xray', orig_file_ext = 'jpeg', path_seg = r'.\\segmentation', seg_file_ext = 'png', save_path = '.\\df_all.csv'):\n \n read_df = 'C'\n list_df = [] \n \n if os.path.exists(save_path):\n read_df = input('DataFrame was found, would you like to read it (R) or recreate it (C) (default Read)?\\n') or 'R'\n if read_df == 'R':\n df = pd.read_csv(save_path, index_col = 0)\n return df\n \n if read_df == 'C':\n for dirname, _, filenames in os.walk(path_orig):\n for filename in tqdm(filenames, disable=len(filenames)==0):\n if ('.' + orig_file_ext) in filename:\n list_val = []\n list_val.append('PNEUMONIA' if 'PNEUMONIA' in dirname else 'NORMAL')\n list_val.append(1 if 'PNEUMONIA' in dirname else 0)\n list_val.append('bacteria' if 'bacteria' in filename.lower() else 'virus' if 'virus' in filename.lower() else 'normal')\n list_val.append(1 if 'bacteria' in filename.lower() else 2 if 'virus' in filename.lower() else 0)\n list_val.append(filename)\n list_val.append(os.path.join(dirname, filename)) \n list_val.append(filename.replace(orig_file_ext, seg_file_ext))\n list_val.append(os.path.join(dirname.replace(path_orig, path_seg), filename.replace(orig_file_ext, seg_file_ext)))\n list_df.append(list_val)\n\n df = pd.DataFrame(list_df, columns = ['Label_name', 'Label_int', 'Label_pathology', 'Label_pathology_int', 'Filename_orig', 'Filepath_orig', 'Filename_seg', 'Filepath_seg'])\n df.to_csv(save_path)\n \n print('Done')\n \n return df", "async def create_subexchange(csi_stock_df, bi_stock_df, bi_fondi_df):\n\n logger.info('Processing the sub_exchange...')\n await database.connect()\n start = time.time()\n\n # Retrieve any existing values from the sub-exchange table\n existing_df = await SubExchanges.get_all()\n\n # Retrieve any existing values from the exchange table\n exch_df = await Exchanges.get_all()\n\n # csi_stock_df = CSI_Stock.get_all(to_df=True)\n # bi_stock_df = Italy_Stock.get_all()\n #bi_fondi_df = Italy_FONDI.get_all()\n\n # get all sub exchanges in csi stocks\n csi_sub_exch_df = csi_stock_df[['exchange', 'sub_exchange']]\n csi_sub_exch_df = csi_sub_exch_df.drop_duplicates()\n csi_sub_exch_df = csi_sub_exch_df.dropna()\n\n bi_sub_exch_df = bi_stock_df[['nome', 'mercato']]\n # bi_fondi_df = bi_fondi_df[['denominazione', 'mercato']]\n # bi_fondi_df.columns = ['nome', 'mercato']\n bi_sub_exch_df = bi_sub_exch_df.append(bi_fondi_df[['nome', 'mercato']])\n bi_sub_exch_df['nome'] = 'MIL'\n bi_sub_exch_df.columns = ['exchange', 'sub_exchange']\n bi_sub_exch_df = bi_sub_exch_df.drop_duplicates()\n\n sub_exch_df = pd.concat([csi_sub_exch_df, bi_sub_exch_df])\n # replace exchange with own id\n exch_id = exch_df.set_index('symbol').to_dict()['exchange_id']\n sub_exch_df['exchange_id'] = sub_exch_df['exchange'].replace(exch_id)\n sub_exch_df.drop('exchange', axis=1,inplace=True)\n\n # Check if every exchange is present on database\n if not sub_exch_df.loc[~sub_exch_df['exchange_id'].apply(np.isreal)].empty:\n exch_not_listed = sub_exch_df.loc[~sub_exch_df['exchange_id'].apply(np.isreal)]['exchange_id'].to_list()\n logger.error(\"FINDING EXCHANGES NOT PRESENT IN DATABASE: %s\" % 
(','.join(list(set(exch_not_listed)))))\n sub_exch_df = sub_exch_df.loc[sub_exch_df['exchange_id'].apply(np.isreal)]\n\n # Find the values that are different between the two DataFrames\n altered_values_df = altered_values(\n existing_df=existing_df, new_df=sub_exch_df)\n\n # Prepare a new DataFrame with all relevant data for these values\n df = pd.DataFrame()\n df.insert(0, 'subexchange_name', altered_values_df['sub_exchange'])\n df.insert(1, 'exchange_id', altered_values_df['exchange_id'])\n logger.info('Finished processing the Sub Exchange IDs taking %0.2f seconds' % (time.time() - start))\n await database.disconnect()\n return df", "def synthesize_from_table(df, geo_df, targets):\n # replace NaNs with None\n targets = targets.where(targets.notnull(), None)\n\n new_df = df\n\n for _, row in targets.iterrows():\n new_df = synthesize_one(\n df=new_df,\n target=row['target_value'],\n alloc_id=row['geo_id_col'],\n geo_df=geo_df,\n geo_col=row['capacity_col'],\n constraint_expr=row['capacity_expr'],\n filters=row['filters'],\n count=row['count'],\n stuff=row['stuff'])\n\n return new_df", "def get_computed_dataframe(self, df):\n df = add_nid_metadata(df, ['data_type_id'], **self.cache_options)\n has_verbal_autopsy = self.VA in df['data_type_id'].unique()\n\n if self.needs_bridging(has_verbal_autopsy):\n sheet_name = self.get_sheet_name(has_verbal_autopsy)\n map_df = pd.read_excel(self.bridge_map_path, sheetname=sheet_name)\n map_df = map_df[['acause', 'bridge_code']]\n\n # add acause column to deaths data\n bridge_mapped = add_cause_metadata(\n df,\n ['acause'],\n merge_col='cause_id',\n cause_meta_df=self.cause_meta_df\n )\n # hack, this cause_id snuck in somehow...\n bridge_mapped.loc[\n bridge_mapped['cause_id'] == 606, 'acause'\n ] = 'gyne_femaleinfert'\n report_if_merge_fail(bridge_mapped, 'acause', 'cause_id')\n bridge_mapped.drop(['cause_id'], axis=1, inplace=True)\n bridge_mapped = bridge_mapped.merge(\n map_df, how='left', on='acause'\n )\n bridge_mapped = self.acause_to_bridge_code(bridge_mapped)\n # bring cause_id back\n bridge_mapped = add_cause_metadata(\n bridge_mapped,\n ['cause_id'],\n merge_col='acause',\n cause_meta_df=self.cause_meta_df\n )\n\n bridge_mapped.loc[\n bridge_mapped['acause'] == 'gyne_femaleinfert', 'cause_id'\n ] = 606\n report_if_merge_fail(bridge_mapped, 'cause_id', 'acause')\n # output diagnostic dataframe\n self.diag_df = bridge_mapped\n # drop unnecessary columns\n bridge_mapped = self.clean_up(bridge_mapped)\n return bridge_mapped\n else:\n self.diag_df = df\n df = self.clean_up(df)\n return df", "def create_Hazus_EQ_bldg_repair_db(source_file,\n target_data_file='bldg_repair_DB_Hazus_EQ.csv',\n target_meta_file='bldg_repair_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n occupancies = list(\n raw_data['Structural_Fragility_Groups']['Repair_cost'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'NSD', 'NSA', 'LF']\n comps = [f'{cmp_type}.{occ_type}'\n for cmp_type in cmp_types for occ_type in occupancies]\n DVs = ['Cost', 'Time']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the 
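# --- Hedged illustration (not from the source): how a component/DV MultiIndex
# --- consequence table like the ones built in these Hazus/FEMA functions can
# --- be created and filled; the component ids and the example value are
# --- assumptions.
import pandas as pd

comps = ["STR.RES1", "NSD.RES1"]                      # assumed component ids
dvs = ["Cost", "Time"]                                # decision variables
idx = pd.MultiIndex.from_product([comps, dvs], names=["ID", "DV"])
demo = pd.DataFrame(index=idx, columns=["DS1-Theta_0"], dtype=float)
demo.loc[("STR.RES1", "Cost"), "DS1-Theta_0"] = 0.4   # placeholder value only
demo = demo.dropna(how="all")                         # drop rows never filled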
structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'STR.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_cost'][occ_type][ds_i-1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_time'][occ_type][ds_i-1]\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSD.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSD_data['Repair_cost'][occ_type][DS_i-1]\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSA.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSA_data['Repair_cost'][occ_type][DS_i-1]\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'LF.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_cost'][occ_type][ds_i - 1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_time'][occ_type][ds_i - 1]\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[idx[:, 'Cost'], 'DV-Unit'] = \"loss_ratio\"\n df_db.loc[idx[:, 'Time'], 'DV-Unit'] = \"day\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from Hazus \"\n \"EQ\")", "def add_frame_specific_cbf_tables(cbf, wavelength, timestamp, trusted_ranges, diffrn_id = \"DS1\", is_xfel = True, gain = 1.0, flux = None):\n\n \"\"\"Data items in the DIFFRN_RADIATION category describe\n the radiation used for measuring diffraction intensities,\n its collimation and monochromatization before the sample.\n\n Post-sample treatment of the beam is described by data\n items in the DIFFRN_DETECTOR category.\"\"\"\n if flux:\n cbf.add_category(\"diffrn_radiation\", [\"diffrn_id\",\"wavelength_id\",\"probe\",\"beam_flux\"])\n cbf.add_row([diffrn_id,\"WAVELENGTH1\",\"x-ray\",\"%f\"%flux])\n else:\n cbf.add_category(\"diffrn_radiation\", 
[\"diffrn_id\",\"wavelength_id\",\"probe\"])\n cbf.add_row([diffrn_id,\"WAVELENGTH1\",\"x-ray\"])\n\n \"\"\" Data items in the DIFFRN_RADIATION_WAVELENGTH category describe\n the wavelength of the radiation used in measuring the diffraction\n intensities. Items may be looped to identify and assign weights\n to distinct wavelength components from a polychromatic beam.\"\"\"\n cbf.add_category(\"diffrn_radiation_wavelength\", [\"id\",\"wavelength\",\"wt\"])\n cbf.add_row([\"WAVELENGTH1\",str(wavelength),\"1.0\"])\n\n \"\"\"Data items in the DIFFRN_MEASUREMENT category record details\n about the device used to orient and/or position the crystal\n during data measurement and the manner in which the\n diffraction data were measured.\"\"\"\n cbf.add_category(\"diffrn_measurement\",[\"diffrn_id\",\"id\",\"number_of_axes\",\"method\",\"details\"])\n cbf.add_row([diffrn_id,\n \"INJECTION\" if is_xfel else \"unknown\",\"0\",\n \"electrospray\" if is_xfel else \"unknown\"\n \"crystals injected by electrospray\" if is_xfel else \"unknown\"])\n\n \"\"\" Data items in the DIFFRN_SCAN category describe the parameters of one\n or more scans, relating axis positions to frames.\"\"\"\n cbf.add_category(\"diffrn_scan\",[\"id\",\"frame_id_start\",\"frame_id_end\",\"frames\"])\n cbf.add_row([\"SCAN1\",\"FRAME1\",\"FRAME1\",\"1\"])\n\n \"\"\"Data items in the DIFFRN_SCAN_FRAME category describe\n the relationships of particular frames to scans.\"\"\"\n cbf.add_category(\"diffrn_scan_frame\",[\"frame_id\",\"frame_number\",\"integration_time\",\"scan_id\",\"date\"])\n cbf.add_row([\"FRAME1\",\"1\",\"0.0\",\"SCAN1\",timestamp])\n\n \"\"\" Data items in the ARRAY_INTENSITIES category record the\n information required to recover the intensity data from\n the set of data values stored in the ARRAY_DATA category.\"\"\"\n # More detail here: http://www.iucr.org/__data/iucr/cifdic_html/2/cif_img.dic/Carray_intensities.html\n array_names = []\n cbf.find_category(b\"diffrn_data_frame\")\n while True:\n try:\n cbf.find_column(b\"array_id\")\n array_names.append(cbf.get_value().decode())\n cbf.next_row()\n except Exception as e:\n assert \"CBF_NOTFOUND\" in str(e)\n break\n\n if not isinstance(gain, list):\n gain = [gain] * len(array_names)\n\n\n cbf.add_category(\"array_intensities\",[\"array_id\",\"binary_id\",\"linearity\",\"gain\",\"gain_esd\",\"overload\",\"underload\",\"undefined_value\"])\n for i, array_name in enumerate(array_names):\n overload = trusted_ranges[i][1] + 1\n underload = trusted_ranges[i][0]\n undefined = underload - 1\n cbf.add_row([array_name,str(i+1),\"linear\",\"%f\"%gain[i],\"0.0\",str(overload),str(underload),str(undefined)])", "def _init_prepare_database(self, feat_db):\n by_groups = self.db.groupby(self.by)\n\n if self.verbose:\n display = progress_display.ProgressDisplay()\n display.add('block', 'Preprocessing by block', len(by_groups))\n\n for by_key, by_frame in by_groups:\n if self.verbose:\n display.update('block', 1)\n display.display()\n\n # allow to get by values as well as values of other variables\n # that are determined by these\n by_values = dict(by_frame.iloc[0])\n\n # apply 'by' filters\n if self.filters.by_filter(by_values):\n # get analogous feat_db\n by_feat_db = feat_db.iloc[by_frame.index]\n\n # drop indexes\n by_frame = by_frame.reset_index(drop=True)\n\n # reset_index to get an index relative to the 'by' db,\n # the original index could be conserved in an additional\n # 'index' column if necessary by removing the drop=True, but\n # this would add another constraint on the 
possible column name\n by_feat_db = by_feat_db.reset_index(drop=True)\n\n # apply generic filters\n by_frame = self.filters.generic_filter(by_values, by_frame)\n\n self.by_dbs[by_key] = by_frame\n self.feat_dbs[by_key] = by_feat_db\n\n def _by_dbs(l): return self.by_dbs[by_key].groupby(l)\n self.on_blocks[by_key] = _by_dbs(self.on)\n self.across_blocks[by_key] = _by_dbs(self.across)\n self.on_across_blocks[by_key] = _by_dbs(self.on + self.across)\n\n if len(self.across) > 1:\n self.antiacross_blocks[by_key] = dict()\n for across_key in self.across_blocks[by_key].groups:\n b = True\n for i, col in enumerate(self.across):\n b = b * (by_frame[col] != across_key[i])\n self.antiacross_blocks[by_key][across_key] = (\n by_frame[b].index)", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none 
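# --- Hedged aside (not from the source): the per-field sentinel checks in this
# --- loop can also be expressed as one vectorized replace once the records are
# --- in a DataFrame, mirroring the df.replace([...]) call near the end of the
# --- function; the sentinel list below is an assumption based on the values
# --- handled here.
import numpy as np
import pandas as pd

SENTINELS = [-1.e100, -999.9, -999, -9999, 2147483647, -2147483647]
demo = pd.DataFrame({"airTemperature": [250.3, -1.e100],
                     "windDirection": [90, 2147483647]})
demo = demo.replace(SENTINELS, np.nan)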
values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def create_db(name=_db_indicators,\n indi_file=os.path.join('Source', 'codes_need.csv'),\n country_file=os.path.join('Source', 'work_countries.txt')):\n\n def create_indi_country(pdfI, con, mess, db_name, freq):\n if pdfI.shape[0]==0:\n return\n print('+' * 50, '{} WORKS'.format(mess), '+' * 50)\n\n pdfI.to_sql(cmm.strINDI_db_name, con, if_exists='replace')\n print('CREATE IMF.INDICATORS table for {} indicators'.format(pdfI.shape[0]))\n pdfC = get_countryes(db_name=db_name, country_txt_file=country_file)\n pdfC.to_sql(cmm.strCOUNTRY_db_name, con=con, if_exists='replace')\n print('CREATE IMF.COUNTRIES for {0} countries.'.format(pdfC.shape[0]))\n\n update_db(db_name=db_name, start=1970, end=2000)\n update_db(db_name=db_name, start=1999)\n\n cmm.create_views(db_name, freq=freq)\n\n pdf = cmm.read_indicators_from_csv(indi_file)\n print(indi_file)\n\n pdfQ = pdf[pdf['Freq']=='Q']\n pdfA = pdf[pdf['Freq'] == 'Y']\n pdfM = pdf[pdf['Freq'] == 'M']\n\n #pdfC = cmm.read_countries(file_name=country_file)\n\n nameA=cmm.db_name2annu(name)\n nameM = cmm.db_name2annu(name, suff='_M')\n\n coni = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=name))\n coniA = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameA))\n coniM = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameM))\n\n create_indi_country(pdfQ, coni, 'QUARTERLY', name, freq='Q')\n create_indi_country(pdfA, coniA, 'ANNUAL', nameA, freq='A')\n create_indi_country(pdfM, coniM, 'MONTHLY', nameM, freq='M')", "def _convertAndFix(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n if 'SWVT_ROWT' not in self.dataFrames:\r\n 
self.dataFrames['SWVT_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','W'])\r\n self.dataFrames['SWVT']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['SWVT_ROWT'].empty:\r\n self.dataFrames['SWVT_ROWT'].ZEIT=self.dataFrames['SWVT_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['SWVT_ROWT'].W=self.dataFrames['SWVT_ROWT'].W.str.replace(',', '.')\r\n\r\n if 'LFKT_ROWT' not in self.dataFrames:\r\n self.dataFrames['LFKT_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','LF']) \r\n self.dataFrames['LFKT']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['LFKT_ROWT'].empty:\r\n self.dataFrames['LFKT_ROWT'].ZEIT=self.dataFrames['LFKT_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['LFKT_ROWT'].LF=self.dataFrames['LFKT_ROWT'].LF.str.replace(',', '.')\r\n\r\n if 'QVAR_ROWT' not in self.dataFrames:\r\n self.dataFrames['QVAR_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','QM']) \r\n self.dataFrames['QVAR']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['QVAR_ROWT'].empty:\r\n self.dataFrames['QVAR_ROWT'].ZEIT=self.dataFrames['QVAR_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['QVAR_ROWT'].QM=self.dataFrames['QVAR_ROWT'].QM.str.replace(',', '.')\r\n\r\n if 'PVAR_ROWT' not in self.dataFrames:\r\n self.dataFrames['PVAR_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','PH']) \r\n self.dataFrames['PVAR']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['PVAR_ROWT'].empty:\r\n self.dataFrames['PVAR_ROWT'].ZEIT=self.dataFrames['PVAR_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['PVAR_ROWT'].PH=self.dataFrames['PVAR_ROWT'].PH.str.replace(',', '.')\r\n\r\n # 1st Time without Value?!\r\n self.dataFrames['SWVT_ROWT']=self.dataFrames['SWVT_ROWT'].fillna(0) \r\n self.dataFrames['LFKT_ROWT']=self.dataFrames['LFKT_ROWT'].fillna(0) \r\n self.dataFrames['QVAR_ROWT']=self.dataFrames['QVAR_ROWT'].fillna(0) \r\n self.dataFrames['PVAR_ROWT']=self.dataFrames['PVAR_ROWT'].fillna(0) \r\n \r\n # Template Node\r\n self.dataFrames['KNOT']=self.dataFrames['KNOT'][self.dataFrames['KNOT'].NAME.fillna('').astype(str).isin(['TemplateNode','TemplNode-VL','TemplNode-RL'])==False] \r\n \r\n # TE only in Heatingmodels ? ...\r\n try:\r\n isinstance(self.dataFrames['KNOT_BZ']['TE'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['KNOT_BZ']['TE']\",'TE only in Heatingmodels?!')) \r\n self.dataFrames['KNOT_BZ']['TE']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # FWVB LFK\r\n if 'FWVB' in self.dataFrames:\r\n try:\r\n isinstance(self.dataFrames['FWVB']['LFK'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['FWVB']['LFK']\",'LFK not set?!')) \r\n self.dataFrames['FWVB']['LFK']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. 
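# --- Minimal sketch (not from the source): the decimal-comma-to-dot conversion
# --- applied above to the SWVT/LFKT/QVAR/PVAR time-series columns, shown on a
# --- stand-alone frame; the column names here are assumptions.
import pandas as pd

demo = pd.DataFrame({"ZEIT": ["0,0", "1,5"], "W": ["10,25", "11,75"]})
for col in ("ZEIT", "W"):
    demo[col] = demo[col].str.replace(",", ".", regex=False).astype(float)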
Specify a dtype explicitly to silence this warning.\r\n self.dataFrames['FWVB']['LFK'].fillna(value=1,inplace=True)\r\n\r\n # Models with only one Standard LTGR ...\r\n try:\r\n isinstance(self.dataFrames['LTGR']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['LTGR']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with old DTRO_ROWD \r\n for attrib in ['AUSFALLZEIT','PN','REHABILITATION','REPARATUR','WSTEIG','WTIEFE']:\r\n try:\r\n isinstance(self.dataFrames['DTRO_ROWD'][attrib],pd.core.series.Series)\r\n except:\r\n self.dataFrames['DTRO_ROWD'][attrib]=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with no CONTs ...\r\n try:\r\n isinstance(self.dataFrames['CONT']['LFDNR'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['CONT']['LFDNR']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n try:\r\n isinstance(self.dataFrames['CONT']['GRAF'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['CONT']['GRAF']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with no PZONs ...\r\n if not 'PZON' in self.dataFrames: \r\n self.dataFrames['PZON']=pd.DataFrame() \r\n self.dataFrames['PZON']['NAME']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n self.dataFrames['PZON']['pk']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with no STOFs ...\r\n if not 'STOF' in self.dataFrames: \r\n # BESCHREIBUNG\r\n self.dataFrames['STOF']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG']) \r\n\r\n # Models with no GMIXs ...\r\n if not 'GMIX' in self.dataFrames: \r\n self.dataFrames['GMIX']=self._constructEmptyDf(['pk','NAME']) \r\n \r\n # empty WBLZ OBJS-BLOBs\r\n if 'WBLZ' in self.dataFrames.keys():\r\n self.dataFrames['WBLZ']=self.dataFrames['WBLZ'][pd.notnull(self.dataFrames['WBLZ']['OBJS'])] \r\n # empty LAYR OBJS-BLOBs\r\n if 'LAYR' in self.dataFrames.keys():\r\n if 'OBJS' in self.dataFrames['LAYR'].columns:\r\n self.dataFrames['LAYR']=self.dataFrames['LAYR'][pd.notnull(self.dataFrames['LAYR']['OBJS'])] \r\n\r\n # BESCHREIBUNG nicht in RLVG?...\r\n if 'RLVG' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RLVG']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RLVG']['BESCHREIBUNG']\",'BESCHREIBUNG nicht in RLVG?...')) \r\n self.dataFrames['RLVG']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. 
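# --- Hedged note (not from the source): the repeated pd.Series(dtype='object')
# --- placeholders above follow the pandas guidance for silencing the
# --- FutureWarning about the default dtype of an empty Series; a numeric
# --- placeholder column would simply name a numeric dtype instead.
import pandas as pd

placeholder_obj = pd.Series(dtype="object")     # explicit object dtype
placeholder_num = pd.Series(dtype="float64")    # explicit numeric dtype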
\r\n\r\n # BESCHREIBUNG nicht in RADD?...\r\n if 'RADD' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RADD']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RADD']['BESCHREIBUNG']\",'BESCHREIBUNG nicht in RADD?...')) \r\n self.dataFrames['RADD']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # RSLW: WMIN/WMAX nicht immer vorhanden? ...\r\n if 'RSLW' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RSLW']['WMIN'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RSLW']['WMIN']\",'WMIN nicht vorhanden?!')) \r\n self.dataFrames['RSLW']['WMIN']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n try:\r\n isinstance(self.dataFrames['RSLW']['WMAX'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RSLW']['WMAX']\",'WMAX nicht vorhanden?!')) \r\n self.dataFrames['RSLW']['WMAX']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def create_FEMA_P58_bldg_repair_db(\n source_file,\n target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.concat(\n [pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1)\n for sheet in ('Summary', 'Cost Summary', 'Env Summary')], axis=1)\n\n # remove duplicate columns\n # (there are such because we joined two tables that were read separately)\n df = df.loc[:, ~df.columns.duplicated()]\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the repair database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f\"Best Fit, DS{DS_i}\",\n f\"Lower Qty Mean, DS{DS_i}\",\n f\"Upper Qty Mean, DS{DS_i}\",\n f\"Lower Qty Cutoff, DS{DS_i}\",\n f\"Upper Qty Cutoff, DS{DS_i}\",\n f\"CV / Dispersion, DS{DS_i}\",\n\n f\"Best Fit, DS{DS_i}.1\",\n f\"Lower Qty Mean, DS{DS_i}.1\",\n f\"Upper Qty Mean, DS{DS_i}.1\",\n f\"Lower Qty Cutoff, DS{DS_i}.1\",\n f\"Upper Qty Cutoff, DS{DS_i}.1\",\n f\"CV / Dispersion, DS{DS_i}.2\",\n f\"DS {DS_i}, Long Lead Time\",\n\n f'Repair Cost, p10, DS{DS_i}',\n f'Repair Cost, p50, DS{DS_i}',\n f'Repair Cost, p90, DS{DS_i}',\n f'Time, p10, DS{DS_i}',\n f'Time, p50, DS{DS_i}',\n f'Time, p90, DS{DS_i}',\n f'Mean Value, DS{DS_i}',\n f'Mean Value, DS{DS_i}.1',\n\n # Columns added for the Environmental loss\n f\"DS{DS_i} Best Fit\",\n f\"DS{DS_i} CV or Beta\",\n\n f\"DS{DS_i} Best Fit.1\",\n f\"DS{DS_i} CV or Beta.1\",\n\n f\"DS{DS_i} Embodied Carbon (kg CO2eq)\",\n f\"DS{DS_i} Embodied Energy (MJ)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n 
\"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-LongLeadTime\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['Cost', 'Time', 'Carbon', 'Energy']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n convert_family = {\n 'LogNormal': 'lognormal',\n 'Normal': 'normal'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_cost = False\n incomplete_time = False\n incomplete_carbon = False\n incomplete_energy = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = \"USD_2011\"\n df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = \"worker_day\"\n df_db.loc[(cmp.Index, 'Carbon'), 'DV-Unit'] = \"kg\"\n df_db.loc[(cmp.Index, 'Energy'), 'DV-Unit'] = \"MJ\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n cost_est = {}\n time_est = {}\n carbon_est = {}\n energy_est = {}\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')):\n\n cost_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n ])})\n\n time_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Time_p10_DS{DS_i}'),\n getattr(cmp, f'Time_p50_DS{DS_i}'),\n getattr(cmp, f'Time_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'),\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')\n ])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta'),\n getattr(cmp, f'DS{DS_i}_Best_Fit')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n carbon_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta_1'),\n getattr(cmp, f'DS{DS_i}_Best_Fit_1')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n energy_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = len(cost_est.keys())\n ds_count = 2 ** (sim_ds_count) - 1\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, 
f'0{sim_ds_count}b')\n\n cost_vals = np.sum([cost_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(5)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n time_vals = np.sum([time_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(6)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n carbon_vals = np.sum([carbon_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n energy_vals = np.sum([energy_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n # fit a distribution\n family_hat, theta_hat = fit_distribution_to_percentiles(\n cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n cost_theta = theta_hat\n if family_hat == 'normal':\n cost_theta[1] = cost_theta[1] / cost_theta[0]\n\n time_theta = [time_vals[1],\n np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)]\n\n # fit distributions to environmental impact consequences\n family_hat_carbon, theta_hat_carbon = fit_distribution_to_percentiles(\n carbon_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n carbon_theta = theta_hat_carbon\n if family_hat_carbon == 'normal':\n carbon_theta[1] = carbon_theta[1] / carbon_theta[0]\n\n family_hat_energy, theta_hat_energy = fit_distribution_to_percentiles(\n energy_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n energy_theta = theta_hat_energy\n if family_hat_energy == 'normal':\n energy_theta[1] = energy_theta[1] / energy_theta[0]\n\n # Note that here we assume that the cutoff quantities are\n # identical across damage states.\n # This assumption holds for the second edition of FEMA P58, but\n # it might need to be revisited in future editions.\n cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1')\n cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1')\n time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1')\n time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1')\n\n # store the results\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{cost_vals[3]:g},{cost_vals[4]:g}|\"\n f\"{cost_qnt_low:g},{cost_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Theta_1'] = f\"{cost_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{time_vals[3]:g},{time_vals[4]:g}|\"\n f\"{time_qnt_low:g},{time_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Theta_1'] = f\"{time_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0)\n\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = family_hat_carbon\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = f\"{carbon_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Carbon'),\n f'DS{DS_i}-Theta_1'] = f\"{carbon_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = family_hat_energy\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = f\"{energy_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'),\n f'DS{DS_i}-Theta_1'] = f\"{energy_theta[1]:g}\"\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. 
\" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\":\n cmp_meta[f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n # cost\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}')):\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}')\n\n if theta_0_low == 0. and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}'):g}\")\n\n else:\n incomplete_cost = True\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\n \"Description\": cmp_meta[f\"DS_{DS_i}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{DS_i}_Repair_Description\"]}})\n\n # time\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}_1')):\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}_1')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}_1')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}_1')\n\n if theta_0_low == 0. 
and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}_2'):g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = (\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES'))\n\n else:\n incomplete_time = True\n\n # Carbon\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit')):\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit')])\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = getattr(cmp,\n f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta')\n\n # Energy\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit_1')):\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit_1')])\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta_1')\n\n df_db.loc[(cmp.Index, 'Cost'), 'Incomplete'] = int(incomplete_cost)\n df_db.loc[(cmp.Index, 'Time'), 'Incomplete'] = int(incomplete_time)\n df_db.loc[(cmp.Index, 'Carbon'), 'Incomplete'] = int(incomplete_carbon)\n df_db.loc[(cmp.Index, 'Energy'), 'Incomplete'] = int(incomplete_energy)\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n for cmp in cmp_to_drop:\n if cmp[0] in meta_dict:\n del meta_dict[cmp[0]]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from FEMA \"\n \"P58\")" ]
[ "0.59248036", "0.5922244", "0.5877572", "0.5856754", "0.5853021", "0.5813514", "0.58091724", "0.574842", "0.565548", "0.5630156", "0.56003445", "0.5574408", "0.557013", "0.5540441", "0.5514523", "0.55128217", "0.5479409", "0.5468362", "0.54644877", "0.5457218", "0.54571164", "0.54561484", "0.54466957", "0.5436934", "0.54265374", "0.54237205", "0.5409214", "0.54062945", "0.54018325", "0.54003733" ]
0.62812114
0
Given a df of physical values, this offsets the timestamp to be equal to today, minus a given number of days.
def rebaseline_data(self, df_phys): from datetime import datetime, timezone import pandas as pd delta_days = (datetime.now(timezone.utc) - df_phys.index.min()).days - self.days_offset df_phys.index = df_phys.index + pd.Timedelta(delta_days, "day") return df_phys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def daily_returns(self, df):\n daily_returns = df.copy()\n daily_returns[1:] = (df[1:] / df[:-1].values) - 1\n daily_returns.ix[0] = 0\n return daily_returns", "def align_index_to_local_cdays(self):\n self.setup_class()\n date_range = pd.date_range(start=self.tsdf.first_valid_index(),\n end=self.tsdf.last_valid_index(), freq=CDay(calendar=self.sweden))\n\n self.tsdf = self.tsdf.reindex(date_range, method='pad', copy=False)\n\n return self", "def transform(self, df):\n temp = df.where(df >= self.df_med, -1)\n temp = temp.where(df <= self.df_med, 1).where(df != self.df_med, 0)\n return temp", "def fcl(df, dtObj):\r\n return df.iloc[np.argmin(np.abs(pd.to_datetime(df.index) - dtObj))] # remove to_pydatetime()\r", "def days_until(self, target_date_tensor):\n return target_date_tensor.ordinal() - self._ordinals", "def yesterday(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n firstMonth = 1\n firstDay = 1\n\n if currentDay == firstDay and self.month == firstMonth:\n self.year -= 1\n self.month = 12\n self.day = 31\n elif currentDay == firstDay:\n self.month -= 1\n self.day = DIM[self.month]\n else:\n self.day -= 1", "def __day(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"day\",\n operand1=self,\n operand2=None\n )", "def get_dt_per_index(self):\n dt = self.data[1,-1] - self.data[0,-1]\n return dt", "def test_fill_data_with_days_in_dtes(self):\n date = pd.to_datetime('2009-01-15')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n self.assertTrue(len(df_iv))", "def fix_date(df):\n df.insert(2, \"timestamp\", df[\"TestDate\"])\n\n mask = df[\"TestDate\"] <= df[\"StorageDate\"]\n print(\"Removing %.2f%% of unusual data\" % ((len(df) - np.sum(mask)) * 100 / len(df)))\n df = df[mask]\n\n mask = df[\"StorageDate\"] - df[\"TestDate\"] > pd.Timedelta(days=90)\n print(\"Fixing %.2f%% of outdated data\" % (np.sum(mask) * 100 / len(df)))\n df[\"timestamp\"].values[mask] = df[\"StorageDate\"].values[mask]\n return df", "def delta_today(N: int) -> dt.datetime:\n today = dt.date.today()\n return dt.datetime.combine(today, dt.time.min) + dt.timedelta(days=N)", "def convert_to_daily(data_list):\n for _in in range(1, len(data_list)):\n data_list[-_in] = data_list[-_in] - data_list[-_in - 1]", "def get_forecast_delta(target_horizon, days_early=1):\n return get_deadline_delta(target_horizon) + days_early", "def inverse_transform(self, df):\n if self.log:\n df = pd.DataFrame(np.exp(df))\n if self.squared:\n df = df ** 0.5\n df = df - self.shift_amount\n return df", "def prior_determination_date(ddates,clo_idx):\n dd = ddates.loc[ddates['Fund']==clo_idx,'Determination Date']\n prior_ddate = max(dd.loc[dd<pd.Timestamp.today()], key=lambda s: (s-pd.Timestamp.today()))\n return prior_ddate", "def pre_timestamps(ldt_timestamps, window):\r\n dt_timeofday = dt.timedelta(hours=16)\r\n days_delta = dt.timedelta(days=(np.ceil(window*7/5)+20))\r\n dt_start = ldt_timestamps[0] - days_delta\r\n dt_end = ldt_timestamps[0] - dt.timedelta(days=1)\r\n pre_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)\r\n return pre_timestamps", "def get_offset():\n offset = datetime.date.today() - start_day\n return int(offset.days) - 4", "def compute_daily_returns(df):\n # Note: Returned DataFrame must have the same number of rows\n # Initialize 
the dataframe here\n daily_returns=df.copy(); # Make a copy of original dataframe\n daily_returns[1:]=(df[1:]/df[:-1].values)-1 # .values is very important here. Otherwise pandas does the arithmetic operation based on index rather than shifted values\n \n if (df.size==df.shape[0]): # That means there is only one column\n daily_returns.ix[0]=0\n else:\n # Make the value at index 0 to 0 for all columns\n daily_returns.ix[0,:]=0; # \n # Other way to do this is following\n # daily_returns=(df/df.shift(1))-1; # However the zero index will NaN\n # daily_returns.ix[0,:]=0\n \n \n return daily_returns", "def compute_daily_returns(df):\n daily_returns = df.copy()\n daily_returns[1:] = (df[1:] / df[:-1].values) - 1\n return daily_returns[1:]", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = (df + 1).replace([0], np.nan)\n df = df.fillna((df[df != 0]).abs().min()).fillna(0.1)\n\n # add last values, group by lag, cumprod\n if trans_method == 'original':\n df = pd.concat([self.first_values, df.tail(df.shape[0] - 1)], axis=0)\n return df.cumprod()\n else:\n df_len = df.shape[0]\n df = pd.concat([self.last_values, df], axis=0)\n return df.cumprod().tail(df_len)", "def inverse_transform(self, df, trans_method: str = \"forecast\"):\n if self.fixed:\n return df\n else:\n window = self.window\n if trans_method == 'original':\n staged = self.first_values\n diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(\n len(df.index) - window\n )\n temp_cols = diffed.columns\n for n in range(len(diffed.index)):\n temp_index = diffed.index[n]\n temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[\n n\n ].reset_index(drop=True).astype(float)\n temp_row = pd.DataFrame(\n temp_row.values.reshape(1, len(temp_row)), columns=temp_cols\n )\n temp_row.index = pd.DatetimeIndex([temp_index])\n staged = pd.concat([staged, temp_row], axis=0)\n return staged\n\n # current_inversed = current * window - cumsum(window-1 to previous)\n if trans_method == 'forecast':\n staged = self.last_values\n df = pd.concat([self.last_rolling, df], axis=0)\n diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(\n len(df.index)\n )\n diffed = diffed.tail(len(diffed.index) - 1)\n temp_cols = diffed.columns\n for n in range(len(diffed.index)):\n temp_index = diffed.index[n]\n temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[\n n\n ].reset_index(drop=True).astype(float)\n temp_row = pd.DataFrame(\n temp_row.values.reshape(1, len(temp_row)), columns=temp_cols\n )\n temp_row.index = pd.DatetimeIndex([temp_index])\n staged = pd.concat([staged, temp_row], axis=0)\n staged = staged.tail(len(diffed.index))\n return staged", "def getDatePrice(self):\n return self.getHistorical().ix[:,[0,5]]", "def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)", "def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n X = date_part(df.index, method=self.datepart_method)\n y = pd.DataFrame(self.model.predict(X))\n y.columns = df.columns\n y.index = df.index\n df = df + y\n return df", "def daily_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n yesterday = today - timedelta(days=1)\n todays_per_min = []\n yesterday_per_min = []\n today_viewed = []\n yesterday_viewed = []\n # this iterates over each row in the dataframe, applying the logic and adding the cards_per_min value to the\n # appropriate list\n for index, row in df.iterrows():\n if 
row['session_start'].date() == today:\n per_min = get_cards_per_min(row)\n todays_per_min.append(per_min)\n today_viewed.append(row['total_looked_at'])\n if row['session_start'].date() == yesterday:\n per_min = get_cards_per_min(row)\n yesterday_per_min.append(per_min)\n yesterday_viewed.append(row['total_looked_at'])\n today_viewed_result = total_viewed(today_viewed, yesterday_viewed)\n today_viewed_result['total_viewed_daily'] = today_viewed_result.pop('total_viewed')\n return today_viewed_result", "def manage_position(self, dt, pos, logic_df):\n if pos.almost_expired_ratio(dt) > 0:\n pos.close(dt)", "def yesterday():\n return datetime.today() - timedelta(1)", "def dst(self, dt):", "def yesterday(self):\r\n return RecordsYesterday(self)", "def add_days_since_year_start(df):\n\n try:\n \n df['day_into_year'] = df.DAYOFSERVICE.dt.dayofyear\n\n print('— days since 1/1/18 added')\n \n return df\n\n except:\n\n print(\"Problem with add_day_since_year_start function\")" ]
[ "0.5707972", "0.55130273", "0.53883445", "0.52876997", "0.52724236", "0.52668387", "0.52399904", "0.5213567", "0.5184036", "0.51482123", "0.5135267", "0.5123454", "0.5121073", "0.5113842", "0.5008443", "0.5001906", "0.49927956", "0.49903804", "0.49891898", "0.49804872", "0.4968909", "0.4961165", "0.4957996", "0.49563545", "0.49524263", "0.49443048", "0.49441624", "0.4923087", "0.49063864", "0.48843917" ]
0.6362852
0
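For orientation, the rebaselining pattern in the record above shifts a DatetimeIndex so the oldest sample lands a fixed number of days before today. A minimal standalone sketch of the same idea (the function name, the example frame and the days_offset value are illustrative assumptions, not part of the record):

import pandas as pd
from datetime import datetime, timezone

def rebaseline(df: pd.DataFrame, days_offset: int) -> pd.DataFrame:
    # shift the DatetimeIndex so the first sample sits days_offset days before now (UTC)
    delta_days = (datetime.now(timezone.utc) - df.index.min()).days - days_offset
    df.index = df.index + pd.Timedelta(delta_days, "day")
    return df

# usage: a small tz-aware frame recorded in 2022, rebased to start roughly 7 days ago
idx = pd.date_range("2022-01-01", periods=3, freq="h", tz="UTC")
df = pd.DataFrame({"Signal": ["Speed"] * 3, "Physical Value": [1.0, 2.0, 3.0]}, index=idx)
print(rebaseline(df, days_offset=7).index.min())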
Given a df of physical values, return only signals matched by filter
def filter_signals(self, df_phys): if not df_phys.empty and len(self.signals): df_phys = df_phys[df_phys["Signal"].isin(self.signals)] return df_phys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mut_filter(df, rate, binary_cutoff=12):\n get_min_count = lambda s: s.value_counts().min() if len(s.unique()) > 1 else -1\n df = df[df.apply(get_min_count, axis=1) > binary_cutoff]\n cc = H.screen_feature(rate, rev_kruskal, df)\n\n fc_apply = lambda s: fc(s, rate)\n direction = df.apply(fc_apply, axis=1)\n direction.name = 'direction'\n\n cc = cc.join(direction)\n #cc = cc[cc.direction == False]\n #return cc\n\n df = df.ix[H.true_index((cc.p > .01) | (cc.direction == True))]\n df = df.dropna(axis=1)\n return df", "def filter_by_isin(df: pd.DataFrame, column: str, values: Iterable) -> pd.DataFrame:\n # First, create a \"map\" series from all possible values in the column => whether they should pass the filter\n all_ids = df[column].unique()\n is_id_relevant = pd.Series(np.zeros(len(all_ids)), index=all_ids).astype('bool') # Default false\n is_id_relevant.loc[values] = True\n\n # Create a boolean mask for column, based on the mapping above. Grab the raw array.\n mask = is_id_relevant[df[column]].values\n # Apply mask\n return df[mask]", "def filter_by_match(df: pd.DataFrame, d: dict) -> np.ndarray:\n incl = np.ones([len(df)], dtype=bool)\n for k, v in d.items():\n incl = incl & (df[k] == v)\n return incl", "def data_filter(\n df, CondTempRange=[float('-inf'), float('inf')],\n EvapTempRange=[float('-inf'), float('inf')],\n RemovalPoint=[OperatingPoint()],\n AddPoint=[OperatingPoint()]\n ):\n\n # copy new dataframe\n df_new = copy.deepcopy(df)\n\n # condition list\n cond = []\n cond.append(df.CondTempInF >= CondTempRange[0])\n cond.append(df.CondTempInF <= CondTempRange[1])\n cond.append(df.EvapTempInF >= EvapTempRange[0])\n cond.append(df.EvapTempInF <= EvapTempRange[1])\n for point in RemovalPoint:\n cond.append(df.OperatingPoint != point)\n addcond = []\n for point in AddPoint:\n addcond.append(df.OperatingPoint == point)\n\n # Apply AND to all conditions\n final_condition = cond[0]\n for ii in xrange(1, len(cond)):\n final_condition = final_condition*cond[ii]\n\n # Apply OR to all conditions\n for ii in xrange(0, len(addcond)):\n final_condition = final_condition+addcond[ii]\n\n # Return the data that satisfy all conditions\n return df_new[final_condition]", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def filterDataframeBySenSpeLimitContrary(value_sen, value_spe, dataframe_values_models):\n\n datafram_values_filtered = dataframe_values_models.query('Sensitivity < {0} or Specificity < {1}'.format(value_sen, value_spe))\n return datafram_values_filtered", "def FilterFXSeries(self):\r\n filtFX=self.data[self.data.columns[0]].tolist()\r\n return filtFX", "def filters(array, sample_frequency):\n strain = TimeSeries(array, sample_rate=int(sample_frequency))\n white_data = strain.whiten(fftlength=4, fduration=4)\n bp_data = white_data.bandpass(50, 250)\n return bp_data.value", "def _filtering(cls, signal, system):\r\n\r\n if np.iscomplexobj(signal):\r\n _, filtered_signal_r, _ = sc_sig.dlsim(system, np.real(signal))\r\n _, filtered_signal_i, _ = sc_sig.dlsim(system, np.imag(signal))\r\n filtered_signal = filtered_signal_r + 1j * filtered_signal_i\r\n else:\r\n _, filtered_signal, _ = sc_sig.dlsim(system, signal)\r\n filtered_signal.shape = signal.shape\r\n return filtered_signal", "def cn_filter(df, binary_cutoff=12):\n del_df = (df.ix['Deletion'].dropna(1) < 0).astype(int)\n del_df = del_df[del_df.sum(1) >= binary_cutoff]\n 
del_df.index = del_df.index.droplevel(1)\n del_df = del_df.T\n amp_df = (df.ix['Amplification'].dropna(1) > 0).astype(int)\n amp_df = amp_df[amp_df.sum(1) >= binary_cutoff]\n amp_df.index = amp_df.index.droplevel(1)\n amp_df = amp_df.T\n return amp_df, del_df", "def filterDataframeBySenSpeLimit(value_sen, value_spe, dataframe_values_models):\n\n datafram_values_filtered = dataframe_values_models.query('Sensitivity >= {0} and Specificity >= {1}'.format(value_sen, value_spe))\n return datafram_values_filtered", "def __handle_filters(self, df) -> DataFrame:\n if not len(df):\n return df\n starting_df = df.copy()\n running_df = df\n for filter_ in self.filters:\n filter_value = filter_.value\n if filter_value is None:\n continue\n filter_condition = filter_.condition\n if filter_condition == FilterCondition.OR:\n df = starting_df\n else:\n df = running_df\n\n column_name = filter_.columnName\n operation = filter_.operation\n if operation == FilterOperation.TOP:\n df = df.sort_values(by=column_name, ascending=False, na_position='last').head(filter_value)\n elif operation == FilterOperation.BOTTOM:\n df = df.sort_values(by=column_name, ascending=True, na_position='last').head(filter_value)\n elif operation == FilterOperation.ABSOLUTE_TOP:\n df = df.reindex(df[column_name].abs().sort_values(ascending=False, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.ABSOLUTE_BOTTOM:\n df = df.reindex(df[column_name].abs().sort_values(ascending=True, na_position='last').index).head(\n filter_value)\n elif operation == FilterOperation.EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n # Special case to handle different types of floats\n if isinstance(filter_value[0], str):\n df = df.loc[df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of floats\n df = df[np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.NOT_EQUALS:\n if not isinstance(filter_value, list):\n filter_value = [filter_value]\n if isinstance(filter_value[0], str):\n df = df.loc[~df[column_name].isin(filter_value)]\n else:\n # Add a tolerance for the special case to handle different types of float\n df = df[~np.isclose(df[column_name].values[:, None], filter_value, atol=1e-10).any(axis=1)]\n elif operation == FilterOperation.GREATER_THAN:\n df = df[df[column_name] > filter_value]\n elif operation == FilterOperation.LESS_THAN:\n df = df[df[column_name] < filter_value]\n elif operation == FilterOperation.LESS_THAN_EQUALS:\n df = df[df[column_name] <= filter_value]\n elif operation == FilterOperation.GREATER_THAN_EQUALS:\n df = df[df[column_name] >= filter_value]\n else:\n raise MqValueError(f'Invalid Filter operation Type: {operation}')\n\n if filter_.condition == FilterCondition.OR:\n # Need to merge the results\n running_df = running_df.merge(df, how='outer')\n else:\n running_df = df\n\n return running_df", "def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]", "def spfilt(self, i):\n\n # separate NaN and non-NaN values to avoid NaN filter output on cleaned data\n data_nan = self.data[i][self.data[i]['Raw'].isna()]\n data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]\n\n # filter notNaN data & add column to notNaN df\n data_notnan_filt = sosfiltfilt(self.sp_sos, data_notnan.to_numpy(), axis=0)\n data_notnan['Filt'] = data_notnan_filt\n\n # merge NaN & filtered notNaN values, sort on index\n 
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()\n\n # add channel to main dataframe\n self.spfiltEEG[i] = filt_chan", "def filter_patients(self):\n\n if self.dataset is None:\n self.dataset = h5py.File(self.filename, 'r')['dataset']\n \n # Find feature indices belonging to specific criteria\n inclusion_info = self.filter_params['inclusion']\n # exclusion_info = self.filter_params['exclusion']\n case_control_info = self.filter_params['case_control']\n\n inclusion_inds = self.check_criteria(inclusion_info, case_control=False)\n # exclusion_inds = self.check_criteria(exclusion_info, case_control=False)\n case_inds, control_inds = self.check_criteria(case_control_info, case_control=True)\n\n filtered_inds = {}\n # inclusion_exclusion_inds = np.setdiff1d(inclusion_inds, exclusion_inds)\n filtered_inds['case'] = np.intersect1d(inclusion_inds, case_inds)\n filtered_inds['control'] = np.intersect1d(inclusion_inds, control_inds)\n\n return filtered_inds", "def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def filterDataset(dat, dataset):\n #\n dat = dat[dat['organism'].isin(dataset)]\n no_mmei_index = dat['mmei']=='no'\n nonstop_index = dat['mutstop']=='no'\n zerofit_index = dat['fitness'].abs()>1e-4\n mutwt_index = dat['mutwt']=='no'\n dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]\n #print \"Filtered data\"\n return dat", "def filter_renters(data_df):\n return data_df[(data_df['sc116'] == 2) # Only renters\n & (data_df['uf17'] < 8000) # With a real rent provided\n ]", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def filter_common_variation(self):\n # Filter common variation\n unknown_freq_df = self.variant_df.query('gnomAD_exome_ALL == \".\"')\n other_freq_df = self.variant_df.query('gnomAD_exome_ALL != \".\"')\n self.unknown_maf_count = unknown_freq_df.shape[0]\n \n # Filter common variants\n other_freq_df = other_freq_df[other_freq_df['gnomAD_exome_ALL'].astype(float) <= self.filter_common_maf]\n self.variant_df = pd.concat([other_freq_df, unknown_freq_df], axis=0)\n self.filter_common_var_count = self.variant_df.shape[0]", "def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]", "def filter_representative_sites_patient(\n df: pd.DataFrame, representative_sites: List[str]) -> pd.DataFrame:\n\n return df.loc[~(df['site'].isin(representative_sites))]", "def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return 
Records(f)", "def filter_values(df, value=0, axis=0):\n \n if axis:\n return df.loc[:, (df != value).any(axis=1-axis)]\n else:\n return df.loc[(df != value).any(axis=1-axis)]", "def filter_to_verified(df, verified_df):\n return df[df.UID.isin(verified_df.UID.values)].reset_index(drop=True)", "def filter_tracks_domain(df, minlon=90, maxlon=180, minlat=-40, maxlat=0):\n\n domain = sbox(minlon, minlat, maxlon, maxlat, ccw=False)\n tracks = df.groupby('num')\n tempfilter = tracks.filter(lambda x: len(x) > 1)\n filterdf = tempfilter.groupby('num').filter(\n lambda x: LineString(zip(x['lon'], x['lat'])).intersects(domain))\n return filterdf", "def filter_input(input_df, target_df):\n # input_df = input_df.reindex(target_df.index, copy=False)\n data_df = pd.concat((input_df, target_df), join=\"inner\", copy=False, axis=1)\n return data_df", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]" ]
[ "0.64054054", "0.6110009", "0.60705125", "0.59736735", "0.5903753", "0.58663183", "0.5819775", "0.57466286", "0.57170045", "0.5706719", "0.56877935", "0.567983", "0.5672527", "0.5629999", "0.5559911", "0.5537252", "0.55131197", "0.54510593", "0.54250556", "0.54237044", "0.5420049", "0.538934", "0.5380897", "0.5376138", "0.5375657", "0.5356125", "0.5303589", "0.5301702", "0.5259074", "0.52358264" ]
0.71506196
0
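The filtering in the record above is a plain isin mask on the decoded signal names, with an empty filter list meaning "keep everything". A minimal standalone sketch (the example frame and signal names are made up for illustration):

import pandas as pd

def filter_signals(df_phys: pd.DataFrame, signals: list) -> pd.DataFrame:
    # keep only rows whose Signal name is in the requested list; an empty list keeps all rows
    if not df_phys.empty and len(signals):
        df_phys = df_phys[df_phys["Signal"].isin(signals)]
    return df_phys

df_phys = pd.DataFrame(
    {"Signal": ["EngineSpeed", "VehicleSpeed", "FuelRate"], "Physical Value": [1520.0, 62.5, 3.1]}
)
print(filter_signals(df_phys, ["EngineSpeed", "VehicleSpeed"]))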
Extract a df of raw data and device ID from log file. Optionally include LIN bus data by setting lin=True
def get_raw_data(self, log_file, passwords={},lin=False): import mdf_iter with self.fs.open(log_file, "rb") as handle: mdf_file = mdf_iter.MdfFile(handle, passwords=passwords) device_id = self.get_device_id(mdf_file) if lin: df_raw_lin = mdf_file.get_data_frame_lin() df_raw_lin["IDE"] = 0 df_raw_can = mdf_file.get_data_frame() df_raw = df_raw_can.append(df_raw_lin) else: df_raw = mdf_file.get_data_frame() return df_raw, device_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
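For context, the raw-extraction record above can be exercised on a local MF4 file roughly as follows. This is a hedged sketch, not the record's own class method: it uses a plain local open() instead of the record's self.fs filesystem, omits the device-ID lookup (its helper is not shown in the record), swaps the deprecated DataFrame.append for pd.concat, and the file name is a placeholder. Only the mdf_iter calls that appear in the record itself are relied on.

import mdf_iter
import pandas as pd

def get_raw_frames(path, passwords={}, lin=False):
    # read an MF4 log into a raw CAN frame DataFrame; optionally append LIN frames (IDE set to 0)
    with open(path, "rb") as handle:
        mdf_file = mdf_iter.MdfFile(handle, passwords=passwords)
        df_raw = mdf_file.get_data_frame()
        if lin:
            df_raw_lin = mdf_file.get_data_frame_lin()
            df_raw_lin["IDE"] = 0
            df_raw = pd.concat([df_raw, df_raw_lin])  # pd.concat instead of the older DataFrame.append
    return df_raw

df_raw = get_raw_frames("00000001.MF4")  # placeholder file name
print(df_raw.head())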
[ "def read_linelog():", "def hoomdlog(filename):\r\n\r\n data = pd.read_csv(filename, sep = '\\s+')\r\n return data", "def log_to_dataframe(log_file, regex, headers):\n log_messages = []\n linecount = 0\n\n with open(log_file, 'r') as fin:\n logs = fin.readlines()\n logs = [j.strip() for j in logs]\n\n for line in logs:\n try:\n line = line.strip()\n match = regex.search(line.strip())\n message = [match.group(header) for header in headers]\n log_messages.append(message)\n linecount += 1\n except Exception as e:\n print(e)\n pass\n logdf = pd.DataFrame(log_messages, columns=headers)\n logdf.insert(0, 'LineId', None)\n\n logdf['LineId'] = [i + 1 for i in range(linecount)]\n return logdf", "def uadb_ascii_to_dataframe(file=''): \n \n if debug:\n print(\"Running uadb_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True) # TODO\n \n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n\n nmiss = 0\n search_h = False \n read_data = []\n \n usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n\n #usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd, iday, ident, numlev= 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n obs_id = 0\n stations_id = [] \n \n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n \n ident = int(line[15:21].replace(' ',''))# WMO\n if ident not in stations_id:\n stations_id.append(ident)\n \n #if len(ident) == 4:\n # ident = '0' + ident \n #idflag = int(line[22:24]) # id flag\n #d_src = int(line[25:28]) # source dataset\n #version = float(line[29:34]) # version\n #dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%02d\" % int(line[46:48])\n hour = line[49:53]\n #locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n #ele = float(line[79:85])\n #stype = int(line[86:88])\n numlev = int(line[89:93])\n #pvers = line[94:102]\n\n if '99' in hour:\n hour = hour.replace('99', '00')\n \n if '99' in day:\n search_h = True\n continue\n \n minutes = int(hour) % 100 \n hour = \"%02d\" % (int(hour) // 100)\n if minutes > 60 or minutes < 0:\n minutes = 0\n minutes = \"%02d\" % minutes\n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day)\n #pday = int(day)\n search_h = False\n\n except Exception as e:\n #print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n search_h = True\n #iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n #ltyp = int(line[0:4])\n p = float(line[5:13])\n \n if p != -99999.0 and p != 9999.9: \n press = float(line[5:13])*100 # converting to Pa, since P is given in mb (1 mb = 1 hPa) \n else:\n press = np.nan \n \n gph = float(line[14:22]) # gph [m]\n \n if gph == -999.0 or gph == -99999.00 or gph >= 99999.0:\n gph = np.nan\n \n temp = float(line[23:29])\n if temp == -999.0:\n temp = np.nan \n else:\n temp = temp + 273.15\n \n rh = float(line[30:36]) # %\n if rh == -999.0:\n rh = np.nan\n else:\n rh = rh / 100. 
# convert to absolute ratio TODO\n\n wdir = float(line[37:43]) \n if wdir == -999.0 or wdir == -999 :\n wdir = np.nan\n \n wspd = float(line[44:50]) # [m/s], module of the velocity\n if wspd <0 :\n wspd = np.nan \n \n try:\n \n for value,var in zip([ gph, temp, wspd, wdir, rh], [ 'gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity'] ):\n obs_id = obs_id +1\n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n else:\n z_type = -2147483648 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n \n except:\n 0\n \n \n \n #column_names = ['source_file', 'product_code', 'report_id', 'observation_id', 'report_timestamp' , 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body' , 'units', 'number_of_pressure_levels' ]\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n #df['observations_id'] =numpy.char.zfill(numpy.arange(ivar.shape[0]).astype('S10'), 10)\n \n df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n #df['report_id'] = numpy.int64 (df['report_id'] ) \n #df['observation_id'] = numpy.int64 (df['observation_id'] ) \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n print('Done reading DF')\n return df , stations_id", "def lammpslog(filename):\r\n\r\n with open(filename, 'r') as f:\r\n data = f.readlines()\r\n\r\n #----get how many sections are there----\r\n start = [i for i, val in enumerate(data) if val.startswith('Step ')]\r\n end = [i for i, val in enumerate(data) if val.startswith('Loop time of ')]\r\n\r\n if data[-1] is not '\\n':\r\n if data[-1].split()[0].isnumeric(): #incomplete log file\r\n end.append(len(data) - 2)\r\n \r\n start = np.array(start)\r\n end = np.array(end)\r\n linenum = end - start - 1\r\n print ('Section Number: %d' %len(linenum), ' Line Numbers: ' + str(linenum))\r\n del data \r\n\r\n final = []\r\n for i in range(len(linenum)):\r\n data = pd.read_csv(filename, sep = '\\s+', skiprows = start[i], nrows = linenum[i])\r\n final.append(data)\r\n del data\r\n\r\n return final", "def extract_phys(self, df_raw):\n import can_decoder\n import pandas as pd\n\n df_phys = pd.DataFrame()\n df_phys_temp = []\n for db in self.db_list:\n df_decoder = can_decoder.DataFrameDecoder(db)\n\n for bus, bus_group in df_raw.groupby(\"BusChannel\"): \n for length, group in bus_group.groupby(\"DataLength\"):\n df_phys_group = df_decoder.decode_frame(group)\n if not 
df_phys_group.empty:\n df_phys_group[\"BusChannel\"] = bus \n df_phys_temp.append(df_phys_group)\n \n df_phys = pd.concat(df_phys_temp, ignore_index=False).sort_index()\n \n # remove duplicates in case multiple DBC files contain identical signals\n df_phys[\"datetime\"] = df_phys.index\n df_phys = df_phys.drop_duplicates(keep=\"first\")\n df_phys = df_phys.drop(labels=\"datetime\", axis=1)\n\n # optionally filter and rebaseline the data\n df_phys = self.filter_signals(df_phys)\n\n if not df_phys.empty and type(self.days_offset) == int:\n df_phys = self.rebaseline_data(df_phys)\n\n return df_phys", "def log_extract(log_info):\n \n #Handle file names, strings and open file-like objects equivalently\n with uber_open_rmode(log_info) as log_info:\n \n headers = []\n footers = []\n i = 0\n \n #for all lines in file/output\n for line in log_info:\n \n #skip blank lines\n if len(line.split()) == 0:\n continue\n \n #This is listed before both run and minimize simulations \n if 'Memory usage per processor =' in line:\n headers.append(i+1)\n \n #This follows both run and minimize simulations\n elif 'Loop time of' in line:\n footers.append(i-1)\n \n i += 1\n \n #Add last line to footers for incomplete logs\n footers.append(i)\n \n log_info.seek(0)\n \n #Create DataModelDict root\n log_dict = DM()\n log_dict['LAMMPS-log-thermo-data'] = DM()\n \n #for all lines in file/output\n for header, footer in zip(headers, footers):\n\n #Read thermo data\n df = pd.read_csv(log_info, header=header, nrows=footer-header, sep='\\s+', engine='python', skip_blank_lines=True)\n log_info.seek(0) \n\n #Convert to DataModelDict\n thermo = DM()\n for j in df:\n thermo[str(j)] = df[j].values.tolist()\n \n #Append simulation results to DataModelDict root\n simulation = DM([('thermo', thermo)])\n log_dict['LAMMPS-log-thermo-data'].append('simulation', simulation)\n \n return log_dict", "def logger_info(api_token, records):\n\n df_out = pd.DataFrame(columns=['file_type', 'station_name',\n 'logger_model', 'serial_no', 'os_version', 'logger_program',\n 'Dld_sig', 'table_name'])\n\n for record in tqdm.tqdm(records):\n if is_toa5(record):\n download_url = f\"{record['url']}?auth_token={api_token}\"\n req = urllib.request.urlopen(download_url)\n data = req.read()\n df = pd.read_csv(io.StringIO(data.decode('utf-8')),\n skiprows=0, header=None, nrows=1)\n df = df.dropna(axis=1)\n df.columns = ['file_type', 'station_name', 'logger_model',\n 'serial_no', 'os_version', 'logger_program',\n 'Dld_sig', 'table_name']\n df_out.loc[record['filename']] = df.iloc[0]\n else:\n print('Error: This is not a TOA5 record')\n return df_out.sort_index()", "def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation 
ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def read_spectral_k(filename=\"tc_dos_l.dat\"):\n # column headers for the data \n #tcdosl_labels = [\n # \"wavelength\",\n # \"k_xx_raw\",\"k_xx_smooth\",\n # \"k_yy_raw\",\"k_yy_smooth\",\n # \"k_zz_raw\",\"k_zz_smooth\"]\n\n tcdosl_labels = [\n \"wavelength\",\n \"k_xx_raw\",\"k_yy_raw\",\"k_zz_raw\",\n \"k_xx_smooth\",\"k_yy_smooth\",\"k_zz_smooth\"]\n\n def subselect_table_block(i_start,lines):\n i = i_start + 1\n\n table = []\n while(lines[i].strip() != \"\"):\n args = lines[i].split()\n args = [arg.strip() for arg in args]\n args = [float(arg) for arg in args]\n table.append(args)\n i += 1 \n return np.array(table)\n\n line = None # initialize\n with open(filename,'r') as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n temperatures = []\n tcdosl_dict = OrderedDict()\n\n for il,line in enumerate(lines):\n if line.startswith('# Temp:'):\n args = 
line.split(':')\n T = int(float(args[1].strip()))\n temperatures.append(T)\n tcdosl_dict[T] = subselect_table_block(il,lines)\n\n tcdosl_df_dict = OrderedDict()\n for temp in temperatures:\n tcdosl_df_dict[temp] = pd.DataFrame(\n copy.deepcopy(tcdosl_dict[temp]),\n columns=list(tcdosl_labels))\n\n return {k:v.copy() for k,v in tcdosl_df_dict.items()}", "def print_log_summary(self, device_id, log_file, df_phys):\n if self.verbose:\n print(\n \"\\n---------------\",\n f\"\\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\\n\",\n )", "def get_ARNA_flight_log_as_df():\n flight_nums = [\n # 216,\n 217, 218, 219, 220, 221, 222, 223, 224, 225\n ]\n flight_IDs = ['C{}'.format(i) for i in flight_nums]\n dfs = []\n for flight_ID in flight_IDs:\n dfs += [get_summary4flight(flight_ID=flight_ID)]\n # Combine and return as a single dataframe sorted by time\n df = pd.concat(dfs)\n df = df.sort_index()\n return df", "def fieldParser(fileLog,logFolder):\n for logFileS in sorted_alphanumeric(os.listdir(logFolder)):\n if logFileS.startswith(fileLog):\n if logFileS == fileLog+\"0\":\n logFile = pd.read_csv(\"{folder}{file}\" .format(folder=logFolder,file=logFileS) ,sep='\\t',header=None,names=['TimeStep','Residual'])\n #print(logFileS)\n logFileTemporal = pd.read_csv(\"{folder}{file}\" .format(folder=logFolder,file=logFileS) ,sep='\\t',header=None,names=['TimeStep','{}' .format(logFileS)])\n logFile = pd.concat([logFile,logFileTemporal],axis=1)\n field = []\n for index,row in logFile.iterrows():\n field.append(row.dropna()[-1])\n\n return np.array(field)", "def _read_raw(self):\n return pd.read_csv('data/oma/orthologs.tsv', sep='\\t', header=None,\n usecols=[0, 1], names=['CE_WORMPEP', 'HS_ENSG']) \\\n .drop_duplicates()", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure 
levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def loadFromLogFile(filename, maxRows = 0):\n\tultracam = False\n\tultraspec = True\n\tinputFile = open(filename, 'r')\n\t\n\txValues = []\n\tyValues = []\n\tframeList = []\n\theaderBlock = \"\"\n\trunName = \"--unknown--\"\n\ttelescope = \"--unknown--\"\n\ttargetName = \"--unknown--\"\n\tfilterName = \"--unknown--\"\n\tPI = \"--unknown--\"\n\tcolumnCount = 0\n\tuniqueCCDs = []\n\tfor line in inputFile:\n\t\tif line[0] == '#':\n\t\t\theaderBlock+=line\n\t\t\tif (\"target\" in line) and (\"estimated\" not in line):\n\t\t\t\ttargetName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Target: %s\"%targetName\n\t\t\tif 
(\"filters\" in line):\n\t\t\t\tfilterName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Filters: %s\"%filterName\n\t\t\tif (\"Telescope\" in line) and (\"observing\" not in line):\n\t\t\t\ttelescopeName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Telescope name: %s\"%telescopeName\n\t\t\tif (\" pi \" in line):\n\t\t\t\tPI = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"PI: %s\"%PI\n\t\t\tif (\" Data file name \" in line):\n\t\t\t\trunName = generalUtils.getBetweenChars(line, '=', '\\n').strip()\n\t\t\t\tprint \"run data file: %s\"%runName\n\t\t\tif (\" Server file name \" in line):\n\t\t\t\trunName = generalUtils.getBetweenChars(line, '=', '\\n').strip()\n\t\t\t\tprint \"run data file: %s\"%runName\n\t\t\t\t\n\t\tif line[0] != '#':\n\t\t\tparams = line.split()\n\t\t\t# print params\n\t\t\tframeIndex = int(params[0])\n\t\t\tCCD = int(params[4])\n\t\t\tif CCD not in uniqueCCDs: uniqueCCDs.append(CCD)\n\t\t\tframeList.append(frameIndex)\n\t\t\tcolumnCount = len(params)\n\tfirstFrame = frameList[0]\n\t\n\tnumApertures = int( ((columnCount-7)/14) )\n\tprint \"ColumnCount: \", columnCount, \"which means %d apertures.\"%numApertures\n\t# frameList = generalUtils.removeDuplicatesFromList(frameList)\n\tprint \"The run in file %s contains %d frames. Start frame: %d End frame: %d\"%(filename, len(frameList), min(frameList), max(frameList))\n\tif len(uniqueCCDs) == 3:\n\t\tprint \"This file has 3 CCDs. It is an ULTRACAM file.\"\n\t\tultracam = True\n\t\tultraspec = False\n\tif len(uniqueCCDs) == 1: \n\t\tprint \"This file has 1 CCD. It is an ULTRASPEC file.\"\n\t\tultracam = False\n\t\tultraspec = True\n\n\tif (ultracam): CCDs = [1, 2, 3]\n\telse: CCDs = [1]\n\tfor CCD in CCDs: \n\t\tfor aperture in range(1, numApertures+1):\n\t\t\tapertureIndex = 14*(aperture-1) + 7\n\t\t\tprint \"Reading data for aperture %d, CCD %d\"%(aperture, CCD)\n\t\t\tinputFile.seek(0)\n\t\t\tMJDs = []\n\t\t\tcounts = []\n\t\t\tskys = []\n\t\t\tsigmas = []\n\t\t\terrors = []\n\t\t\ttimeFlags = []\n\t\t\texposures = []\n\t\t\tFWHMs = []\n\t\t\tbetas = []\n\t\t\txs = []\n\t\t\tys = []\n\t\t\tlineCounter = 0\n\t\t\tfor line in inputFile:\n\t\t\t\tlineCounter+= 1\n\t\t\t\tsys.stdout.write(\"\\rLine number: %d \"%(lineCounter))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tif line[0] != '#':\n\t\t\t\t\tparams = line.split()\n\t\t\t\t\t# print params\n\t\t\t\t\tCCDValue = int(params[4])\n\t\t\t\t\tapertureValue = int(params[apertureIndex])\n\t\t\t\t\tif CCDValue == CCD: \n\t\t\t\t\t\tframeIndex = int(params[0])\n\t\t\t\t\t\tMJDs.append(float(params[1]))\n\t\t\t\t\t\ttimeFlags.append(int(params[2]))\n\t\t\t\t\t\texposures.append(float(params[3]))\n\t\t\t\t\t\tFWHMs.append(float(params[5]))\n\t\t\t\t\t\tbetas.append(float(params[6]))\n\t\t\t\t\t\txs.append(float(params[apertureIndex + 1]))\n\t\t\t\t\t\tys.append(float(params[apertureIndex + 2]))\n\t\t\t\t\t\tcounts.append(float(params[apertureIndex + 7]))\n\t\t\t\t\t\tsigmas.append(float(params[apertureIndex + 8]))\n\t\t\t\t\t\tskys.append(float(params[apertureIndex + 9]))\n\t\t\t\t\t\terrors.append(int(params[apertureIndex + 13]))\n\t\t\t\t\t\n\t\t\tphotometry = {}\n\t\t\t\n\t\t\tphotometry['MJD'] = numpy.array(MJDs)\n\t\t\tphotometry['exposure'] = numpy.array(exposures)\n\t\t\tphotometry['FWHM'] = numpy.array(FWHMs)\n\t\t\tphotometry['beta'] = numpy.array(betas)\n\t\t\tphotometry['x'] = numpy.array(xs)\n\t\t\tphotometry['y'] = numpy.array(ys)\n\t\t\tphotometry['counts'] = 
numpy.array(counts)\n\t\t\tphotometry['sigma'] = numpy.array(sigmas)\n\t\t\tphotometry['sky'] = numpy.array(skys)\n\t\t\tphotometry['error'] = numpy.array(errors)\t\n\t\t\n\t\t\tid = slots.getNextSlotID()\n\t\t\tprint \"new ID:\", id\n\t\t\tslot = photometryClasses.slotObject(id)\n\t\t\tslot.setPhotometry(photometry)\n\t\t\tslot.setTimeColumn('MJD')\n\t\t\tslot.setYColumn('counts')\n\t\t\tslot.setYError('sigma')\n\t\t\tslot.target = targetName\n\t\t\tslot.filter = filterName\n\t\t\tslot.aperture = aperture\n\t\t\tslot.headers = headerBlock\n\t\t\tslot.runName = runName\n\t\t\tslot.telescope = findTelescope(telescopeName)\n\t\t\tslot.CCD = \"CCD %d\"%CCD\n\t\t\tnumSlots = slots.addSlot(slot)\n\t\t\tprint \"Added the data to a new slot. Total number of slots is now: %d\"%(numSlots)\n\t\t\tprint slot\n\t\n\tinputFile.close()\n\treturn", "def read_data_test_MLOG(self):\n self.na.set_query_timeout(10e3)\n self.na.set_format('mlog')\n fpts, mags = self.na.read_data()\n\n plt.figure()\n plt.plot(fpts, mags)\n plt.show()", "def get_flat_file_data(kind: str, server: str='PROD', ID: str='42') -> DataFrame:\r\n k = {\r\n 'c': 'customer_data_{0}_{1}_.csv',\r\n 'b': 'vendor_data_{0}_{1}_.csv'\r\n }\r\n f = k[kind].format(server, ID)\r\n df = pd.read_csv(f'{BASE_DIR}/{f}', encoding='UTF-8')\r\n df = prepare_input_df(df)\r\n return df", "def path_to_df(path, orig) :\n with open(path, 'r') as fich :\n strinfo = fich.readline()\n [strn, strm] = strinfo.split(\",\")\n info = {'n':int(strn.split(\"=\")[1]), 'm':int(strm.split(\"=\")[1])}\n data = pd.read_csv(fich, sep=\",\")\n data['origin'] = orig\n return info, data", "def extract_table_1(first_line_idx, lineIN_list): \n DEBUG_1A = False\n if DEBUG_1A: header_1A = '>>>DEBUG_1A:\\t'\n\n DEBUG_1 = False\n if DEBUG_1: header = '>>>DEBUG_1:\\t'\n if DEBUG_1: print header, 'first_line_idx', first_line_idx \n if DEBUG_1: from pprint import pprint as pp\n my_lineOUT_list = []\n my_lineIN_list = []\n\n #OBS header_of_the_table = 'Parameter'\n cell0_without_tab_list = [\n 'Time bus',\n 'Input Rise Time',\n 'Input Fall Time',\n 'Serial Interface Clock',\n ]\n tab_within_cell_list = [\n 'Clock', \n 'Holdoff',\n ]\n\n\n for line_idx in range(first_line_idx+1, len(lineIN_list) ):\n #if lineIN_list[line_idx].startswith('Parameter'):\n # my_lineIN_list.append(lineIN_list[line_idx].strip().replace(' ', csv_delimiter)) # CSV delimiter\n # continue\n\n this_line = lineIN_list[line_idx].rstrip('\\n') # strip EOL only, not other whtite space\n #this_line = lineIN_list[line_idx].replace('\\n', '') # strip EOL\n #this_line = lineIN_list[line_idx].rstrip()\n\n #TMP if DEBUG_1: print\n #TMP if DEBUG_1: print header, '%3d: 1000, this_line\\t(%s)'%(line_idx, this_line)\n #TMP if DEBUG_1: print header, '%3d: len(this_line)\\t(%s)'%(line_idx, len(this_line))\n #TMP if DEBUG_1: print header, '%3d: len(my_lineIN_list)\\t(%s)'%(line_idx, len(my_lineIN_list))\n #TMP if DEBUG_1A and 'Time bus' in this_line: print header_1A, '%3d: 1000, this_line\\t(%s)'%(line_idx, this_line)\n\n # Fix unwantted tab in original PDF file: replace unwanted tab into a space\n for tmp_str in tab_within_cell_list:\n this_line = this_line.replace('%s\\t'%(tmp_str), '%s '%(tmp_str), 1)\n\n if len(this_line) != 0:\n try:\n header_of_the_table \n except NameError:\n header_of_the_table = re.sub('\\t.*', '', this_line) # extract only the first cell\n my_lineIN_list.append(this_line)\n else:\n # The table itself is between 2 blank lines\n if len(my_lineIN_list) != 0:\n break\n\n # Make the text line compatible with 
CSV syntax\n line_idx = 0\n while line_idx < len(my_lineIN_list):\n this_line = my_lineIN_list[line_idx]\n this_delimiter_count = this_line.count('\\t')\n\n if DEBUG_1: print header, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n if DEBUG_1A and 'Time bus' in this_line: print header_1A, '%3d: 2000, this_line\\t(%s)'%(line_idx, this_line)\n\n if this_line.startswith(header_of_the_table):\n # header of the table\n delimiter_count = this_delimiter_count\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n\n else:\n # Get one or more line until enough cells: \n if DEBUG_1A and 'Time bus' in this_line: print header_1A, '%3d: 2400, this_line\\t(%s)'%(line_idx, this_line)\n\n while this_line.count('\\t') < delimiter_count:\n if DEBUG_1A and 'Time bus' in this_line: print header_1A, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n # append next line\n if line_idx+1 < len(my_lineIN_list):\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_1: print header, '%3d: 3000, this_line\\t(%s)'%(line_idx, this_line)\n if DEBUG_1A and 'Time bus' in this_line: print header_1A, '%3d: 4000, this_line\\t(%s)'%(line_idx, this_line)\n else:\n break\n\n # Has enough cells: append one or more line if these line has no tab except \n # the line start with a specific text\n while line_idx+1 < len(my_lineIN_list) and not '\\t' in my_lineIN_list[line_idx+1]:\n if any (z in my_lineIN_list[line_idx+1] for z in cell0_without_tab_list):\n break\n else:\n line_idx += 1\n this_line += ' ' + my_lineIN_list[line_idx]\n if DEBUG_1: print header, '%3d: 4000, this_line\\t(%s)'%(line_idx, this_line)\n this_line = text_to_csv_syntax(this_line) # text line compatible with CSV syntax\n this_line = text_to_excessive_space_on_hyphen(this_line) # remove the space in 'xx- xx', or 'xx -xx'\n my_lineOUT_list.append(this_line)\n line_idx += 1\n\n #if DEBUG_1: pp(my_lineOUT_list)\n if DEBUG_1: \n for str2 in my_lineOUT_list:\n if DEBUG_1: print header, 'str2(%r)'%(str2)\n\n return my_lineOUT_list", "def readfields(self, dbname, line1, nlines, startdate): \n\n conn=sqlite3.connect(dbname)\n c=conn.cursor()\n self.obsHistID=np.zeros(nlines)\n self.fieldMJD=np.zeros(nlines)\n self.fieldRA=np.zeros(nlines)\n self.fieldDec=np.zeros(nlines)\n self.rotSkyPos=np.zeros(nlines)\n self.filter=np.zeros(nlines, dtype=str)\n self.fiveSigmaDepth=np.zeros(nlines)\n self.seeingFwhmEff=np.zeros(nlines)\n\n count=0\n# exec_str='SELECT obsHistID,expMJD,fieldRA,fieldDec,rotSkyPos FROM Summary order by expMJD limit %d,%d' %(line1-1,nlines)\n exec_str='SELECT observationId,observationStartMJD,FieldRA,FieldDec,rotSkyPos,filter,fiveSigmaDepth,seeingFwhmEff FROM SummaryAllProps order by observationStartMJD limit %d,%d' %(line1-1,nlines)\n for row in c.execute(exec_str):\n self.obsHistID[count] = row[0]\n self.fieldMJD[count] = row[1]\n self.fieldRA[count] = np.deg2rad(row[2])\n self.fieldDec[count] = np.deg2rad(row[3])\n self.rotSkyPos[count] = np.deg2rad(row[4])\n self.filter[count] = row[5]\n self.fiveSigmaDepth[count] = row[6]\n self.seeingFwhmEff[count] = row[7]\n count +=1\n\n # startdate is 0 if not provided by user. 
In this case use the default MJDs.\n if (startdate > 1):\n self.fieldMJD=self.fieldMJD+(int(startdate)-int(self.fieldMJD[0]))", "def load_and_filer(pwd,rval=0.95):\n df = pd.read_csv(pwd)\n df = rl.give_good_structure(df)\n df = df.loc[(df['end_type']=='DIVISION')|(df['end_type']=='DIV')|(df['end_type']=='div')]\n if 'length_box' in df.columns: #guillaume data\n df['time_sec'] = df['frame']*60*3\n df['length_box_um'] = df['length_box']*0.065\n else:\n df['length_box_um'] = (df['vertical_bottom'] - df['vertical_top'])*0.065\n df = df.groupby('cell').filter(lambda x: True if len(x['length_box_um'])>2 else False)\n df =df.groupby('cell').filter(lambda x: linregress(x['time_sec'],np.log(x['length_box_um'])).rvalue>rval)\n #df = rl.give_unique_dataset(df,6,18)\n df =df[['length_box_um','time_sec','parent_id','id','gl','date','pos','cell','lane_ID','end_type']]\n return df", "def read_obsstat(logger: logging.Logger = None) -> pd.DataFrame:\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n dfTmp = pd.read_csv(dStat['obsstatf'], delim_whitespace=True)\n dfTmp.rename(columns={'TYP': 'PRN'}, inplace=True)\n if logger is not None:\n amutils.logHeadTailDataFrame(df=dfTmp, dfName='dfTmp', callerName=cFuncName, logger=logger)\n\n # select the SNR colmuns for the selected frequencies\n col_names = dfTmp.columns.tolist()\n cols2keep = col_names[:4]\n for freq in dStat['cli']['freqs']:\n cols2keep += [col for col in col_names[4:] if col.startswith('S{freq:s}'.format(freq=freq))]\n\n return dfTmp[cols2keep]", "def parse_ic_info(file_path: str) -> pd.DataFrame:\n log = ess_factory(fullpath=file_path, check_for_errors=False)\n ic_dict = {item: []\n for item in ['label', 'type', 'atoms', 'redundant', 'scan']}\n scan_args = parse_scan_args(file_path)\n max_atom_ind = scan_args['n_atom']\n if isinstance(log, GaussianLog):\n ic_info_block = parse_str_blocks(file_path, 'Initial Parameters', '-----------', regex=False,\n tail_count=3)[0][5:-1]\n for line in ic_info_block:\n # Line example with split() indices:\n # 0 1 2 3 4 5 6 7\n # ! R1 R(1, 2) 1.3581 calculate D2E/DX2 analytically !\n terms = line.split()\n ic_dict['label'].append(terms[1])\n ic_dict['type'].append(terms[1][0]) # 'R: bond, A: angle, D: dihedral\n atom_inds = re.split(r'[(),]', terms[2])[1:-1]\n ic_dict['atoms'].append([int(atom_ind) for atom_ind in atom_inds])\n\n # Identify redundant, cases like 5 atom angles or redundant atoms\n if (ic_dict['type'][-1] == 'A' and len(atom_inds) > 3) \\\n or (ic_dict['type'][-1] == 'R' and len(atom_inds) > 2) \\\n or (ic_dict['type'][-1] == 'D' and len(atom_inds) > 4):\n ic_dict['redundant'].append(True)\n else:\n # Sometimes, redundant atoms with weird indices are added.\n # Reason unclear. 
Maybe to better define the molecule, or to\n # solve equations more easily.\n weird_indices = [index for index in ic_dict['atoms'][-1]\n if index <= 0 or index > max_atom_ind]\n if weird_indices:\n ic_dict['redundant'].append(True)\n else:\n ic_dict['redundant'].append(False)\n\n # Identify ics being scanned\n if len(scan_args['scan']) == len(atom_inds) == 4 \\\n and is_same_pivot(scan_args['scan'], ic_dict['atoms'][-1]):\n ic_dict['scan'].append(True)\n elif len(scan_args['scan']) == len(atom_inds) == 2 \\\n and set(scan_args['scan']) == set(ic_dict['atoms'][-1]):\n ic_dict['scan'].append(True)\n else:\n # Currently doesn't support scan of angles.\n ic_dict['scan'].append(False)\n else:\n raise NotImplementedError(f'parse_ic_info() can currently only parse Gaussian output '\n f'files, got {log}')\n ic_info = pd.DataFrame.from_dict(ic_dict)\n ic_info = ic_info.set_index('label')\n return ic_info", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def igra2_ascii_to_dataframe(file=''):\n if debug:\n print(\"Running igra2_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True)\n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n read_data = [] # Lists containing the raw data from the ascii file, and the observation dates\n \"\"\" Data to be extracted and stored from the igra2 station files \n Some info is contained in the header of each ascent, some in the following data \"\"\"\n\n \"\"\" Initialize the variables that can be read from the igra2 files \"\"\"\n ident,year,month,day,hour,reltime,p_src,np_src,lat, lon = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan \n lvltyp1,lvltyp2,etime,press,pflag,gph,zflag,temp,tflag,rh,dpdep,wdir,wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan # initialize to zeros\n stations_id = []\n idate = np.nan\n count = 0\n head_count = 0\n \n obs_id = 0\n \n def make_release_time(date_time, hour, release):\n \"\"\" build a sonde release time \n ex 2019 02 20 00 2349 \n ex 2019 01 10 00 0011 \n They round the release time to the closest hour. 
\n It can be the same day or the following !!!\n date_time = date_time pytohn object, \n date, time, release = original strings \n \"\"\"\n release_h = int(release[:2])\n release_m = int(release[2:4])\n \n if release_h == 99:\n return 0 #largest integer number int 64 \n \n else:\n if release_m == 99:\n release_m = 0\n release_date_time = date_time.replace(hour= release_h, minute= release_m) \n \n \"\"\" Here, I have to subtract one day to the release time stamp if the hour of the time stamp is in th evening,\n but the nominal time is reported at midnight hence in the following day. For example 2019 02 20 00 2349 from file VMM00048820 \"\"\"\n if hour == '00':\n if release_h > 20:\n release_date_time = release_date_time - timedelta(days=1)\n else:\n pass\n \n return release_date_time \n \n \n for i, line in enumerate(data):\n if line[0] == '#':\n head_count = head_count +1 \n # Info from the Header line of each ascent \n ident = line[1:12] # station identifier\n ident = ident[6:12]\n if ident not in stations_id:\n stations_id.append(ident)\n \n year = line[13:17] # year, months, day, hour of the observation\n month = line[18:20]\n day = line[21:23]\n hour = line[24:26] \n reltime = line[27:31] # release time of the sounding.\n numlev = int(line[32:36]) # number of levels in the sounding == number of data recorded in the ascent\n p_src = line[37:45] # data source code for the pressure levels \n np_src = line[46:54] # data source code for non-pressure levels\n lat = int(line[55:62]) / 10000. # latitude and longitude\n lon = int(line[63:71]) / 10000.\n #observation_id = i\n if int(hour) == 99:\n time = reltime + '00'\n else:\n time = hour + '0000'\n \n if '99' in time:\n time = time.replace('99', '00')\n\n idate = datetime.strptime(year + month + day + time, '%Y%m%d%H%M%S') # constructed according to CDM\n \n release_time = make_release_time(idate, hour, reltime) # making the release time \n \n \n iday = int(year + month + day)\n count = count + 1\n else:\n # Data of each ascent\n lvltyp1 = int(line[0]) # 1- 1 integer major level type indicator\n lvltyp2 = int(line[1]) # 2- 2 integer minor level type indicator\n etime = int(line[3:8]) # 4- 8 integer elapsed time since launch\n press = int(line[9:15]) # 10- 15 integer reported pressure\n \n if press == -9999:\n press = np.nan\n \n pflag = line[15] # 16- 16 character pressure processing flag\n \n gph = int(line[16:21]) # 17- 21 integer geopotential height [m]\n \n if gph == -9999 or gph == -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say \n gph = np.nan # 23- 27 integer temperature, [Celsius to Kelvin ] \n \n zflag = line[21] # 22- 22 character gph processing flag, \n \n temp = int(line[22:27]) \n if temp != -9999 and temp != -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say \n temp = temp / 10. + 273.15 # 23- 27 integer temperature, [Celsius to Kelvin ] \n else:\n temp = np.nan \n \n tflag = line[27] # 28- 28 character temperature processing flag\n \n rh = int(line[28:33]) # 30- 34 integer relative humidity [%] \n if rh != -8888 and rh != -9999:\n rh = rh / 1000. # converting from percentage to absolute ratio \n else:\n rh = np.nan\n \n dpdp = int(line[34:39]) \n if dpdp != -9999 and dpdp !=-8888: \n dpdp = dpdp / 10. # 36- 40 integer dew point depression (degrees to tenth e.g. 
11=1.1 C) \n else:\n dpdp = np.nan \n \n wdir = int(line[40:45]) # 41- 45 integer wind direction (degrees from north, 90 = east)\n if wdir == -8888 or wdir == -9999 :\n wdir = np.nan \n \n wspd = int(line[46:51]) # 47- 51 integer wind speed (meters per second to tenths, e.g. 11 = 1.1 m/s [m/s]\n if wspd != -8888 and wspd != -9999 :\n wspd = wspd / 10. \n else:\n wspd = np.nan \n if reltime == 9999.0:\n reltime = np.nan \n \n z_type = np.nan\n if not (np.isnan(press)):\n z_type = 1\n elif (np.isnan(press) and not np.isnan(gph) ) :\n z_type = 2 \n \n for value,var in zip([gph, temp, wspd, wdir, rh, dpdp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity' , 'dew_point_depression'] ):\n obs_id = obs_id +1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1 \n z_type = 1 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n else:\n z_type = -2147483648 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n\n\n df = pd.DataFrame(data= read_data, columns= column_names_igra2)\n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) # FF check here !!!! \n \n return df, stations_id", "def load_log(log_dir, log_file, img_dir):\n f = os.path.join(log_dir, log_file)\n df = pd.read_csv(f, header=None, names=['center','left','right', 'angle', 'throttle', 'break', 'speed'])\n i = os.path.join(log_dir, img_dir)\n fix_logs_paths(i, df)\n return df", "def grr_ifconfig(line: Text) -> pd.DataFrame:\n del line # Unused.\n return magics_impl.grr_ifconfig_impl()" ]
[ "0.59124273", "0.587135", "0.57448095", "0.570323", "0.56453687", "0.5628275", "0.5556992", "0.5550915", "0.5534511", "0.5524232", "0.5510892", "0.54958004", "0.54886043", "0.5483201", "0.54807746", "0.54399705", "0.5434385", "0.5400373", "0.53594816", "0.5350207", "0.53483963", "0.5344388", "0.53407145", "0.5316606", "0.53026265", "0.52734", "0.5263125", "0.52597827", "0.52430874", "0.5228692" ]
0.75194466
0
function to map all of the stops and add popups to them showing the various values
def map(stop_id,base,future,colmn_per,colmn_per_str,colmn_diff,df,col_func,rad_func,lat,lon):
    #sets the map zoomed into san fran with a scale bar
    mapa = folium.Map([37.765, -122.45],
                      zoom_start=13,
                      tiles='cartodbpositron',
                      control_scale = True)
    #sets the layers up so that marks can be added to it (NEED TO CHANGE WHEN THE DATA IM MAPPING CHANGES!!!)
    missing09_group = folium.FeatureGroup(name = 'Stops Missing in 2009')
    missing16_group = folium.FeatureGroup(name = 'Stops Missing in 2016')
    missing_both_group = folium.FeatureGroup(name = 'Stops Missing in Both Years')
    good_group = folium.FeatureGroup(name = 'Model Accuracy Difference (2016 - 2009)')

    for name, row in df.iterrows():
        #make all of the stops missing in both years purple with a radius of 20
        if np.isnan(row[base]) & np.isnan(row[future]):
            html= """
            <h2> STOP """ + str(row[stop_id]) + """ </h2>
            <p>
            2009 Name: Missing
            <br>
            2016 Name: Missing
            </p>
            <p>
            Percent Difference: N/A
            <br>
            Difference: N/A
            </p>
            <p>
            2009 Value: Missing
            <br>
            2016 Value: Missing
            </p>
            """
            iframe = folium.IFrame(html=html, width=300, height=150)
            pop_up = folium.Popup(iframe, max_width=2650)

            folium.CircleMarker([row[lat], row[lon]],
                                color='Purple',
                                fill_color='Purple',
                                radius= 5,
                                fill_opacity = 0.3,
                                popup=pop_up).add_to(missing_both_group)

        # make all of the bus stops missing in 2009 sea green
        elif np.isnan(row[base]) == True:
            html= """
            <h2> STOP """ + str(row[stop_id]) + """ </h2>
            <p>
            2009 Name: Missing
            <br>
            2016 Name: """ + str(row['STOP NAME']) + """
            </p>
            <p>
            Percent Difference: N/A
            <br>
            Difference: N/A
            </p>
            <p>
            2009 Value: Missing
            <br>
            2016 Value: """ + str(round(row[future])) + """
            </p>
            """
            iframe = folium.IFrame(html=html, width=300, height=150)
            pop_up = folium.Popup(iframe, max_width=2650)

            folium.CircleMarker([row[lat], row[lon]],
                                color='#3CB371',
                                fill_color='#3CB371',
                                radius=rad_func(row[future]),
                                fill_opacity = 0.3,
                                popup=pop_up).add_to(missing09_group)

        # make all of the bus stops missing in 2016 maroon
        elif np.isnan(row[future]) == True:
            #if pd.isnull(row['STOP_NAME_09']):
                # row['STOP_NAME_09'] = 'Missing '
            html="""
            <h2> STOP """ + str(row[stop_id]) + """ </h2>
            <p>
            2009 Name: """ + str(row['STOP']) + """
            <br>
            2016 Name: Missing
            </p>
            <p>
            Percent Difference: N/A
            <br>
            Difference: N/A
            </p>
            <p>
            2009 Value: """ + str(row[base]) + """
            <br>
            2016 Value: Missing
            </p>"""
            iframe = folium.IFrame(html=html, width=300, height=150)
            pop_up = folium.Popup(iframe, max_width=2650)

            folium.CircleMarker([row[lat], row[lon]],
                                color='#800000',
                                fill_color='#800000',
                                radius=rad_func(row[base]),
                                fill_opacity = 0.3,
                                popup=pop_up).add_to(missing16_group)
        #when both stops have a value of 0 then the percent difference is calculated as a nan and causes issues with the color and radius function
        #since the change is 0 (0 to 0) we set the color and radius equal to what it would have been set by the radius and color function (Dark Grey and a radius of 3 map units)
        elif row[future] == 0 and row[base] == 0:
            # if pd.isnull(row['STOP_NAME_09']):
                #row['STOP_NAME_09'] = 'Missing '
            #elif pd.isnull(row['STOP_NAME_16']):
                #row['STOP_NAME_16'] = 'Missing '
            html="""
            <h2> STOP """ + str(row[stop_id]) + """ </h2>
            <p>
            2009 Name: """ + row['STOP'] + """
            <br> <br>
            2016 Name: """ + row['STOP NAME'] + """
            </p>
            <p>
            Percent Difference: 0%
            <br>
            Difference: """ + str(round(row[colmn_diff])) + """
            </p>
            <p>
            2009 Value: """ + str(round(row[base])) + """
            <br>
            2016 Value: """ + str(round(row[future])) + """
            </p>
            """
            iframe = folium.IFrame(html=html, width=300, height=150)
            pop_up = folium.Popup(iframe, max_width=2650)

            folium.CircleMarker([row["LAT"], row["LON"]],
                                color='DarkGray',
                                fill_color='DarkGray',
                                radius= 5,
                                fill_opacity = 0.3,
                                popup=pop_up).add_to(good_group)
        #based on percent difference map the bus stop ranging from dark green (high % gain) to light green (medium % gain) to grey (low % gain/loss) to light red (low % loss) to dark red (high % loss)
        else:
            #takes care of a bug when there is a stop name in one year but not the other and a bug of having an infinite percent difference when the base year is zero
            if row[base] == 0:
                row[base] = 0.00001
                row[colmn_per] = ((row[future] - row[base])/row[base])*100
            #if pd.isnull(row['STOP_NAME_09']):
                #row['STOP_NAME_09'] = 'Missing '
            #elif pd.isnull(row['STOP_NAME_16']):
                #row['STOP_NAME_16'] = 'Missing '
            html="""
            <h2> STOP: """ + str(row[stop_id]) + """ </h2>
            <p>
            2009 Name: """ + row['STOP'] + """
            <br> <br>
            2016 Name: """ + row['STOP NAME'] + """
            </p>
            <p>
            Percent Difference: """ + str(round(row[colmn_per])) + """%
            <br>
            Difference: """ + str(round(row[colmn_diff])) + """
            </p>
            <p>
            2009 Value: """ + str(round(row[base])) + """
            <br>
            2016 Value: """ + str(round(row[future])) + """
            </p>"""
            iframe = folium.IFrame(html=html, width=300, height=150)
            pop_up = folium.Popup(iframe, max_width=2650)
            folium.CircleMarker([row[lat], row[lon]],
                                color=col_func(row[colmn_per]),
                                fill_color=col_func(row[colmn_per]),
                                radius=rad_func(row[colmn_diff]),
                                fill_opacity = 0.3,
                                popup=pop_up).add_to(good_group)
    missing09_group.add_to(mapa)
    missing16_group.add_to(mapa)
    missing_both_group.add_to(mapa)
    good_group.add_to(mapa)
    folium.LayerControl().add_to(mapa)

    return mapa
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def demo_one_map():\n radii = [2, 5, 7.1, 0.3, 10]\n demo_1(radii)\n demo_2(radii)", "def create_popups(web_map, project_name, layer_names):\r\n if layer_names:\r\n for layer_name in layer_names:\r\n # popup\r\n feature_layer_popup(\r\n map_service_name='{}_{}'.format(project_name, 'Map'),\r\n map_service_type='Feature Layer',\r\n web_map=web_map,\r\n layer_name=layer_name\r\n )\r\n else:\r\n print('No popup was defined for layers')", "def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)", "def main() -> None:\n map_ssh = folium.Map(location=[45.523, -122.675], zoom_start=2)\n\n with open(\"lib/base_data.txt\") as tsv:\n for row in csv.reader(tsv, delimiter=\"\\t\"):\n name = row[0]\n try:\n x = float(row[1])\n y = float(row[2])\n folium.Marker([x, y], popup=name).add_to(map_ssh)\n except ValueError:\n pass\n\n map_ssh.save(\"map_ssh.html\")", "def velocity_map(self, output='test'):\n self.figure = figure(figsize=(10,3))\n self.axes = self.figure.gca() \n xWindowLim = (self.analyst.windowSize[0], self.analyst.windowSize[1])\n yWindowLim = (self.analyst.windowSize[2], self.analyst.windowSize[3])\n \n # Generate contours for velocity magnitude \n xGrid = linspace(\\\n xWindowLim[0]*self.millimetersPerPixel, \n xWindowLim[1]*self.millimetersPerPixel, self.nbins)\n yGrid = linspace(\\\n yWindowLim[0]*self.millimetersPerPixel, \n yWindowLim[1]*self.millimetersPerPixel, self.nbins)\n magVelGrid = griddata(self.xs, self.ys, self.magVel, xGrid, yGrid) \n # csf = self.axes.contourf(xGrid, yGrid, magVelGrid, range(2,26,2), cmap=myColorMap)\n csf = self.axes.contourf(xGrid, yGrid, magVelGrid, cmap=myColorMap)\n cbar = self.figure.colorbar(csf) \n cbar.set_label(\"Velocity magnitude, px/s\")\n \n # Generate arrow plot\n # q = self.axes.quiver(self.xs, self.ys, self.us, self.vs,\n # angles = 'xy', scale_units='xy', scale=2, pivot = 'mid')\n # 
self.axes.quiverkey(q, 0.9, 1.0, 10, \"10 px/frame\", coordinates='axes') \n \n # Save figure \n self.axes.set_aspect('equal')\n self.axes.set_xlim(*xWindowLim)\n self.axes.set_ylim(*yWindowLim)\n self.figure.savefig(output + '_velocity_map.pdf')", "def openvariables(self):\n\n print \"Open Variable\"\n self.combo_wms_time_first_d.clear()\n self.combo_wms_time_first_h.clear()\n self.combo_wms_time_last_d.clear()\n self.combo_wms_time_last_h.clear()\n self.combo_wms_time_first_d_2.clear()\n self.combo_wms_time_first_h_2.clear()\n self.combo_wms_time_last_d_2.clear()\n self.combo_wms_time_last_h_2.clear()\n self.combo_colorbar.clear()\n self.combo_proj.clear()\n # 0 list_variables\n # 1 list_time\n # 2 list_server\n # 3 list_DGF\n # 4 list_MFTP\n # 5 list_WMS\n # 6 list_depth\n # 7 list_resol\n # print \"Open variables\"\n product=str(self.combo_product_list.currentText())\n dataset=str(self.combo_dataset_list.currentText())\n self.combo_variable_list.clear()\n url_base=self.dict_prod[product][dataset][5]\n print 'url'\n print url_base\n self.dict_var=self.getXML(url_base)\n print 'Get XML'\n for key in self.dict_var.keys():\n if not str(key).startswith('Automatically'):\n self.combo_variable_list.addItem(str(key))\n ## Add current in the list if u, v exist\n\n variable=str(self.combo_variable_list.currentText()) \n list_area=self.dict_var[str(variable)][2]\n self.WMS_westBound.setText(list_area[0]) \n self.WMS_eastBound.setText(list_area[1])\n self.WMS_southBound.setText(list_area[2])\n self.WMS_northBound.setText(list_area[3])\n self.WMS_westBound_2.setText(list_area[0]) \n self.WMS_eastBound_2.setText(list_area[1])\n self.WMS_southBound_2.setText(list_area[2])\n self.WMS_northBound_2.setText(list_area[3])\n self.combo_wms_time_first_d.setEnabled(True)\n self.combo_wms_time_first_h.setEnabled(True)\n self.combo_wms_time_last_d.setEnabled(True)\n self.combo_wms_time_last_h.setEnabled(True)\n self.combo_wms_time_first_d_2.setEnabled(True)\n self.combo_wms_time_first_h_2.setEnabled(True)\n self.combo_wms_time_last_d_2.setEnabled(True)\n self.combo_wms_time_last_h_2.setEnabled(True)\n self.combo_colorbar.setEnabled(True)\n self.combo_proj.setEnabled(True)\n self.WMS_westBound.setEnabled(True)\n self.WMS_eastBound.setEnabled(True)\n self.WMS_northBound.setEnabled(True)\n self.WMS_southBound.setEnabled(True)\n self.WMS_westBound_2.setEnabled(True)\n self.WMS_eastBound_2.setEnabled(True)\n self.WMS_northBound_2.setEnabled(True)\n self.WMS_southBound_2.setEnabled(True)\n print \"Get XML Ok\"\n ## Open wms server\n ## Find complementary informations from WMS with OWSlib\n try:\n from owslib.wms import WebMapService\n except ImportError:\n raise ImportError('OWSLib required to use wmsimage method')\n #print 'projection options:'\n self.wms = WebMapService(url_base[0])\n projections=self.wms[variable].crsOptions\n styles=self.wms[variable].styles\n for colorbar in styles.keys():\n self.combo_colorbar.addItem(str(colorbar.split('/')[1]))\n self.minscale_value.setText('-50')\n self.maxscale_value.setText('50')\n self.nbcolors_value.setText('20')\n self.Xpixels_value.setText('800')\n self.Xparallels_value.setText('20')\n self.Ymedians_value.setText('20')\n formats=self.wms.getOperationByName('GetMap').formatOptions\n ind=0\n for proj in projections :\n if str(proj) == \"EPSG:4326\" or str(proj) == \"EPSG:3408\" or str(proj) == \"EPSG:3409\" : \n self.combo_proj.addItem(str(proj))", "def _onbuttonReqMapClicked(self):\n\n day1=str(self.combo_wms_time_first_d.currentText())\n 
hour1=str(self.combo_wms_time_first_h.currentText())\n date_val=day1+hour1\n depth=str(self.combo_wms_layer_depth.currentText())\n variable=str(self.combo_variable_list.currentText())\n product=str(self.combo_product_list.currentText())\n dataset=str(self.combo_dataset_list.currentText())\n xmin=int(float(self.WMS_westBound.text()))\n xmax=int(float(self.WMS_eastBound.text()))\n ymin=int(float(self.WMS_southBound.text()))\n ymax=int(float(self.WMS_northBound.text()))\n dir_out=self.tmp\n rastermin=self.minscale_value.text()\n rastermax=self.maxscale_value.text()\n nb_colors=self.nbcolors_value.text()\n xpixels=float(self.Xpixels_value.text())\n xparallels=int(self.Xparallels_value.text())\n ymeridians=int(self.Ymedians_value.text())\n dpi=300\n colorbar=str(self.combo_colorbar.currentText())\n input_srs=str(self.combo_proj.currentText())\n epsg_val=input_srs.split(':')[1]\n ll_polar=False\n##\tif self.checkBox_2.isChecked() == True :\n##\t print \"Projection arctic\"\n## #m = Basemap(llcrnrlon=xmin, urcrnrlat=ymax,\n## # urcrnrlon=xmax, llcrnrlat=ymin,resolution='l',epsg=epsg_val) \n## ##m = Basemap(projection='npstere',boundinglat=ymin,lon_0=0,round=True,resolution='l') \n## m = Basemap(projection='npstere',boundinglat=ymin,lon_0=0,round=True,resolution='l') \n## #Proj4js.defs[\"EPSG:3408\"] = \"+proj=laea +lat_0=90 +lon_0=0 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +no_defs\";\n## #\n## ll_polar=True\n##\telif self.checkBox_3.isChecked() == True :\n##\t print \"Projection antarctic\"\n## m = Basemap(projection='spstere',boundinglat=ymax,lon_0=180,round=True,resolution='l') \n## ll_polar=True\n##\telse : \n m = Basemap(llcrnrlon=xmin, urcrnrlat=ymax,\n urcrnrlon=xmax, llcrnrlat=ymin,resolution='l',epsg=epsg_val) \n print \"cylindric projection\"\n\n # ypixels not given, find by scaling xpixels by the map aspect ratio.\n ypixels = int(m.aspect*xpixels)\n style='boxfill/'+colorbar\n print input_srs\n print epsg_val\n p = pyproj.Proj(init=\"epsg:%s\" % epsg_val, preserve_units=True)\n xmin,ymin = p(m.llcrnrlon,m.llcrnrlat)\n xmax,ymax = p(m.urcrnrlon,m.urcrnrlat)\n if epsg_val == '4326' :\n xmin = (180./np.pi)*xmin; xmax = (180./np.pi)*xmax\n ymin = (180./np.pi)*ymin; ymax = (180./np.pi)*ymax\n print \"Cylindric projection\"\n print xmin,xmax,ymin,ymax\n print style\n img = self.wms.getmap(layers=[variable],service='wms',bbox=(xmin,ymin,xmax,ymax),\n size=(int(xpixels),ypixels),\n format='image/png',\n elevation=depth,\n srs=input_srs,\n time=date_val,\n colorscalerange=rastermin+','+rastermax,numcolorbands=nb_colors,logscale=False,\n styles=[style])\n image=imread(io.BytesIO(img.read()),format='png')\n if variable == \"sea_water_velocity\" :\n ylabel=\"magnitude\"\n else :\n ylabel=self.wms[variable].abstract\n\n long_name=self.wms[variable].title\n title=product+\" - \"+long_name+\" \"+\" - \"+date_val\n file_pal='./palettes/thredds/'+colorbar+'.pal'\n my_cmap=compute_cmap(file_pal,colorbar)\n cm.register_cmap(name=colorbar, cmap=my_cmap)\n font=10\n norm = mpl.colors.Normalize(vmin=float(rastermin), vmax=float(rastermax), clip=False) \n parallels=np.round(np.arange(ymin,ymax+xparallels/2,xparallels))\n meridians = np.round(np.arange(xmin,xmax+ymeridians/2,ymeridians))\n # Plot figure \n plt.figure(figsize=(20,12))\n if epsg_val == '4326' :\n m.drawcoastlines(color='lightgrey',linewidth=0.25)\n m.fillcontinents(color='lightgrey')\n m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10,linewidth=0.2,dashes=[1, 5])\n 
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10,linewidth=0.2)\n\n elif ll_polar == True : \n #m.drawcoastlines(linewidth=0.5)\n m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10,linewidth=0.2)\n m.drawmeridians(meridians[:-1],labels=[1,1,1,1],fontsize=10,linewidth=0.2,dashes=[1, 5])\n ## Plot the image\n cs=m.imshow(image,origin='upper',alpha=1,cmap=(cm.get_cmap(colorbar,int(nb_colors))),norm=norm)\n ## Add colorbar\n cb=plt.colorbar(cs,orientation='vertical',format='%4.2f',shrink=0.7)\n cb.ax.set_ylabel(ylabel, fontsize=int(font)+4)\n cl=plt.getp(cb.ax, 'ymajorticklabels')\n plt.setp(cl, fontsize=font)\n\n plt.title(title,fontsize=font+4,y=1.05)\n plt.savefig('images/'+product+\"_\"+long_name+\"_\"+date_val+\"_basemap.png\",dpi=300,bbox_inches='tight')\n plt.show()", "def derive_features(self):\n\n temp = int(self.stop_id)\n\n while temp not in self.stops_latlon.keys():\n if temp < 7692:\n temp += 1\n else:\n while temp not in self.stops_latlon.keys():\n temp -= 1\n\n self.latitude = self.stops_latlon[temp][0]\n self.longitude = self.stops_latlon[temp][1]\n\n self.distance_centre = FormatInput.haversine(self.latitude, self.longitude)\n\n self.cluster = FormatInput.map_stop_to_cluster(self.cluster_map, self.stop_id)\n\n self.holiday = FormatInput.add_holiday(self.date)", "def visualize(g):\n url = \"http://www.gcmap.com/mapui?P=\"\n routes = []\n \n for key in g.city_dict:\n for flight in g.city_dict[key].get_flights_out():\n route = (g.city_dict[key].get_code(), flight[0])\n if(route not in routes):\n routes.append(route)\n \n for flight in routes:\n url = url + flight[0] + \"-\" + flight[1]\n url = url + \",+\"\n \n url = url[:-2]\n return url", "def population_results_map():\n start_time = time()\n fig= Figure(figsize=(60,52), frameon=True, tight_layout=True)\n ax = fig.add_subplot(1,1,1, axisbg='#EEEEEE')\n ax.grid(color='white', linestyle='solid')\n rstyle(ax)\n\n queryset = Unit.objects.all()\n # It might be faster to request a flat value list and then construct new tuples based on that\n latlong = [(u.latitude, u.longitude, \n u.unitstats.cumulative_infected, \n u.unitstats.cumulative_vaccinated,\n u.unitstats.cumulative_destroyed,\n u.unitstats.cumulative_zone_focus, \n u.initial_size,\n ) if hasattr(u, \"unitstats\") else\n (u.latitude, u.longitude, -1, -1, -1, -1, u.initial_size)\n for u in queryset]\n total_iterations = float(len(list_of_iterations()))\n latitude, longitude, infected, vaccinated, destroyed, zone_focus, herd_size = zip(*latlong)\n zone_blues, red_infected, green_vaccinated = define_color_mappings()\n \n graph_zones(ax, latitude, longitude, total_iterations, zone_blues, zone_focus)\n graph_states(ax, latitude, longitude, total_iterations, infected, vaccinated, destroyed)\n \n neutral_longitude = [entry[1] for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]\n neutral_latitude = [entry[0] for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]\n # to ensure zero occurrences has a different color\n uninvolved = ax.scatter(neutral_longitude,\n neutral_latitude,\n marker='s',\n s=[min(max(0.25, size / 100), 1000) for size in herd_size],\n color=(0.2, 0.2, 0.2, 1.0),\n zorder=1000)\n Results.graphing.crop_to_fit_map(ax)\n print(\"Population Map took %i seconds\" % int(time() - start_time))\n return fig", "def getPressures(self, flaggedmeteo, useWeatherStations=True, scaleHeight=500.):\n self._weather = self.asdm.weatherTable().get()\n self._station = self.asdm.stationTable().get()\n self._antenna = 
self.asdm.antennaTable().get()\n antennas = []\n wStationId = {}\n wStationName = {}\n wStationDistance = {}\n flagged_meteo = flaggedmeteo.split()\n count = {}\n self.meanDeltaPressure = {}\n \n centralStationId = Tag(0)\n #for r in self._station:\n # if str(r.name()) == \"MeteoCentral\":\n # centralStationId = r.stationId()\n for r in self._station:\n if str(r.name()) == \"MeteoTB2\":\n centralStationId = r.stationId()\n \n if centralStationId == Tag(0):\n print(\"== no central station\")\n return\n refPos = self.asdm.stationTable().getRowByKey(centralStationId).position()\n refVector = pl.array([refPos[0].get(),refPos[1].get(),refPos[2].get()])\n for row in self._antenna:\n ant = row.name()\n antennas.append(ant)\n count[ant] = 0\n self.meanDeltaPressure[ant] = 0\n if useWeatherStations:\n stationId = row.stationId()\n r0 = self.asdm.stationTable().getRowByKey(stationId)\n\n d2min = 1e12\n for r in self._station:\n if (str(r.type()) == 'WEATHER_STATION') and (str(r.name()) not in flagged_meteo):\n d2 = 0\n for i in range(3):\n d2 += (r0.position()[i].get()-r.position()[i].get())**2\n if d2 < d2min: \n rows = self.asdm.weatherTable().getByContext(r.stationId())\n # test th epressure\n if rows[0].pressure().get() > 1000:\n # \n wStationName[ant] = r.name()\n wStationId[ant] = r.stationId()\n wStationDistance[ant] = sqrt(d2)\n d2min = d2\n print('%s/%s : Weather station %15s distance %10.2f m' \\\n %(ant, r0.name(), wStationName[ant], wStationDistance[ant])) \n \n self.deltaPressures = {}\n self.centralPressure = {}\n self.centralWaterPressure = {}\n self.centralTemperature = {}\n self.minPressure = 1e10\n self.maxPressure = -1e10\n \n for row in self.asdm.calDataTable().get():\n if str(row.calType()) == \"CAL_WVR\":\n scan = row.scanSet()[0]\n if scan not in list(self.scanArrayTimes.keys()):\n start = row.startTimeObserved().get()\n end = row.endTimeObserved().get()\n\n self.deltaPressures[scan] = {}\n rows = self.asdm.weatherTable().getByContext(centralStationId)\n for r in rows:\n ttt = r.timeInterval().start().get()\n if (ttt > start) and (ttt < end):\n found = True\n self.centralPressure[scan] = r.pressure().get()\n self.centralTemperature[scan] = r.temperature().get()\n for wvrrow in self.asdm.calWVRTable().get():\n #print wvrrow.calDataId(), row.calDataId()\n if wvrrow.antennaName() == self.refAntenna:\n if wvrrow.calDataId() == row.calDataId():\n water = wvrrow.water().get() # meters\n break\n # assuming scale height of 1000m\n scaleHeight = 1000.\n self.centralWaterPressure[scan] = self.centralTemperature[scan]*water*1000./217.*100*(1000./scaleHeight) ## in pascals.\n print(\"=== scan %2s pres %7.3f mb temp %7.3f K w %6.3f mm ppH2O %7.3f mb\" %\\\n (scan, self.centralPressure[scan]/100., self.centralTemperature[scan], water*1000, self.centralWaterPressure[scan]/100.))\n self.minPressure = min(self.minPressure, self.centralPressure[scan])\n self.maxPressure = max(self.minPressure, self.centralPressure[scan])\n\n for ant in antennas:\n # print \"antenna \", ant \n water = 0\n for wvrrow in self.asdm.calWVRTable().get():\n if wvrrow.antennaName() == ant:\n if wvrrow.calDataId() == row.calDataId():\n water = wvrrow.water().get() # meters\n break\n temp = self.centralTemperature[scan]\n water_pressure = temp*water*1000./217.*100.*(1000./scaleHeight) # pascals\n self.deltaPressures[scan][ant] = \\\n - (water_pressure-self.centralWaterPressure[scan] ) \n if useWeatherStations:\n rows = self.asdm.weatherTable().getByContext(wStationId[ant])\n sRow = 
self.asdm.stationTable().getRowByKey(wStationId[ant])\n pos = sRow.position()\n padVector = pl.array([pos[0].get(),pos[1].get(),pos[2].get()]) \n diffVector = padVector - refVector\n diffHeight = sqrt(padVector[0]**2+padVector[1]**2+padVector[2]**2)\n diffHeight -= sqrt(refVector[0]**2+refVector[1]**2+refVector[2]**2)\n found = False\n pres = 0\n for r in rows:\n ttt = r.timeInterval().start().get()\n if (ttt > start) and (ttt < end):\n found = True\n pres = r.pressure().get()\n temp = r.temperature().get()\n if found:\n self.deltaPressures[scan][ant] += \\\n pres - self.centralPressure[scan]*(1.-6.5e-3/293.5*diffHeight)**5.26 \n # if scan>1:\n self.meanDeltaPressure[ant] += self.deltaPressures[scan][ant]\n count[ant] += 1\n\n for ant in list(count.keys()):\n self.meanDeltaPressure[ant] /= count[ant]", "def get_stop_info(stops):\n\tapi_url = 'http://webservices.nextbus.com/service/publicXMLFeed?command=predictions&a=sf-muni&stopId='\n\t\"\"\"Stop_dict = {bus_name:'38',\n\t\t\t\t\tminutes: 7,\n\t\t\t\t\tstop_location: 'Geary & Leavenworth'}\"\"\"\n\tfor stop in stops:\n\t\turl = api_url + str(stop)\n\treturn url", "def map_property(self, linc):\n sleep(1)\n linc = '{}'.format(linc).zfill(10)\n self.driver.switch_to_frame('fOpts')\n select_box = Select(self.driver.find_element_by_id('Finds_lstFindTypes'))\n select_box.select_by_visible_text('Linc Number')\n linc_box = self.driver.find_element_by_id('Finds_ctlLincNumber_txtLincNumber')\n linc_box.clear()\n linc_box.send_keys(linc)\n self.driver.find_element_by_id('Finds_cmdSubmit').click()\n if self.spatial_count == 0:\n sleep(8)\n sleep(4)\n self.driver.switch_to_default_content()\n if self.spatial_count == 0:\n try:\n e = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.ID, 'map'))\n )\n except:\n self.driver.refresh()\n self.driver.switch_to_default_content()\n e = WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located((By.ID, 'map'))\n )\n hover_target = self.driver.find_element_by_id('map')\n if self.spatial_count == 0:\n sleep(5)\n map_location = hover_target.location\n map_size = hover_target.size\n filename = 'data/sites/{}.png'.format(linc)\n self.driver.save_screenshot(filename)\n x = map_location['x'] + 50\n y = map_location['y']\n width = map_location['x'] + map_size['width'] - 50\n height = map_location['y'] + map_size['height']\n im = Image.open(filename)\n im = im.crop((int(x), int(y), int(width), int(height)))\n im.save(filename)\n if self.spatial_count == 0:\n sleep(5)\n ActionChains(self.driver).move_to_element(hover_target).drag_and_drop_by_offset(hover_target, 1, 1).perform()\n if self.spatial_count == 0:\n sleep(5)\n nad83_raw = self.driver.find_element_by_id('coordinateOutput').text\n nad83 = tuple(re.findall(r\"[0-9\\.\\-]+\", nad83_raw))\n gps = Geography().nad83(nad83, reverse=True)\n gpsr = (gps[1], gps[0])\n self.spatial_count += 1\n return gpsr", "def drought_fire_map(request):\n \n view_center = [-105.2, 39.0]\n view_options = MVView(\n projection='EPSG:4326',\n center=view_center,\n zoom=7.0,\n maxZoom=12,\n minZoom=5\n )\n\n # TIGER state/county mapserver\n tiger_boundaries = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/State_County/MapServer'},\n legend_title='States & Counties',\n layer_options={'visible':True,'opacity':0.8},\n legend_extent=[-112, 36.3, -98.5, 41.66]) \n \n # USGS Rest server for HUC watersheds \n watersheds = MVLayer(\n source='TileArcGISRest',\n options={'url': 
'https://hydro.nationalmap.gov/arcgis/rest/services/wbd/MapServer'},\n legend_title='HUC Watersheds',\n layer_options={'visible':False,'opacity':0.4},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n\n ## Colorado Wildfire Risk Assessment Portal - Fire Intensity Scale\n # https://www.coloradowildfirerisk.com/map/Public\n fire_intensity = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://www.coloradowildfirerisk.com/arcgis/rest/services/WUI_fieldwork/FireIntensityScale/MapServer',\n 'params': {'LAYERS': 'show:0'}},\n legend_title='Fire Intensity Scale',\n legend_classes=[\n MVLegendClass('polygon', 'Lowest Intensity', fill='rgba(199,215,158,0.5)'),\n MVLegendClass('polygon', '', fill='rgba(255,255,190,0.5)'),\n MVLegendClass('polygon', 'Moderate Intensity', fill='rgba(255,214,79,0.5)'),\n MVLegendClass('polygon', '', fill='rgba(255,153,0,0.5)'),\n MVLegendClass('polygon', 'Highest Intensity', fill='rgba(230,0,0,0.5)')],\n layer_options={'visible':False,'opacity':0.6},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n \n fire_occur = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://www.coloradowildfirerisk.com/arcgis/rest/services/WUI_fieldwork/FireOccurrenceAreas/MapServer',\n 'params': {'LAYERS': 'show:0'}},\n legend_title='Fire Occurance Areas',\n legend_classes=[\n MVLegendClass('polygon', '1 Lowest Occurrence', fill='rgba(204,204,204,0.5)'),\n MVLegendClass('polygon', '2', fill='rgba(199,215,158,0.5)'),\n MVLegendClass('polygon', '3', fill='rgba(242,242,183,0.5)'),\n MVLegendClass('polygon', '4', fill='rgba(255,211,127,0.5)'),\n MVLegendClass('polygon', '5', fill='rgba(255,170,0,0.5)'),\n MVLegendClass('polygon', '6', fill='rgba(168,0,0,0.5)'),\n MVLegendClass('polygon', '7 Highest Intensity', fill='rgba(230,0,0,0.5)')],\n layer_options={'visible':False,'opacity':0.6},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n \n ## WFAS - Severe Fire Weather Potential Forecast \n ## https://m.wfas.net/wfas_sfwp_map.html\n wfas_legend = MVLegendImageClass(value='SFWF',\n image_url='https://www.wfas.net/cgi-bin/mapserv?map=/var/www/html/nfdr/mapfiles/ndfd_geog5.map&SERVICE=WMS&VERSION=1.3.0&SLD_VERSION=1.1.0&REQUEST=GetLegendGraphic&FORMAT=image/jpeg&LAYER=fbxday0&STYLE=')\n wfas_sfw = MVLayer(\n source='ImageWMS',\n options={'url': 'https://www.wfas.net/cgi-bin/mapserv?map=/var/www/html/nfdr/mapfiles/wfas_wms_new.map',\n 'params': {'LAYERS': 'fbxday0'}},\n layer_options={'visible':True,'opacity':0.7},\n legend_title='Fire Weather Forecast',\n legend_classes=[wfas_legend],\n legend_extent=[-126, 24.5, -66.2, 49])\n \n ## NOAA - nowcoast fire weather\n nws_fire_hazards = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://nowcoast.noaa.gov/arcgis/rest/services/nowcoast/wwa_meteoceanhydro_longduration_hazards_time/MapServer',\n 'params': {'LAYERS': 'show:38'}},\n legend_title='NWS Fire Hazards',\n legend_classes=[\n MVLegendClass('polygon', 'Red Flag Warning', fill='rgba(255,20,147,0.6)'),\n MVLegendClass('polygon', 'Fire Weather Watch', fill='rgba(255,222,173,0.6)')],\n layer_options={'visible':False,'opacity':0.6},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n \n # Define map view options\n drought_fire_map_view_options = MapView(\n height='100%',\n width='100%',\n controls=['ZoomSlider', 'Rotate', 'ScaleLine', 'FullScreen',\n {'MousePosition': {'projection': 'EPSG:4326'}},\n {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}],\n layers=[tiger_boundaries,wfas_sfw,fire_intensity,fire_occur,nws_fire_hazards,watersheds],\n view=view_options,\n 
basemap='OpenStreetMap',\n legend=True\n )\n\n context = {\n 'drought_fire_map_view_options':drought_fire_map_view_options,\n }\n\n return render(request, 'co_drought/drought_fire.html', context)", "def show_map(pdb,show_sticks_all=False, show_sticks_metalbinding=True, show_probes=True, show_pdb_metals=True):\n view=py3Dmol.view(width=1000, height=800)\n\n view.addModel(open(pdb+'.pdb', 'r').read(),'pdb')\n if show_probes:\n view.addModel(open(pdb+'_PredictedSites.xyz', 'r').read(),'xyz')\n probes = open(pdb+'_PredictedSites.xyz', 'r').readlines()\n if(int(probes[0])!=0):\n probabilities = [p.replace('#','').split()[-1] for p in probes[2:]] # read p from comment in xyz file\n colors = {}\n # use different colors for the probabilities\n for i,x in enumerate(probabilities):\n colors[i] = '#%02x%02x%02x' % (0, 128, int(float(x)/float(probabilities[0])*255))\n else: #no predicted site\n colors = [] \n view.addLabel(\"No probe predicted\", {'position': {'x':0, 'y':0, 'z':0}, 'backgroundColor': '#0080FF', 'fontColor': 'white'});\n \n view.zoomTo()\n view.setBackgroundColor('white')\n view.setStyle({},{'cartoon': {'color':'gray'}})\n if show_sticks_all:\n view.setStyle({}, {'stick':{},'cartoon': {'color':'gray'}})\n if show_pdb_metals:\n view.getModel(0).setStyle({'resn':\"ZN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CA\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"CU\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"HG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MG\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"FE\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MN\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"NI\"},{'sphere': {'opacity':.75}})\n view.getModel(0).setStyle({'resn':\"MB\"},{'sphere': {'opacity':.75}})\n \n if show_probes:\n view.getModel(1).setStyle({},{'sphere': {'colorscheme':{'prop':'index', 'map':colors}}})\n \n # add hoverable labels for the residues and the predicted metals\n # two callbacks are needed, one for the residues and one for the metals\n # the metal one displays the probability\n view.getModel(0).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.resn+atom.resi+\":\"+atom.atom,{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n view.getModel(1).setHoverable({},True,'''function(atom,viewer,event,container) {\n if(!atom.label) {\n atom.label = viewer.addLabel(atom.atom+\" [\"+atom.serial+\"]\",{position: atom, backgroundColor: 'mintcream', fontColor:'black'});\n }}''',\n '''function(atom,viewer) { \n if(atom.label) {\n viewer.removeLabel(atom.label);\n delete atom.label;\n }\n }''')\n if show_sticks_metalbinding:\n view.setStyle({'resn':\"HIS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"ASP\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"GLU\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n view.setStyle({'resn':\"CYS\"},{'stick': {}, 'cartoon': {'color':'gray'}})\n\n return view.show()", "def output_grid_information():\n # translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = 
[(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def drawMap(self):\n world_map = folium.Map(location=[25, 10], 
zoom_start=3)\n totals_column = 'total_' + self.map_type.lower()\n top10 = self.covid_df.sort_values(totals_column, axis=0, ascending=False)['location'][:10]\n scale, units = self.unitsDetector(self.covid_df[totals_column].max())\n \n color_scheme = {'Cases': 'YlOrRd', 'Deaths': 'PuRd'}[self.map_type]\n bins = list(np.linspace(0, np.ceil(self.covid_df[totals_column].max() / scale) * scale, 6))\n legend_name = 'Total Number of COVID-19 ' + self.map_type\n map_file_name = self.generateFileName()\n \n folium.Choropleth(geo_data=self.geo_data,\n data=self.covid_df,\n columns=['location', totals_column],\n key_on='feature.properties.ADMIN',\n fill_color=color_scheme,\n bins=bins,\n legend_name=legend_name,\n highlight=True\n ).add_to(world_map)\n \n for i in range(10):\n country = top10.iloc[i]\n cases = self.covid_df[self.covid_df['location'] == country][totals_column] / scale\n \n # Centroid coordinates for each country labelled by its ISO-2 code\n lat = self.countries_centroids.loc[self.name_iso2_mapping[country]]['latitude']\n long = self.countries_centroids.loc[self.name_iso2_mapping[country]]['longitude']\n popup = f\"{country}: {cases.values[0]:.2f}{units} total {self.map_type.lower()}\"\n \n folium.Marker(location=[lat, long],\n popup=folium.Popup(popup, \n max_width=1000)\n ).add_to(world_map)\n \n world_map.save(map_file_name)", "def make_kinematics():\r\n # Read data values for vel, sigma, h3, h4\r\n data = np.loadtxt(outtable, usecols=(5, 7, 9, 11)).T\r\n xall, yall, sn = np.loadtxt(outtable, usecols=(1, 2, 14,)).T\r\n ###########################################################################\r\n # Details of the maps\r\n names = [r\"vel\", r\"sigma\", r\"h3\", r\"h4\"]\r\n cb_label = [r\"V$_{\\rm LOS}$ (km/s)\", r\"$\\sigma_{\\rm LOS}$ (km/s)\",\r\n r\"$h_3$\", r\"$h_4$\"]\r\n # lims = [[3750,4000], [150,500], [-0.08, 0.08], [-0.15, 0.15] ]\r\n lims = [[3640, 4040], [220, 500], [-0.08, 0.08], [-0.11, 0.11]]\r\n xcb = [0.068, 0.385, 0.705]\r\n ###########################################################################\r\n # Set the threshold S/N for smoothing\r\n # Higher values than this values are not smoothed\r\n sn_thres = [50, 50, 1000, 1000]\r\n ###########################################################################\r\n # Read values of other authors\r\n tab1a, tab1b = get_richtler()\r\n tab2 = get_ventimiglia()\r\n ###########################################################################\r\n # Set the colormap\r\n cmap = \"Spectral_r\"\r\n ###########################################################################\r\n # Loop for figures\r\n for i, vector in enumerate(data):\r\n print \"Producing figure for {0}...\".format(names[i])\r\n good = np.where(((~np.isnan(vector)) & (sn > sn_cut)))[0]\r\n sn_high = np.where(((~np.isnan(vector)) & (sn >= sn_thres[i])))[0]\r\n sn_low = np.delete(good, sn_high)\r\n vector_low = ll.loess_2d(xall[sn_low], yall[sn_low], vector[sn_low],\r\n frac=frac_loess)\r\n vector_high = vector[sn_high]\r\n good = np.hstack((sn_high, sn_low))\r\n v_loess = np.hstack((vector_high, vector_low))\r\n v = vector[good]\r\n vmin = lims[i][0] if lims[i][0] else v_loess.min()\r\n vmax = lims[i][1] if lims[i][1] else v_loess.max()\r\n fig = plt.figure(figsize=(15, 5.1))\r\n gs = gridspec.GridSpec(1, 3)\r\n gs.update(left=0.051, right=0.985, bottom=0.11, top=0.975, hspace=0.06,\r\n wspace=0.06)\r\n vs = [v, v_loess, v_loess]\r\n ylabels = [1, 0, 0]\r\n contours = [\"vband\", \"vband\", \"residual\"]\r\n cb_fmts = [\"%i\", \"%i\", \"%.2f\", \"%.2f\"]\r\n 
####################################################\r\n # Produces pannels\r\n ####################################################\r\n for j in range(3):\r\n ax = plt.subplot(gs[j])\r\n # if i <1:\r\n # norm = LogNorm(vmin=vmin, vmax=vmax)\r\n # else:\r\n # norm = Normalize(vmin=vmin, vmax=vmax)\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n coll = PolyCollection(polygons_bins[good], array=vs[j],\r\n cmap=cmap, edgecolors='w', norm=norm,\r\n linewidths=0.4)\r\n draw_map(fig, ax, coll)\r\n draw_contours(contours[j], fig, ax)\r\n plt.gca().add_patch(\r\n Rectangle((18, -36), 20, 10, alpha=1, zorder=10000,\r\n color=\"w\"))\r\n draw_colorbar(fig, ax, coll, cblabel=cb_label[i],\r\n cbar_pos=[xcb[j], 0.18, 0.08, 0.04],\r\n ticks=np.linspace(vmin, vmax, 4), cb_fmt=cb_fmts[i])\r\n xylabels(ax, y=ylabels[j])\r\n if j > 0:\r\n ax.set_yticklabels([])\r\n #####################################################\r\n # Draw long slits of other papers\r\n #####################################################\r\n if i > 1:\r\n continue\r\n bc = [\"g\", \"g\", \"b\", \"b\"]\r\n for k, tab in enumerate([tab1a, tab1b, tab2[4:], tab2[:4]]):\r\n norm = Normalize(vmin=vmin, vmax=vmax)\r\n idx = np.argsort(tab[:, 0])\r\n points = np.array([tab[:, 0][idx], tab[:, 1][idx]]).T.reshape(\r\n -1, 1, 2)\r\n segments = np.concatenate([points[:-1], points[1:]],\r\n axis=1)\r\n lc = LineCollection(segments, array=tab[:, i + 2],\r\n cmap=cmap, norm=norm, lw=5)\r\n ax.add_collection(lc)\r\n add_borders(ax, points, c=bc[k])\r\n # plt.savefig(\"figs/{0}.pdf\".format(names[i]))\r\n plt.savefig(\"figs/{0}.png\".format(names[i]))\r\n # plt.savefig(\"figs/{0}.eps\".format(names[i]), fmt=\"eps\")\r", "def gen_stops():\r\n stop_ = list(nasal_stop)\r\n stop_.extend(voiced_stop)\r\n stop_.extend(unvoiced_stop)\r\n return stop_", "def make_layers(self):\r\n #assuming temporal field is always the first column!\r\n timeCol = self.data.columns[0]\r\n times = self.data[timeCol].unique() \r\n lat = self.data.lat.unique()\r\n lon = self.data.lon.unique()\r\n shape = (len(lat), len(lon))\r\n depths, hours = [None], [None]\r\n if 'depth' in self.data.columns:\r\n depths = self.data.depth.unique()\r\n if 'hour' in self.data.columns:\r\n hours = self.data.hour.unique()\r\n layers, titles = [], []\r\n for t in times:\r\n for h in hours:\r\n for z in depths:\r\n frame = self.data[self.data[timeCol] == t]\r\n\r\n if timeCol == 'time':\r\n sub = self.variable + self.unit + ', ' + str(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').date())\r\n else:\r\n sub = self.variable + self.unit + ', ' + timeCol + ': ' + str(t) \r\n\r\n if h != None:\r\n frame = frame[frame['hour'] == h]\r\n sub = sub + ', hour: ' + str(h) + 'hr'\r\n if z != None:\r\n frame = frame[frame['depth'] == z] \r\n sub = sub + ', depth: %2.2f' % z + ' [m]' \r\n try: \r\n layers.append(frame[self.variable].values.reshape(shape))\r\n titles.append(sub)\r\n except Exception as e:\r\n continue \r\n return layers, titles, lat, lon", "def update_ptable(self):\n from bokeh.sampledata.periodic_table import elements\n romans = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\"]\n\n elements[\"atomic mass\"] = elements[\"atomic mass\"].astype(str)\n\n elements[\"period\"] = [x for x in elements.period]\n elements = elements[elements.group != \"-\"]\n\n group_range = [str(x) for x in range(1, 19)]\n print ('reaches colormap def')\n colormap = {\n \"c\" : \"#ffa07a\",\n \"nc\" : \"#A9A9A9\"\n }\n elems_colorpair = {}\n\n fcc_B_extrapol_props = {}\n fcc_dB_extrapol_props = {}\n 
fcc_V0_extrapol_props = {}\n fcc_E0_extrapol_props = {}\n\n bcc_B_extrapol_props = {}\n bcc_dB_extrapol_props = {}\n bcc_V0_extrapol_props = {}\n bcc_E0_extrapol_props = {}\n\n hcp_B_extrapol_props = {}\n hcp_dB_extrapol_props = {}\n hcp_V0_extrapol_props = {}\n hcp_E0_extrapol_props = {}\n\n available_elems = []\n\n for e in elements[\"symbol\"]:\n if e in np.unique(list(self.plot_data['element'])):\n available_elems.append(e)\n for s in np.unique(list(self.plot_data['structure'])):\n plot_struct = self.plot_data[self.plot_data['structure']==s]\n plot_struct_elem = plot_struct[plot_struct['element']==e]\n if s=='fcc':\n try:\n fcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n fcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n fcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n fcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='bcc':\n try:\n bcc_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n bcc_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n bcc_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n bcc_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n elif s=='hcp':\n try:\n hcp_B_extrapol_props.update({e:list(plot_struct_elem['B'])[0]})\n\n hcp_dB_extrapol_props.update({e:list(plot_struct_elem['BP'])[0]})\n\n hcp_V0_extrapol_props.update({e:list(plot_struct_elem['V0'])[0]})\n\n hcp_E0_extrapol_props.update({e:list(plot_struct_elem['E0'])[0]})\n except:\n pass\n fcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_E0_extrapol_props})\n fcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_V0_extrapol_props})\n fcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_B_extrapol_props})\n fcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in fcc_dB_extrapol_props})\n\n bcc_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_E0_extrapol_props})\n bcc_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_V0_extrapol_props})\n bcc_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_B_extrapol_props})\n bcc_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in bcc_dB_extrapol_props})\n\n hcp_E0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_E0_extrapol_props})\n hcp_V0_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_V0_extrapol_props})\n hcp_B_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_B_extrapol_props})\n hcp_dB_extrapol_props.update({k:'xxx' for k in elements['symbol'] if k not in hcp_dB_extrapol_props})\n\n elems_colorpair.update( { key:'c' for key in np.unique(available_elems) } )\n elems_colorpair.update( { key:'nc' for key in list(elements['symbol']) if key not in list(elems_colorpair.keys()) } )\n\n\n print ([ colormap[elems_colorpair[x]] for x in elements['symbol'] ])\n\n source = ColumnDataSource(\n data=dict(\n group=[str(x) for x in elements[\"group\"]],\n period=[str(y) for y in elements[\"period\"]],\n symx=[str(x)+\":0.1\" for x in elements[\"group\"]],\n numbery=[str(x)+\":0.8\" for x in elements[\"period\"]],\n massy=[str(x)+\":0.15\" for x in elements[\"period\"]],\n namey=[str(x)+\":0.3\" for x in elements[\"period\"]],\n sym=elements[\"symbol\"],\n name=elements[\"name\"],\n# cpk=elements[\"CPK\"],\n 
atomic_number=elements[\"atomic number\"],\n# electronic=elements[\"electronic configuration\"],\n fcc_B=[fcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_dB=[fcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_V0=[fcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n fcc_E0=[fcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_B=[bcc_B_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_dB=[bcc_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_V0=[bcc_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n bcc_E0=[bcc_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_B=[hcp_B_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_dB=[hcp_dB_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_V0=[hcp_V0_extrapol_props[x] for x in elements[\"symbol\"]],\n hcp_E0=[hcp_E0_extrapol_props[x] for x in elements[\"symbol\"]],\n type=elements[\"metal\"],\n type_color=[ colormap[elems_colorpair[x]] for x in elements['symbol'] ],\n )\n )\n\n # plot the periodic layout\n #name = source.data[\"name\"]\n #B = source.data[\"B\"]\n\n ptable = figure(title=\"Periodic Table\", tools=\"hover\",\n x_range=group_range, y_range=list(reversed(romans)))\n ptable.background_fill_color='white'\n ptable.plot_width = 1500\n ptable.toolbar_location = None\n ptable.outline_line_color = None\n\n ptable.rect(\"group\", \"period\", 0.9, 0.9, source=source,\n fill_alpha=0.3, color='type_color')\n\n text_props = {\n \"source\": source,\n \"angle\": 0,\n \"color\": \"black\",\n \"text_align\": \"left\",\n \"text_baseline\": \"middle\"\n }\n\n ptable.text(x=\"symx\", y=\"period\", text=\"sym\",\n text_font_style=\"bold\", text_font_size=\"22pt\", **text_props)\n\n ptable.text(x=\"symx\", y=\"numbery\", text=\"atomic_number\",\n text_font_size=\"9pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"namey\", text=\"name\",\n# text_font_size=\"6pt\", **text_props)\n\n# ptable.text(x=\"symx\", y=\"massy\", text=\"mass\",\n# text_font_size=\"5pt\", **text_props)\n\n ptable.grid.grid_line_color = None\n\n\n ptable.select_one(HoverTool).tooltips = [\n (\"name\", \"@name\"),\n (\"fcc, V0 (A^3 per atom)\", \"@fcc_V0\"),\n (\"fcc, B (GPa)\", \"@fcc_B\"),\n (\"fcc, dB/dP\", \"@fcc_dB\"),\n (\"bcc, V0 (A^3 per atom)\", \"@bcc_V0\"),\n (\"bcc, B (GPa)\", \"@bcc_B\"),\n (\"bcc, dB/dP\", \"@bcc_dB\"),\n (\"hcp, V0 (A^3 per atom)\", \"@hcp_V0\"),\n (\"hcp, B (GPa)\", \"@hcp_B\"),\n (\"hcp, dB/dP\", \"@hcp_dB\")]\n return ptable", "def imprime_mapa(lat,lon):\r\n\r\n lista=[\"colegio\", \"starbucks\",\"estadio de baloncesto\", \"bar\",\"restaurante vegano\",\"peluqueria perros\",\"aeropuerto\"]\r\n \r\n tipo=list()\r\n latitud=list()\r\n longitud=list()\r\n\r\n for q in lista:\r\n resultado=foursquare_visual({'latitud':lat, 'longitud':lon},q)\r\n \r\n for r in resultado:\r\n tipo.append(q.replace(\" \",\"_\"))\r\n latitud.append(r['latitud'])\r\n longitud.append(r['longitud'])\r\n #if q == \"colegio\" or q == \"peluqueria perros\":\r\n # print(pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud}))\r\n # raise\r\n \r\n \r\n df=pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud})\r\n\r\n \r\n\r\n mapa = Map(location=[lat,lon],zoom_start=15)\r\n\r\n empresa = {\r\n \"location\":[lat, lon ],\r\n \"tooltip\" : \"Empresa\"\r\n }\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-dot-circle-o\",\r\n icon_color = \"white\"\r\n )\r\n Marker(**empresa,icon = icon ).add_to(mapa)\r\n\r\n\r\n for i, row in df.iterrows():\r\n establecimiento = {\r\n 
\"location\":[row[\"latitud\"], row[\"logitud\"]],\r\n \"tooltip\" : row[\"tipo\"].replace(\"_\",\" \").capitalize()\r\n }\r\n\r\n if row[\"tipo\"] == \"starbucks\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"fa-coffee\",\r\n icon_color = \"white\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"restaurante_vegano\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"leaf\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"colegio\":\r\n icon = Icon(color = \"blue\",\r\n prefix = \"fa\",\r\n icon = \"fa-graduation-cap \",\r\n icon_color = \"black\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"peluqueria_perros\":\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-paw\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"estadio_de_baloncesto\":\r\n icon = Icon(color = \"orange\",\r\n prefix = \"fa\",\r\n icon = \"fa-futbol-o \",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"aeropuerto\":\r\n icon = Icon(color = \"white\",\r\n prefix = \"fa\",\r\n icon = \"fa-plane\",\r\n icon_color = \"black\"\r\n )\r\n elif row[\"tipo\"] == \"bar\":\r\n icon = Icon(color = \"pink\",\r\n prefix = \"fa\",\r\n icon = \"fa-glass\",\r\n icon_color = \"white\"\r\n )\r\n \r\n else:\r\n prefix = \"fa\",\r\n icon = \"briefcase\",\r\n icon_color = \"black\" \r\n Marker(**establecimiento,icon = icon ).add_to(mapa)\r\n return mapa", "def opentimeanddepth(self) :\n\n print \"Open time and depth\"\n self.combo_wms_time_first_d.clear()\n self.combo_wms_time_first_h.clear()\n self.combo_wms_time_last_d.clear()\n self.combo_wms_time_last_h.clear()\n self.combo_wms_time_first_d_2.clear()\n self.combo_wms_time_first_h_2.clear()\n self.combo_wms_time_last_d_2.clear()\n self.combo_wms_time_last_h_2.clear()\n self.combo_wms_layer_depth.clear()\n self.combo_wms_layer_depth_2.clear()\n self.combo_wms_layer_depth_max_2.clear()\n # Current combobox values\n product=str(self.combo_product_list.currentText())\n dataset=str(self.combo_dataset_list.currentText())\n variable=str(self.combo_variable_list.currentText())\n resol=self.dict_prod[product][dataset][7][0]\n list_time=self.dict_var[str(variable)][1]\n if \"daily\" in str(resol) :\n print \"Daily variable\"\n for value in list_time:\n day=str(value).split()[0][:-13]\n hour=str(value).split()[0][11:]\n self.combo_wms_time_first_d.addItem(str(day))\n self.combo_wms_time_first_d_2.addItem(str(day))\n self.combo_wms_time_last_d.addItem(str(day))\n self.combo_wms_time_last_d_2.addItem(str(day))\n self.combo_wms_time_first_h.addItem(str(hour)) \n self.combo_wms_time_first_h_2.addItem(str(hour)) \n self.combo_wms_time_last_h.addItem(str(hour)) \n self.combo_wms_time_last_h_2.addItem(str(hour)) \n self.combo_wms_time_first_h.setEnabled(True)\n self.combo_wms_time_first_h_2.setEnabled(True)\n self.combo_wms_time_last_h.setEnabled(True)\n self.combo_wms_time_last_h_2.setEnabled(True)\n if \"hourly\" in str(resol) :\n print \"Hourly variable\"\n i=0\n day_tmp=''\n for value in list_time :\n day=str(value).split()[0][:-13]\n if day_tmp != day :\n self.combo_wms_time_first_d.addItem(str(day))\n self.combo_wms_time_first_d_2.addItem(str(day))\n self.combo_wms_time_last_d.addItem(str(day))\n self.combo_wms_time_last_d_2.addItem(str(day))\n i=i+1\n day_tmp=day\n if i == 1:\n hour=str(value).split()[0][11:]\n self.combo_wms_time_first_h.addItem(str(hour))\n self.combo_wms_time_first_h_2.addItem(str(hour))\n self.combo_wms_time_last_h.addItem(str(hour))\n list_prof=self.dict_var[variable][0]\n for 
value in list_prof : \n prof=str(value).split()[0]\n self.combo_wms_layer_depth.addItem(str(prof))\n self.combo_wms_layer_depth_2.addItem(str(prof))\n self.combo_wms_layer_depth_max_2.addItem(str(prof))", "def mapviewer(request):\n\n precip_layer1 = geeutils.getPrecipMap(accumulation=1)\n precip_layer3 = geeutils.getPrecipMap(accumulation=3)\n precip_layer7 = geeutils.getPrecipMap(accumulation=7)\n #flood_viir = 'None' #geeutils.getfloodMap(snsr='atms')\n #flood_sentinel = geeutils.getfloodMap(\"sentinel1\",\"2010-01-01\")\n #flood_atms = geeutils.getfloodMap(\"atms\", \"2010-01-01\")\n #print(flood_sentinel)\n\n historical_layer = geeutils.getHistoricalMap(region,'2010-01-01','2015-12-31',month=8,algorithm='JRC')\n\n image = ee.Image(wc.filter(ee.Filter.eq('sensor','sentinel1')).first())\n #sentinel1_layer = geeutils.getTileLayerUrl(image.updateMask(image).visualize(palette='#9999ff'))\n\n\n product_selection = SelectInput(\n # display_text='Select precipitation product:',\n name='product_selection',\n multiple=False,\n options=[('1 Day Accumulation', '1|'+precip_layer1),\n ('3 Day Accumulation', '2|'+precip_layer3),\n ('7 Day Accumulation', '3|'+precip_layer7)],\n initial=['1 Day Accumulation'],\n select2_options={'placeholder': 'Select a product',\n 'allowClear': False}\n )\n\n\n browse_selection = SelectInput(\n # display_text='Select precipitation product:',\n name='browse_selection',\n multiple=False,\n options=[('VIIRS NRT TRUE COLOR', '1|VIIRS_SNPP_CorrectedReflectance_TrueColor'),\n ('VIIRS NRT NATURAL COLOR', '2|VIIRS_SNPP_CorrectedReflectance_BandsM11-I2-I1'),\n ('MODIS AQUA TRUE COLOR', '3|MODIS_Aqua_CorrectedReflectance_TrueColor'),\n ('MODIS AQUA NATURAL COLOR', '4|MODIS_Aqua_CorrectedReflectance_Bands721'),\n ('MODIS TERRA TRUE COLOR', '5|MODIS_Terra_CorrectedReflectance_TrueColor'),\n ('MODIS TERRA NATURAL COLOR', '5|MODIS_Terra_CorrectedReflectance_Bands721')],\n initial=['VIIRS NRT NATURAL COLOR'],\n select2_options={'placeholder': 'Select browse imagery:',\n 'allowClear': False}\n )\n\n sensor_selection = SelectInput(\n # display_text='Select precipitation product:',\n name='sensor_selection',\n multiple=False,\n options=[ ('select sensor', 'none'),\n ('Sentinel 1', 'sentinel1'),\n ('ATMS', 'atms')],\n initial=['select sensor'],\n select2_options={'placeholder': 'Select sensor:',\n 'allowClear': False}\n )\n\n context = {\n 'precip_layer': precip_layer1,\n 'historical_layer': historical_layer,\n 'admin_layer': admin_layer,\n 'product_selection': product_selection,\n 'browse_selection': browse_selection,\n 'sensor_selection':sensor_selection,\n }\n\n return render(request, 'hydraviewer/map.html', context)", "def optionThree(catalog):\n ans = controller.pointsInterconnection(catalog)\n\n item_map = folium.Map(location=[25.557547, -24.568953], zoom_start=2)\n for landinpoint in lt.iterator(ans[0]):\n item = mp.get(catalog['landingpoints'], landinpoint)['value']['info']\n print('Nombre:', item['id'], '\\tLugar:', item['name'], '\\tIdentificador: ', item['landing_point_id'])\n\n tooltip = item['id']\n item_lat = float(item['latitude'])\n item_lon = float(item['longitude'])\n cables = mp.get(catalog['landingpoints'], landinpoint)['value']['lstcables']\n folium.Marker(location=[item_lat, item_lon], popup=\"<strong></strong>\", tooltip=tooltip, icon=folium.Icon(color='darkred', icon = 'cloud')).add_to(item_map)\n\n for cable in lt.iterator(mp.keySet(cables)): \n # * Markers y conectors entre el lp actual y todas sus conexiones\n\n cable = cable.split('-')[1:][0]\n cable_info = 
mp.get(catalog['landingpoints'], cable)['value']['info']\n cable_lat = float(cable_info['latitude'])\n cable_lon = float(cable_info['longitude'])\n tooltip_2 = cable_info['name']\n folium.Marker(location=[cable_lat, cable_lon], popup=\"<strong>Landpoint se connecta con Jakarta</strong>\", tooltip=tooltip_2,icon=folium.Icon(color='lightgray')).add_to(item_map)\n folium.PolyLine([(item_lat, item_lon), (cable_lat, cable_lon)],\n color='gray',\n weight=2,\n opacity=0.6).add_to(item_map)\n\n print('\\nHay', ans[1], 'cables conectados a dicho(s) landingpoints')\n item_map.save('Req 2.html')", "def feature_layer_popup(map_service_name, map_service_type, web_map, layer_name):\r\n # get operational layer of a web map\r\n operational_layer = get_webmap_operational_layers(\r\n web_map=web_map,\r\n layer_name=layer_name\r\n )\r\n # get feature layer from feature layer collection\r\n target_layer = get_feature_layer_from_feature_service(\r\n map_service_name=map_service_name,\r\n map_service_type=map_service_type,\r\n layer_name=layer_name\r\n )\r\n\r\n # set popup info for the operational layer based on the type of services\r\n # hosted feature layer\r\n if operational_layer['popupInfo']:\r\n operational_layer_popup = operational_layer['popupInfo']\r\n\r\n # registered feature layer\r\n else:\r\n # set popup info\r\n operational_layer.update(popupInfo=target_layer['popupInfo'])\r\n operational_layer_popup = operational_layer['popupInfo']\r\n\r\n # set title for popup\r\n operational_layer_popup['title'] = layer_name\r\n\r\n # set description for popup\r\n layer_popup_description = customize_popup_description(\r\n operational_layer=operational_layer,\r\n map_service_type=map_service_type\r\n )\r\n operational_layer_popup['description'] = layer_popup_description\r\n\r\n # set decimal places and digit separators for numeric fields\r\n for field in operational_layer_popup['fieldInfos']:\r\n for fld in target_layer.properties.fields:\r\n # double\r\n if field['fieldName'] == fld.name and fld.type == \"esriFieldTypeDouble\":\r\n field.update({\"format\": {\"places\": 2, \"digitSeparator\": False}})\r\n # integer\r\n elif field['fieldName'] == fld.name and fld.type == \"esriFieldTypeInteger\":\r\n field.update({\"format\": {\"places\": 0, \"digitSeparator\": False}})\r\n\r\n print('customizing popup for', operational_layer['title'])", "def _build_stations(self, stop_list):\n # stations = [] TODO: What is this for\n dists = self._euclidian_distances(stop_list)\n stations = self._calculate_y_lines(dists)\n return stations", "def runCheck(self):\n # Select the layers open in the legendInterface and add them to an array\n crs = QgsCoordinateReferenceSystem()\n layers = self.iface.legendInterface().layers()\n layer_list = []\n # Declare coordinate system to print out screen\n # VN2000 Noi bo mui 3\n htd_103_nb = \"+proj=tmerc +lat_0=0 +lon_0=103 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_104_nb = \"+proj=tmerc +lat_0=0 +lon_0=104 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_104_5_nb = \"+proj=tmerc +lat_0=0 +lon_0=104.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_104_75_nb = \"+proj=tmerc +lat_0=0 +lon_0=104.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_105_nb = \"+proj=tmerc +lat_0=0 +lon_0=105 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_105_5_nb = \"+proj=tmerc +lat_0=0 
+lon_0=105.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_105_75_nb = \"+proj=tmerc +lat_0=0 +lon_0=105.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_106_nb = \"+proj=tmerc +lat_0=0 +lon_0=106 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_106_25_nb = \"+proj=tmerc +lat_0=0 +lon_0=106.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_106_5_nb = \"+proj=tmerc +lat_0=0 +lon_0=106.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_107_nb = \"+proj=tmerc +lat_0=0 +lon_0=107 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_107_25_nb = \"+proj=tmerc +lat_0=0 +lon_0=107.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_107_5_nb = \"+proj=tmerc +lat_0=0 +lon_0=107.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_107_75_nb = \"+proj=tmerc +lat_0=0 +lon_0=107.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_108_nb = \"+proj=tmerc +lat_0=0 +lon_0=108 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_108_25_nb = \"+proj=tmerc +lat_0=0 +lon_0=108.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n htd_108_5_nb = \"+proj=tmerc +lat_0=0 +lon_0=108.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n\n # VN2000 Hoi nhap mui 3\n htd_103_hn = \"+proj=tmerc +lat_0=0 +lon_0=103 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_104_hn = \"+proj=tmerc +lat_0=0 +lon_0=104 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_104_5_hn = \"+proj=tmerc +lat_0=0 +lon_0=104_5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_104_75_hn = \"+proj=tmerc +lat_0=0 +lon_0=104.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_105_hn = \"+proj=tmerc +lat_0=0 +lon_0=105 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_105_5_hn = \"+proj=tmerc +lat_0=0 +lon_0=105.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_105_75_hn = \"+proj=tmerc +lat_0=0 +lon_0=105.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_106_hn = \"+proj=tmerc +lat_0=0 +lon_0=106 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_106_25_hn = \"+proj=tmerc +lat_0=0 +lon_0=106.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_106_5_hn = 
\"+proj=tmerc +lat_0=0 +lon_0=106.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_107_hn = \"+proj=tmerc +lat_0=0 +lon_0=107 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_107_25_hn = \"+proj=tmerc +lat_0=0 +lon_0=107.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_107_5_hn = \"+proj=tmerc +lat_0=0 +lon_0=107.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_107_75_hn = \"+proj=tmerc +lat_0=0 +lon_0=107.75 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_108_hn = \"+proj=tmerc +lat_0=0 +lon_0=108 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_108_25_hn = \"+proj=tmerc +lat_0=0 +lon_0=108.25 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n htd_108_5_hn = \"+proj=tmerc +lat_0=0 +lon_0=108.5 +k=0.9999 +x_0=500000 +y_0=0 +ellps=WGS84 +towgs84=-191.90441429,-39.30318279,-111.45032835,0.00928836,-0.01975479,0.00427372,0.252906278 +units=m +no_defs\"\n\n # UTM 48,49\n htd_utm_48 = \"+proj=utm +zone=48 +datum=WGS84 +units=m +no_defs\"\n htd_utm_49 = \"+proj=utm +zone=49 +datum=WGS84 +units=m +no_defs\"\n\n # WGS84 Latlong - 4326\n htd_latlong_4326 = \"+proj=longlat +datum=WGS84 +no_defs\"\n\n #Loop all layers\n for layer in layers:\n if layer.crs().toProj4() == htd_103_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 103 mui 3 \")\n elif layer.crs().toProj4() == htd_104_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 104 mui 3 \")\n elif layer.crs().toProj4() == htd_104_5_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 104.5 mui 3 \")\n elif layer.crs().toProj4() == htd_104_75_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 104.75 mui 3 \")\n elif layer.crs().toProj4() == htd_105_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 105 mui 3 \")\n elif layer.crs().toProj4() == htd_105_5_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 105.5 mui 3 \")\n elif layer.crs().toProj4() == htd_105_75_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 105.75 mui 3 \")\n elif layer.crs().toProj4() == htd_106_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 106 mui 3 \")\n elif layer.crs().toProj4() == htd_106_25_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 106.25 mui 3 \")\n elif layer.crs().toProj4() == htd_106_5_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 106.5 mui 3 \")\n elif layer.crs().toProj4() == htd_107_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 107 mui 3 \")\n elif layer.crs().toProj4() == htd_107_25_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 107.25 mui 3 \")\n elif layer.crs().toProj4() == htd_107_5_nb :\n 
layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 107.5 mui 3 \")\n elif layer.crs().toProj4() == htd_107_75_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 107.75 mui 3 \")\n elif layer.crs().toProj4() == htd_108_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 108 mui 3 \")\n elif layer.crs().toProj4() == htd_108_25_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 108.25 mui 3 \")\n elif layer.crs().toProj4() == htd_108_5_nb :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Noi bo KTT 108.5 mui 3 \")\n # VN2000 Hoi nhap\n elif layer.crs().toProj4() == htd_103_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 103 mui 3 \")\n elif layer.crs().toProj4() == htd_104_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 104 mui 3 \")\n elif layer.crs().toProj4() == htd_104_5_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 104.5 mui 3 \")\n elif layer.crs().toProj4() == htd_104_75_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 104.75 mui 3 \")\n elif layer.crs().toProj4() == htd_105_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 105 mui 3 \")\n elif layer.crs().toProj4() == htd_105_5_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 105.5 mui 3 \")\n elif layer.crs().toProj4() == htd_105_75_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 105.75 mui 3 \")\n elif layer.crs().toProj4() == htd_106_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 106 mui 3 \")\n elif layer.crs().toProj4() == htd_106_25_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 106.25 mui 3 \")\n elif layer.crs().toProj4() == htd_106_5_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 106.5 mui 3 \")\n elif layer.crs().toProj4() == htd_107_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 107 mui 3 \")\n elif layer.crs().toProj4() == htd_107_25_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 107.25 mui 3 \")\n elif layer.crs().toProj4() == htd_107_5_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 107.5 mui 3 \")\n elif layer.crs().toProj4() == htd_107_75_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 107.75 mui 3 \")\n elif layer.crs().toProj4() == htd_108_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 108 mui 3 \")\n elif layer.crs().toProj4() == htd_108_25_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 108.25 mui 3 \")\n elif layer.crs().toProj4() == htd_108_5_hn :\n layer_list.append(layer.name() + \" -->\" + \"VN-2000 Hoi nhap KTT 108.5 mui 3 \")\n\n # UTM 48,49, Latlong\n elif layer.crs().toProj4() == htd_utm_48 :\n layer_list.append(layer.name() + \" -->\" + \"UTM Zone 48N - EPSG: 32648\")\n elif layer.crs().toProj4() == htd_utm_49 :\n layer_list.append(layer.name() + \" -->\" + \"UTM Zone 49N - EPSG: 32649\")\n elif layer.crs().toProj4() == htd_latlong_4326 :\n layer_list.append(layer.name() + \" -->\" + \"WGS 84 Lat/Long - EPSG: 4326\")\n else:\n layer_list.append(layer.name() + \" -->\" +layer.crs().toProj4())\n # Add layer_list array to listWidget, clear layer if removed to layer in tools\n self.dlgtool3.listWidget_check.clear()\n self.dlgtool3.listWidget_check.addItems(layer_list)\n # show the dialog\n self.dlgtool3.show()\n # 
Run the dialog event loop\n result = self.dlgtool3.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def plot_timeres(timeres):\n tint = 24 / timeres\n step = 255 / (tint + 1)\n\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n mxcont = np.max(cont)\n\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i)-0.5)/normLat), minLon+((j+1.5)/normLon),\n # cont[i,j]*(circlesize/scale), \"#FF0000\")\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color='#000000',\n fill_color='#110000', fill_opacity=0.3)\n # mymap.addradpoint(minLat+(((scale - i )-0.5)/normLat),\n # minLon+((j+1.5)/normLon), 30, \"#FF0000\")\n for t in range(tint):\n color = '#' + (str(hex((t + 1) * step))[2:]) + (\n str(hex((t + 1) * step))[2:]) + 'FF' # (str(hex((t+1)*step))[2:])\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n if (evtime / timeres) == t:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n if distrib:\n cont = cont / mxcont\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=cont[i, j] * (circlesize / scale),\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] > 0.01:\n mymap.circle_marker(\n location=[minLat + (((scale - i) - 0.5) / normLat), minLon + ((j + 1.5) / normLon)],\n radius=30,\n line_color=color,\n fill_color='#110000', fill_opacity=0.2)" ]
[ "0.5821836", "0.5777538", "0.55118066", "0.5333158", "0.53221834", "0.5259219", "0.5246929", "0.52346987", "0.523304", "0.5226837", "0.5213859", "0.5206291", "0.5205278", "0.5203672", "0.51721615", "0.5124725", "0.5114918", "0.50976086", "0.5096673", "0.5094198", "0.50739485", "0.5070421", "0.5059543", "0.5021916", "0.5013929", "0.5012236", "0.4997573", "0.4983409", "0.49739906", "0.49724817" ]
0.6849664
0
Computes the square root of the sum of the squares for the combined list of numbers in the args list
def sqrt_sum_of_squares(*args):
    out = 0.0
    for arg in args:
        out += float(arg) * float(arg)
    return out ** 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _root_sum_of_squares(list):\n return sum((el ** 2 for el in list)) ** (0.5)", "def lsquare_of_sums(inlist):\r\n s = sum(inlist)\r\n return float(s)*s", "def square_nums(number_list):", "def square(numbers):\n\n # Needs only one argument\n\n return numbers[0] ** 2", "def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))", "def rootsumsquares(a):\n\treturn np.sqrt(np.sum(np.power(a,2)))", "def sumsq(values):\n\n return sum(map(lambda x: x ** 2, values))", "def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result", "def sum_of_squares(x):\r\n return dot(x, x)", "def sum_squared(variable_list):\n return sum([el * el for el in variable_list])", "def square_or_square_root(numbers):\n result = []\n for element in numbers:\n root = element ** 0.5\n if root.is_integer():\n result.append(int(root))\n else:\n result.append(int(element * element))\n return result", "def squareroot(number):\n return math.sqrt(number)", "def sqrSum(a, b, i, j):\n return (a - i)**2 + (b - j)**2", "def sum_of_squares(v):\n return sum(v_i * v_i for v_i in v)", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def root_mean_square(xs):\n squares = xs ** 2\n sum_squares = np.sum(squares)\n\n rms = math.sqrt((len(xs) ** -1) * sum_squares)\n return rms", "def sum_of_squares(v):\n return dot_product(v, v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return sum(v_i ** 2 for v_i in v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def sum_of_squares(v):\n return dot(v, v)", "def squared(num_list):\n new_list=[]\n for num in num_list:\n sq_num=pow(num,2)\n new_list.append(sq_num)\n return new_list", "def get_squares(n):\n\n return sum([i * i for i in range(n)])", "def square(numbers):\n\n # Needs only one argument\n newlist = []\n for num in numbers:\n newlist.append(num*num)\n return newlist", "def sum_squares(v):\n\treturn dot(v, v)", "def sumSquareDiff():\n\n def getEachSqareRange(a,b):\n return a + b**2 \n def getTotalSquareRange(a,b):\n return a + b\n \n print(reduce(getTotalSquareRange,range(1,101)) ** 2 - reduce(getEachSqareRange,range(1,101)))" ]
[ "0.7207651", "0.70310223", "0.69647837", "0.6624666", "0.6612508", "0.659189", "0.6584653", "0.65279794", "0.6487088", "0.645869", "0.63872254", "0.63788384", "0.6244868", "0.6227605", "0.62028605", "0.6177799", "0.6174678", "0.6172443", "0.6172443", "0.6172443", "0.6172443", "0.61245626", "0.61245626", "0.61245626", "0.61245626", "0.61222523", "0.6103502", "0.60945857", "0.60776955", "0.6018579" ]
0.78436637
0