query | document | metadata | negatives | negative_scores | document_score | document_rank
---|---|---|---|---|---|---|
This method fetches the latest .dmg from Jenkins. First, it checks the id of the latest build. Next, it fetches the artifacts from that build and saves the .dmg to the workspace. | def fetch_executable_from_jenkins():
base_job_url = os.environ.get('JENKINS_JOB_URL')
if not base_job_url:
error('Jenkins job URL for the builder is not specified.')
build_json = json.loads(requests.get('%s/api/json'
% base_job_url).text)
last_build = build_json['lastCompletedBuild']['number']
print 'Last build ID: %d' % last_build
job_url = '%s/%d' % (base_job_url, last_build)
last_build_json = json.loads(requests.get('%s/api/json'
% job_url).text)
if not last_build_json['artifacts']:
error('No artifacts found!')
artifacts_deb = [artifact for artifact in
last_build_json['artifacts'] if '.dmg'
in artifact['fileName']]
artifact_url = '%s/artifact/%s' % (job_url,
artifacts_deb[0]['relativePath'])
file_name = artifacts_deb[0]['fileName']
print 'Tribler installer url: %s' % artifact_url
# Download the file
file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)
download_response = requests.get(artifact_url, stream=True)
download_response.raise_for_status()
with open(file_path, 'wb') as handle:
for block in download_response.iter_content(1024):
handle.write(block)
return file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_exe_from_jenkins():\n base_job_url = os.environ.get(\"JENKINS_JOB_URL\")\n if not base_job_url:\n print \"Jenkins job URL for the builder is not specified.\"\n sys.exit(-1)\n\n build_json = json.loads(requests.get(\"%s/api/json\" % base_job_url).text)\n last_build = build_json['lastCompletedBuild']['number']\n print \"Last build ID: %d\" % last_build\n\n job_url = '%s/%d' % (base_job_url, last_build)\n last_build_json = json.loads(requests.get(\"%s/api/json\" % job_url).text)\n if len(last_build_json['artifacts']) == 0:\n error(\"No artifacts found!\")\n\n artifact_url = \"%s/artifact/%s\" % (job_url, last_build_json['artifacts'][0]['relativePath'])\n file_name = last_build_json['artifacts'][0]['fileName']\n print \"Tribler installer url: %s\" % artifact_url\n\n # Download the file\n file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)\n download_response = requests.get(artifact_url, stream=True)\n download_response.raise_for_status()\n\n with open(file_path, 'wb') as handle:\n for block in download_response.iter_content(1024):\n handle.write(block)\n\n return file_path",
"def jenkins_last_build_sha():\n job_url = os.getenv('JOB_URL')\n job_json_url = \"{0}/api/json\".format(job_url)\n response = urllib.urlopen(job_json_url)\n job_data = json.loads(response.read())\n\n last_completed_build_url = job_data['lastCompletedBuild']['url']\n last_complete_build_json_url = \"{0}/api/json\".format(last_completed_build_url)\n\n response = urllib.urlopen(last_complete_build_json_url)\n last_completed_build = json.loads(response.read())\n\n return last_completed_build[1]['lastBuiltRevision']['SHA1'] # needs testing",
"def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')",
"def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num",
"def download(self):\n \n if not os.path.exists(self.directory):\n os.mkdir(self.directory)\n if not os.path.exists(self.fullPath):\n os.mkdir(self.fullPath)\n \n dm = pymodis.downmodis.downModis(self.fullPath, self.password, self.username, self.url, self.tiles, self.path, self.dataset, \n self.today, self.enddate, jpg = False, debug = True, timeout = 30)\n dm.connect()\n self.filelist = dm.getListDays() \n self.observations = len(dm.getListDays()) \n \n if self.dataset != 'MOD13Q1.005':\n if self.observations % 2 != 0:\n raise IOError(\"The total number of observations through time must be an even number. Please add or remove an observation before or after %s\" % str(self.filelist[0]))\n \n dm.downloadsAllDay()\n logger.log('SUCCESS', 'Downloading is complete! %d HDF files of %s data for tiles %s were downloaded for the following days: %s' % (self.observations*len(self.tiles), str(self.dataset), str(self.tiles), str(self.filelist)))",
"def getLastFinishedBuild():",
"def inner_dsym_download(project_id: int, config_id: str) -> None:\n with sdk.configure_scope() as scope:\n scope.set_tag(\"project\", project_id)\n scope.set_tag(\"config_id\", config_id)\n\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)\n client = appconnect.AppConnectClient.from_config(config)\n\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=listed_builds)\n\n if not builds:\n return\n\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context(\"dsym_downloads\", {\"total\": len(builds), \"completed\": i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n # For no dSYMs, let the build be marked as fetched so they're not\n # repeatedly re-checked every time this task is run.\n except appconnect.NoDsymsError:\n logger.debug(\"No dSYMs for build %s\", build)\n # Moves on to the next build so we don't check off fetched. This url will\n # eventuallyTM be populated, so revisit it at a later time.\n except appconnect.PendingDsymsError:\n logger.debug(\"dSYM url currently unavailable for build %s\", build)\n continue\n # early-return in unauthorized and forbidden to avoid trying all the other builds\n # as well, since an expired token will error for all of them.\n # the error is also swallowed unreported because this is an expected and actionable\n # error.\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n \"Not authorized to download dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n \"Forbidden from downloading dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n # Don't let malformed URLs abort all pending downloads in case it's an isolated instance\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n # Assume request errors are a server side issue and do not abort all the\n # pending downloads.\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug(\"Uploaded dSYMs for build %s\", build)\n metrics.incr(\"tasks.app_store_connect.builds_ingested\", sample_rate=1)\n\n build_state.fetched = True\n build_state.save()",
"def build():\n return get_cached(\"build.json\", False).get(\"build_id\")",
"def official(ctx, build_number):\n # TODO: Cache API calls to be nice too kashike and the gang\n minecraft_version = ctx.parent.minecraft_version\n known_builds = minecraft_version.known_paper_builds\n if not known_builds:\n raise ClickException()\n if build_number is None:\n build_number = max(known_builds)\n if build_number not in known_builds:\n print(f\"Known builds for {minecraft_version}:\", file=sys.stderr)\n print_wrapped(', '.join(map(str, known_builds)))\n raise ClickException(f\"Build {build_number} is not a valid build for {minecraft_version}\")\n latest_build = max(known_builds)\n if build_number != latest_build:\n click.echo(f\"The latest build for {minecraft_version} is {latest_build}.\")\n click.confirm(f\"Are you sure you want to use {build_number} instead?\", abort=True)\n jar = OfficialPaperJar(minecraft_version, build_number)\n try:\n jar.validate_cache()\n except CacheInvalidationException as e:\n e.print(\"Paper jar\")\n print()\n print(f\"Downloading Paper {build_number}....\")\n jar.update()\n assert jar.resolved_path.exists()\n return jar",
"def download_latest(isamAppliance, dir='.', check_mode=False, force=False):\n ret_obj = get(isamAppliance)\n\n # Get snapshot with lowest 'id' value - that will be latest one\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n file = snaps['filename']\n filename = os.path.join(dir, file)\n\n return download(isamAppliance, filename, id, check_mode, force)",
"def GetLatest(server_url, project_name, username, password, command, name,\n filename, stage):\n server = xmlrpclib.ServerProxy(server_url + 'xmlrpc')\n token = server.RemoteApi.login(username, password)\n # Get the latest 100 builds of the tools.\n builds = server.RemoteApi.getLatestBuildsForProject(token, project_name,\n '', True, 100)\n\n # Extract the latest green build.\n green_builds = [b for b in builds if b['status'] == 'success']\n if not green_builds:\n raise IOError('No green builds of project %s found' % project_name)\n build = green_builds[0]\n\n artifacts = server.RemoteApi.getArtifactsInBuild(token, project_name,\n build['id'])\n # Pick out the desired artifact file.\n link = None\n for a in artifacts:\n # Skip everything other than what we're looking for.\n if a['command'] != command or a['name'] != name or a['stage'] != stage:\n continue\n # Construct full permalink to artifact.\n link = (server_url + a['permalink'] + filename)\n break\n\n server.RemoteApi.logout(token)\n\n return link",
"def pull(self, build_id, file_path):\n url = f\"{self.base_url}/pull\"\n payload = {\"build_id\": build_id}\n response = requests.get(url, json=payload, headers=self.headers)\n if response.headers[\"Content-Type\"] == \"text/html\":\n return response.text\n else:\n with open(file_path, 'wb') as f:\n f.write(response.content)\n\n return \"Success\"",
"def UpgradeDUT(self, build):\n elapsed = None\n factory_id = False\n upgrade_id = True\n list_of_connected_aps = list()\n build_stream = build.build_stream.name\n build_version = str(build_stream.split(\"_\")[1])\n bno = build.number\n zd_model_num = build_stream.split(\"_\")[0]\n base_build_project_num = build_stream.split(\"_\")[1]\n mesh_enabled = self.is_mesh_enabled_in_testbed()\n ap_upgrade_timeout = 1500\n \n build_url = build.URL\n byte = None\n mb = None\n tb_config = self.config\n \n #get the switch component object\n if 'L3Switch' in self.components.keys():\n l3switch = self.components['L3Switch']\n \n #because upgrade to the base build, waiting time too long, \n #the station sockect connection break, so quit the station at first,\n #after the upgrage zd, recreate the station object:\n for station in self.components['Station']:\n station.__del__()\n del(self.components['Station'])\n \n #set the image file name.\n if server_url_map.SAVE_REPOSITORY.has_key(zd_model_num):\n filename = zd_model_num + \"_\" + base_build_project_num + \".\" + str(bno) + \".tar.gz\"\n if os.path.isdir(server_url_map.SAVE_REPOSITORY[zd_model_num]['share_folder_path']):\n full_fname = server_url_map.SAVE_REPOSITORY[zd_model_num]['share_folder_path'] + filename\n else:\n full_fname = server_url_map.SAVE_REPOSITORY[zd_model_num]['local_path'] + filename\n \n #if no the image file in the target folder,\n #the script will be downloaded it from the build server\n #if the image file is in the target folder,\n #the script will upgrade zd to the base build which is used the image. \n if os.path.isfile(full_fname):\n pass\n elif os.path.isdir(full_fname):\n logging.info(\"Please remove the folder of %s\" % filename)\n raise Exception(\"This is a folder, instead of a file.\")\n else:\n build_url = ih.get_build_url(build_stream, bno)\n if 'http' in build_url:\n if '.img' in build_url:\n filename = re.findall(r'^.*ZD\\d+\\w+/*(.*)', build_url)[0]\n if os.path.isdir(server_url_map.SAVE_REPOSITORY[zd_model_num]['share_folder_path']):\n full_fname = server_url_map.SAVE_REPOSITORY[zd_model_num]['share_folder_path'] + filename\n else:\n full_fname = server_url_map.SAVE_REPOSITORY[zd_model_num]['local_path'] + filename\n fin = ih.download_build_v2(build_url, full_fname)\n if fin:\n pass\n else:\n raise Exception(\"downloaded is not successufully.\")\n else:\n full_fname = build_url\n \n logging.info(\"Waiting all aps join in zd...\")\n if not self.dut.wait_aps_join_in_zd_with_the_expect_status(self.config['ap_mac_list'], self.config['ap_sym_dict']):\n logging.info(\"ap rejoin in zd failed, enable all aps's switch ports\")\n for ap_mac in self.config['ap_mac_to_port'].keys():\n l3switch.enable_interface(self.config['ap_mac_to_port'][ap_mac])\n \n (elapsed, factory_id) = self.dut.upgrade_sw(full_fname, False, True, build_version, False, mesh_enabled)\n \n if factory_id:\n logging.info(\"ZD be setted factory default, so enable all switch ports of the aps.\")\n for ap_mac in self.config['ap_mac_to_port'].keys():\n l3switch.enable_interface(self.config['ap_mac_to_port'][ap_mac])\n \n logging.info(\"Waiting 2 minutes, let ZD all service module enabled.\")\n time.sleep(120)\n\n logging.info(\"Waiting for APs to be upgraded and reconnect. This process takes some minutes. Please wait... 
\")\n ap_upgrade_start_time = time.time()\n list_of_connected_aps = list()\n for associated_ap in self.config['ap_mac_list']:\n while True:\n if (time.time() - ap_upgrade_start_time) > ap_upgrade_timeout:\n raise Exception(\"Error: AP upgrading failed. Timeout\")\n \n si_ap_info = self.dut._get_ap_info(associated_ap)\n status = si_ap_info['status']\n logging.info('ap %s status is %s'%(associated_ap, status))\n if status.lower().startswith(\"connected\"):\n list_of_connected_aps.append(si_ap_info)\n break\n \n return upgrade_id, factory_id, list_of_connected_aps",
"def download_builds(config, builds, force=False):\n if not config.get('datadir'):\n raise ValueError(\"No output dir (--datadir) specified\")\n\n jenkins_client = jenkins.get_client(config)\n download_args = []\n for build in builds:\n if ':' in build:\n (job, build_id) = build.split(':')\n download_args.append(\n (job, build_id, config['datadir'],\n jenkins_client, config.get('groupingParameter'), force)\n )\n else:\n job = build\n for build_id in jenkins_client.fetch_all_build_ids(job):\n download_args.append(\n (job, build_id, config['datadir'],\n jenkins_client, config.get('groupingParameter'), force)\n )\n\n num_threads = config.get('downloadThreads', 7) # arbitrary number\n if num_threads <= 1:\n for args_tuple in download_args:\n _download_one_build(args_tuple)\n else:\n import multiprocessing.pool # only import if we need it!\n pool = multiprocessing.pool.ThreadPool(num_threads)\n pool.map(_download_one_build, download_args)",
"def keil_download(button_download=None,md5='',build=False,nircmd=None):\t\r\n\tU,T,N,F=py.importUTNF() \r\n\tfrom qgb import Win\r\n\tbutton_download=button_download or U.get(keil_download.__name__+'Download')\r\n\tbutton_build=\t\t\t\t\t U.get(keil_download.__name__+'Build')\r\n\tif not button_download or not button_build:\r\n\t\tes=get_keil_es()\r\n\t\tbs=[e for e in es if py.getattr(e,'friendlyclassname',0)=='Button']\r\n\t\t# if build:return es,bs\r\n\t\t# bs=[]\r\n\t\t# for i in range(9):\r\n\t\t\t# print(U.stime(),'wait bs',len(bs))\r\n\t\t\t# U.sleep(0.5)\r\n\t\t# if not bs:return es\r\n\t\tbutton_download=[e for e in bs if e.texts()==['Download']][0]\r\n\t\tbutton_build\t=[e for e in bs if e.texts()==['Build']][0]\r\n\t\t\r\n\tU.set(keil_download.__name__+'Download',button_download)\r\n\tU.set(keil_download.__name__+'Build',button_build)\r\n\t\r\n\tif md5:\r\n\t\t# md5=md5.replace(py.chr(0x0a),T.eol)\r\n\t\tms=[i for i in md5.splitlines() if '.elf' in i]\r\n\t\tmd5=ms[0][:32]\r\n\t\t\r\n\t\tt=button_download.parent().parent().parent().texts()[0]\r\n\t\tsp=T.subLast(t,'','\\\\')\r\n\t\tname=T.subLast(t,'\\\\','.uvprojx')\r\n\t\tif sp and name:\r\n\t\t\tsp=f'{sp}/Objects/{name}.axf'\r\n\t\t\tif md5==U.md5(file=sp):\r\n\t\t\t\timport win32gui\r\n\t\t\t\th=win32gui.GetForegroundWindow()\r\n\t\t\t\tbutton_download.click()\r\n\t\t\t\tU.nircmd('win activate stitle tmux')\r\n\t\t\t\tU.nircmd('win max stitle tmux')\t\r\n\t\t\t\t# for i in range(3):\r\n\t\t\t\t\t# print(Win.GetForegroundWindow())\r\n\t\t\t\t\t#win32gui.SetForegroundWindow(h)\r\n\t\t\t\t\t# U.sleep(0.5)\r\n\t\t\t\t\r\n\t\t\t\treturn [U.StrRepr('='*122+T.eol*3),'Success keil_download !',md5,sp,\r\n\t\t\t\th,get_title(h),\r\n\t\t\t\tU.stime(),U.StrRepr(T.eol*2+'='*122)]\r\n\t\t\t\r\n\t\treturn U.StrRepr('#'*122+T.eol*3),'check failed !!!',md5,sp,U.md5(file=sp),U.StrRepr(T.eol*2+'#'*122)\r\n\t\t\r\n\tif build:\r\n\t\t# print(U.stime(),button_build)\r\n\t\tbutton_build.click()\r\n\t\tprint(U.stime(),button_build)\r\n\t\tU.set('keil.log',U.stime())\r\n\t\tlog=''\r\n\t\twhile ' Error(s)' not in log:\r\n\t\t\tlog=get_keil_log(-10)\r\n\t\t\tU.sleep(0.6)\r\n\t\tif '- 0 Error(s)' not in log:\r\n\t\t\tprint(U.stime(),log)\r\n\t\t\tlog=get_keil_log()\r\n\t\t\tU.set('keil.log',log)\r\n\t\t\treturn py.No(log)\r\n\t\t\t\r\n\tbutton_download.click()\r\n\t\r\n\tif nircmd:\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\t\r\n\treturn button_download",
"def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()",
"def get_latest_successful_build(self, job_id):\n builds = list(self.get_job_builds(\n job_id, started=True, finished=True, success=True, skipped=False,\n order='desc', limit=1))\n if len(builds) < 1:\n return None # No build!\n assert len(builds) == 1 # Or something is broken..\n return builds[0]",
"def pull_snapshot(edition, force):\n from docker import DockerClient\n from docker.errors import ImageNotFound\n docker = DockerClient.from_env(version=\"auto\")\n artifact = resolve_artifact_name(edition)\n if force:\n return download_snapshot_artifact(artifact)\n else:\n derived = derive_image_tag(artifact)\n try:\n docker.images.get(derived)\n except ImageNotFound:\n return download_snapshot_artifact(artifact)\n else:\n return derived",
"def download(self):\n cmd = mccli() + \" d f \" + self.localpath + \" -p \" + self.project.name\n \n set_cli_remote(self.project.remote)\n \n child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = child.communicate()\n return CLIResult(out, err, child.returncode)",
"def main():\n parser = argparse.ArgumentParser(description='Fetch master build artifacts.')\n parser.add_argument('--token', type=str, help='API token to use')\n parser.add_argument(\n '--job', type=str, help='From what job to fetch artifacts from')\n parser.add_argument(\n '--artifact-download-dir',\n type=str,\n default='.',\n help='Where to download the artifacts')\n parser.add_argument(\n '--build-output-dir',\n type=str,\n default='.',\n help='Generated build files directory to use to compare for bloat')\n parser.add_argument(\n '--report-file',\n type=str,\n default='report.txt',\n help='From what job to fetch artifacts from')\n parser.add_argument(\n '--github-api-token',\n type=str,\n help='Github API token to upload the report as a comment')\n parser.add_argument(\n '--github-repository', type=str, help='Repository to use for PR comments')\n parser.add_argument(\n '--github-comment-pr-number',\n type=str,\n default=None,\n help='To what PR to comment in github')\n parser.add_argument(\n '--log-level',\n default=logging.INFO,\n type=lambda x: getattr(logging, x),\n help='Configure the logging level.')\n args = parser.parse_args()\n\n # Ensures somewhat pretty logging of what is going on\n logging.basicConfig(\n level=args.log_level,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n coloredlogs.install()\n\n if not args.token or not args.job:\n logging.error(\n 'Required arguments missing. Please specify at least job and token.')\n return\n\n try:\n ci_fetch_artifacts.fetchArtifactsForJob(args.token, args.job,\n args.artifact_download_dir)\n except Exception as e:\n logging.warning('Failed to fetch artifacts: %r', e)\n\n compareResults = generateBloatReport(\n args.report_file,\n args.artifact_download_dir,\n args.build_output_dir,\n title=\"Bloat report for job '%s'\" % args.job)\n\n if args.github_api_token and args.github_repository and args.github_comment_pr_number:\n sendFileAsPrComment(args.job, args.report_file, args.github_api_token,\n args.github_repository,\n int(args.github_comment_pr_number), compareResults)",
"def test_get_artifacts(self):\r\n if os.environ.get('CIRCLE_ARTIFACTS'):\r\n print('\\nCreate test artifacts (screenshots): ', end='', flush=True)\r\n window_sizes = [[300, 600], [700, 600], [800, 600], [1000, 1000], [1300, 1300]]\r\n \r\n repo = Repo('.')\r\n artifacts_path = os.environ.get('CIRCLE_ARTIFACTS') + '/' + str(repo.active_branch)\r\n \r\n if not os.path.exists(artifacts_path):\r\n os.makedirs(artifacts_path)\r\n \r\n driver = self.driver\r\n driver.get(MY_URL)\r\n for w_size in window_sizes:\r\n driver.set_window_size(w_size[0], w_size[1])\r\n filepath = artifacts_path + '/ff_shot_%d_%d.png' % (w_size[0], w_size[1])\r\n driver.save_screenshot(filepath)\r\n print('.', end=\"\", flush=True)\r\n if DEBUG:\r\n print ('Captured %s' % filepath)\r\n else:\r\n print('\\nNo test artifacts generated. ', end='', flush=True)",
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]",
"def _download_binaries(self, dep):\n download_dict = self.dependency_dict[dep][\"download_binaries\"]\n if self.platform in download_dict:\n download_url = download_dict[self.platform]\n else:\n logger.warning(\n f\"No binaries for download for {dep}, will fake it.\"\n )\n return\n dlname = download_url.split(\"/\")[-1]\n download_path = Path(\".\") / dlname\n logger.debug(f\"downloading {dep} at {download_url} to {dlname}\")\n if self.quiet:\n trackers = ()\n else:\n trackers = (ProgressTracker(DataTransferBar()),)\n request_download(download_url, download_path, trackers=trackers)\n logger.debug(\n f\"downloaded file {download_path}, size\"\n f\" {download_path.stat().st_size}\"\n )",
"def UpdateBuilds(builds):\n\n # The build data file records the last build number for which we\n # generated a report. When we generate the next report, we read\n # this data and increment it to get the new data; when we finish\n # generating the reports, we write the updated values into this file.\n # NOTE: One side effect of doing this at the end: If the script\n # fails in the middle of generating a report, this data does not get\n # updated.\n with open(BUILD_DATA_FILE, 'w') as fp:\n gcc_max = 0\n llvm_max = 0\n for b in builds:\n if b[0] == GCC_ROTATING_BUILDER:\n gcc_max = max(gcc_max, b[1])\n elif b[0] == LLVM_ROTATING_BUILDER:\n llvm_max = max(llvm_max, b[1])\n else:\n fp.write('%s,%d\\n' % (b[0], b[1]))\n if gcc_max > 0:\n fp.write('%s,%d\\n' % (GCC_ROTATING_BUILDER, gcc_max))\n if llvm_max > 0:\n fp.write('%s,%d\\n' % (LLVM_ROTATING_BUILDER, llvm_max))",
"def _download_item(self, image):\n image_name, image_dict = image\n log.info('Downloading image: {}'.format(image_name))\n try:\n if image_dict['pulled']:\n image_to_save = self._docker_client.images.get(image_name)\n else:\n image_to_save = self._pull_image(image_name)\n if self._save:\n self._save_image(image_name, image_to_save, image_dict['dst'])\n except Exception as err:\n log.exception('Error downloading {}: {}'.format(image_name, err))\n raise err",
"def build_dmg(name, version):\n product = name+\" \"+version\n productdash = name+\"-\"+version\n app=\"dist/%s.app\"%product\n dmg=\"dist/%s.dmg\"%productdash\n # Remove previous build if it is still sitting there\n if os.path.exists(app): \n shutil.rmtree(app)\n if os.path.exists(dmg): \n os.unlink(dmg)\n print(os.getcwd(), name, app)\n os.rename(\"dist/%s.app\"%name, app)\n os.system('cd dist && ../extra/dmgpack.sh \"%s\" \"%s.app\" ../doc/_build/html ../doc/examples'\n % (productdash,product))\n os.system('chmod a+r \"%s\"'%dmg)",
"def DoAll(self):\n flags = ['--hwtest']\n date_str = datetime.date.today()\n description = 'master_%s_%s_%s' % (self._patches_string, self._build,\n date_str)\n _ = buildbot_utils.GetTrybotImage(\n self._chromeos_root,\n self._build,\n self._patches,\n description,\n other_flags=flags,\n async=True)\n\n return 0",
"def GetLastFinishedBuildInfo(builder_name, cached_builds):\n cached_builds.sort(reverse=True)\n for build_num in cached_builds:\n build_info = GetBuildInfo(builder_name, build_num)\n if IsBuildFinished(build_info):\n return build_info\n return None",
"def get_last_successful_build_nr(jenkins_url, job_name):\n return execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/lastSuccessfulBuild/buildNumber\"\n )"
]
| [
"0.686224",
"0.5885177",
"0.57551664",
"0.54857945",
"0.54756904",
"0.54189986",
"0.5292523",
"0.5283305",
"0.5274984",
"0.5225317",
"0.521765",
"0.5216646",
"0.52091974",
"0.5197427",
"0.51546884",
"0.5133753",
"0.5104077",
"0.509849",
"0.5095465",
"0.5090588",
"0.50867426",
"0.5074265",
"0.50565636",
"0.5039008",
"0.5015101",
"0.50056314",
"0.49993443",
"0.49815673",
"0.49713317",
"0.49627176"
]
| 0.70967686 | 0 |
Takes a username and iterates over all of the user's anime entries, adding each one to the CSV file. | def profile(username):
link = "https://myanimelist.net/animelist/{}".format(username)
mal_soup = get_soup(link)
table = json.loads(mal_soup.find('table').get('data-items'))
print("\nRunning...\n")
for dic in table:
link = "https://myanimelist.net{}"
res = mal_get_all_info(link.format(dic['anime_url']), dic['anime_title'])
export_to_csv(res, 1)
sleep(5)
print("Finished.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile",
"async def stat(ctx):\r\n await bot.request_offline_members(ctx.message.server)\r\n before = time.time()\r\n nicknames = [m.display_name for m in ctx.message.server.members]\r\n roles = [m.roles for m in ctx.message.server.members]\r\n rn = [[m.name for m in line] for line in roles]\r\n with open('temp.csv', mode='w', encoding='utf-8', newline='') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n for i in range(len(rn)):\r\n writer.writerow([nicknames[i], rn[i]])\r\n after = time.time()\r\n await bot.send_file(ctx.message.author, 'temp.csv', filename='stats.csv',\r\n content=\"Here you go! Check your PM's. Generated in {:.4}ms.\".format((after - before) * 1000))",
"def ExportUser():\n UrlTodos = \"https://jsonplaceholder.typicode.com/todos/?userId={}\".format(\n sys.argv[1])\n DataTask = requests.get(UrlTodos).json()\n\n UrlInfo = \"https://jsonplaceholder.typicode.com/users/{}\".format(\n sys.argv[1])\n DataInfo = requests.get(UrlInfo).json()\n\n USER_ID = sys.argv[1]\n USERNAME = DataInfo.get(\"username\")\n FileName = USER_ID+\".csv\"\n with open(FileName, 'w', newline='') as f:\n writer = csv.writer(f, quoting=csv.QUOTE_ALL)\n for i in DataTask:\n writer.writerow([USER_ID, USERNAME, i.get(\"completed\"),\n i.get(\"title\")])",
"def make_csv(filename):\n usernames = ['kissinfashion', 'instagood', 'beautifuldestinations', 'etdieucrea', 'josecabaco']\n train, test = split_data(filename)\n for user in usernames:\n trainname = '../data/' + 'train_' + user + '.csv'\n testname = '../data/' + 'test_' + user + '.csv'\n train[user].to_csv(path_or_buf=trainname, sep='\\t', index=False, encoding='utf-8')\n test[user].to_csv(path_or_buf=testname, sep='\\t', index=False, encoding='utf-8')",
"def write_csv(error_list, user_list):\n\n with open('error_log.csv', 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(['ERROR', 'COUNT'])\n for value in error_list:\n writer.writerow(value)\n\n with open('user_statistics.csv', 'w', newline='') as csv_file:\n fields = ['USER', 'ERROR', 'INFO']\n writer = csv.DictWriter(csv_file, fields)\n writer.writeheader()\n for key, value in user_list:\n row = {'USER': key}\n row.update(value)\n writer.writerow(row)",
"def get_data(html_file):\n #Parse HTML data into CSV File\n global count\n filler = open(\"myanime{}.csv\".format(count),'w',encoding=\"utf-8\",newline='')\n writer = csv.writer(filler)\n count += 1\n rows = [\"Name\",\"Score\",\"Summary\"]\n #CSV File Headers\n writer.writerow(rows)\n\n mal = BeautifulSoup(html_file , \"html.parser\")\n\n #Scraping Name and Score of Anime from Site HTML \n summer = mal.find(\"div\",class_ = \"js-categories-seasonal\")\n\n who=summer.find_all(\"div\",class_=\"seasonal-anime js-seasonal-anime\")\n\n #Loop for Jacking the Data\n \n for win in who :\n \n title = win.find(\"div\")\n \n aniwin = win.find(\"div\", class_=\"information\")\n\n summary = win.find(\"span\", class_=\"preline\")\n \n scoreman = aniwin.find(\"div\", class_ =\"scormem\")\n \n score = scoreman.find(\"span\", class_=\"score\")\n \n ss = score.text.strip(\" \")\n \n name = title.p.text\n\n summ = summary.text.strip(\" \")\n \n print(\" \")\n \n n = name.strip(\" \\n\")\n \n s = ss.split()[0].strip(\" \\n\")\n\n su = summ.strip(\" \\n\")\n \n print(name.strip(\" \\n\"), end= \" : \")\n \n print(ss.split()[0])\n\n print(summ)\n \n print(\" \")\n \n\n #Checking if Anime is Scored\n\n if s == \"N/A\":\n #Replace \"NA\" score with Different Output\n writer.writerow([n,\"not scored yet\"])\n \n #new.write(str(n) + \":\" + str(s) + \"\\n\")\n \n #new.write(\"-------------------------------\\n\")\n \n else:\n \n writer.writerow([n,s,su])\n #writing in csv file\n #new.write(str(n) + \":\" +str(s) + \"\\n\")\n \n #new.write(\"-------------------------------\\n\")",
"def add_users_from_file(self, input_file, out_file):\n\n csv_file_read = open(input_file, 'r')\n rows_dict = csv.DictReader(csv_file_read)\n\n # Process file entries, appending to the file one at a time\n for row in rows_dict:\n csv_file_write = open(out_file, 'a', newline='')\n writer = csv.DictWriter(csv_file_write, rows_dict.fieldnames)\n print('---\\nProcessing: ' + row['firstName'] + ' ' + row['surname'])\n if not self.password_validates(row['newPassword']):\n comment = \"ICE won't accept this password even if i try it!\"\n else:\n comment = self.add_user(row)\n print(comment)\n # i = datetime.now()\n # row['Status'] = comment + ' (%s/%s/%s %s:%s)' % (i.day, i.month, i.year, i.hour, i.minute)\n row['Status'] = comment + ' (' + datetime.now().strftime('%d %b %Y %H:%M') + ')' # 01 Jan 1900 19:00\n writer.writerow(row)\n csv_file_write.close()\n\n csv_file_read.close()\n self.driver.quit()",
"def get_employee_info_csv(url, user_id):\n user = get(url + 'users/' + user_id).json()\n user_name = user.get('username')\n todos = get(url + 'todos?userId={}'.format(user_id)).json()\n\n try:\n with open('{}.csv'.format(user_id), mode='w') as f:\n write = csv.writer(f, quoting=csv.QUOTE_ALL)\n for task in todos:\n write.writerow([\n user_id,\n user_name,\n task.get('completed'),\n task.get('title'),\n ])\n except IOError:\n print(\"I/O error\")",
"def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def log_followers(self) -> None:\n with open(self.graphfile, 'a') as gf:\n writer = csv.writer(gf)\n follower_count = len(self.get_user_followers(''))\n date = datetime.datetime.now()\n\n # Append the current date and follower count to the file\n writer.writerow([date, follower_count])\n gf.close()",
"def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])",
"def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)",
"def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()",
"def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])",
"def generateHourlyWeatherInCSV(self):\n\t\tfor town in self.helper.getTowns():\n\t\t\tself.storeHourlyWeatherInCSV(unicode(town),\"f\")\n\t\t\tself.storeHourlyWeatherInCSV(unicode(town),\"c\")",
"def _export_users(admin_access_token):\n admin = User.query.filter_by(id_=ADMIN_USER_ID).one_or_none()\n if admin_access_token != admin.access_token:\n raise ValueError(\"Admin access token invalid.\")\n csv_file_obj = io.StringIO()\n csv_writer = csv.writer(csv_file_obj, dialect=\"unix\")\n for user in User.query.all():\n csv_writer.writerow(\n [user.id_, user.email, user.access_token, user.username, user.full_name]\n )\n return csv_file_obj",
"def export_rep(name):\r\n attendance_list = read_rep()\r\n try:\r\n with open(name + '.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n # makes table in Excel by employee and attendance dates\r\n writer.writerow([\"Employee\", \"Attendance\"])\r\n for worker in attendance_list:\r\n count = 0\r\n for date in worker[1]:\r\n if not count:\r\n # first date needs to add name of worker\r\n writer.writerow([worker[0], date])\r\n count += 1\r\n # write only date\r\n else:\r\n writer.writerow(['', date])\r\n print(\"csv file made\")\r\n return attendance_list\r\n except PermissionError:\r\n print(\"file is opened, please close and try again\")\r\n return attendance_list",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def writer(Q, filepath):\n \n print(\"Starting Writer\")\n with open(filepath, \"wt\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n tsv_writer.writerow([\"user_id\", \"items_actioned_on\"])\n \n with open(filepath, \"a\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n i = 0\n while 1:\n m = Q.get()\n if m == \"kill\":\n print(\"\\nStoping Writer\")\n break\n if len(m) == 2:\n user_id = m[0]\n #print(\"GEt\",user_id)\n _user_truth = m[1]\n tsv_writer.writerow([user_id, f\",\".join(_user_truth) ])\n if i % 1000 == 0:\n print(f\"{i:,} / {GlobalVar.value:,} ({i/GlobalVar.value * 100:.2f})%\", end=\"\\r\", flush=True)\n i+=1",
"def csvAppend(asin, price, name):\n file_exists = os.path.isfile('CSVs/' + asin + '.csv') # Check if file exists\n date = arrow.now().format('YYYY/MM/DD')\n headers = ['Date', 'ASIN', 'Price', 'Name']\n\n with open('CSVs/' + asin + '.csv', 'a') as appendWrite:\n writer = csv.DictWriter(appendWrite, fieldnames=headers, delimiter=',', lineterminator='\\n')\n if not file_exists:\n writer.writeheader()\n writer.writerow({'Date': date, 'ASIN': asin, 'Price': price, 'Name': name.encode('utf-8').strip()})",
"def writeFile(self, name, folder, collected_entry_list=[]):\n file_io = open(os.path.join(folder, \"system_%s.csv\" % name), \"w\")\n csv_output = csv.writer(file_io)\n csv_output.writerow([\"time\", \"entry\"])\n for collected_entry in collected_entry_list:\n csv_output.writerow([collected_entry[\"time\"], collected_entry[\"entry\"]])\n file_io.close()",
"def csvWriter(asin, price, name):\n # NOT USED\n date = arrow.now().format('YYYY/MM/DD')\n headers = ['Date', 'ASIN', 'Price', 'Name']\n with open('CSVs/' + asin + '.csv', 'w') as newWrite:\n writer = csv.writer(newWrite)",
"def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)",
"def WriteToCSV(datalist):\n\n\tglobal csv_success\n\t# Define header\n\theader = ['time', 'accx', 'accy', 'accz', 'gx', 'gy', 'gz', 'mx', 'my', 'mz']\n\n\t# Define our filename\n#\tts = time.time()\n\ttimestamp = time.strftime(\"%Y%m%d_\")\n\n\tfilename = str(timestamp+ \"log.csv\")\n\n\t# Handling to open our file if it exists or create new one\n\tif exists(filename):\n\t\t# try: \n\t\tf = csv.writer(open(filename,\"a\"),lineterminator='\\n')\n\t\t\t# break\n\t\t# except:\n\telse:\n\t\tf = csv.writer(open(filename,\"a+\"),lineterminator='\\n')\n\t\t# Write our header line out if this is a new file\n\t\tf.writerow(header)\n\t\t\n\n\n\t\n\tf.writerow([ datalist['time'], datalist['acc'][0],datalist['acc'][1],datalist['acc'][2],datalist['gyro'][0],datalist['gyro'][1],datalist['gyro'][2],datalist['mag'][0],datalist['mag'][1],datalist['mag'][2] ])\n\t\n\t\n\tcsv_success = True\n\treturn csv_success",
"def write_all_users(folder_name: str, label: bool):\n make_directory(folder_name)\n for user in get_user_ids():\n print(\"Analysis of user: \" + user)\n subfolder_name = folder_name + \"/\" + user\n make_directory(subfolder_name)\n for session in get_user_session_ids(user):\n print(\"Session: \" + session)\n file_name = subfolder_name + \"/\" + session + \".csv\"\n data = get_feature_vector(user, session)\n if data == None:\n continue\n if label:\n data = [labels] + data\n write_to_csv(data, file_name)",
"def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")",
"def writeUser(userid, rating, location, country):\n\tlst = [userid, rating, location, country]\n\twriteLine(lst, users_file)",
"def write_to_csv(self, verbose: bool = False) -> None: \n Path(self.csv_dir).mkdir(exist_ok=True)\n with open(f\"{self.csv_dir}/train.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as train_file:\n with open(f\"{self.csv_dir}/test.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as test_file:\n csv_header = (\"phone\", \"phone_class_index\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\")\n train_csvwriter = csv.writer(train_file)\n test_csvwriter = csv.writer(test_file)\n train_csvwriter.writerow(csv_header)\n test_csvwriter.writerow(csv_header)\n for vowels_and_formants, wav_path, category in self:\n if verbose:\n print(f\"File: {wav_path} (category: {category})\")\n writer = train_csvwriter if category == \"TRAIN\" else test_csvwriter\n for vowel_and_formants in vowels_and_formants:\n phone, formants = vowel_and_formants\n row = (phone, ipa_class_index[phone]) + tuple(formants)\n writer.writerow(row)\n if verbose:\n print(row)",
"def createRatingCSV(self):\n\n judgesExcelLogger.info(\"createRatingCSV: Generating CSV file of ratings\")\n try:\n os.chdir(self.path) # Change to set's directory context\n\n # Set up the header\n header = \"Song,Stepartist,Set\"\n for judgeName in self.judgeNames:\n header += \",\" + judgeName\n header += \",supp\"\n # print(header)\n\n with open(self.setCSV, 'w') as setRatings:\n setRatings.write(header+\"\\n\")\n # Set up the judges for printing out. Remember this has tuples\n songcounter = 0\n for song in self.setSongs:\n lineToWrite = song[0] + \",\" + song[1] + \",\" + self.setNumber\n for judgeName in self.judgeNames:\n lineToWrite += \",\" + (self.judgeToRating[judgeName])[songcounter]\n setRatings.write(lineToWrite+\"\\n\")\n songcounter += 1\n setRatings.close()\n judgesExcelLogger.info(\"createRatingCSV: Successfully wrote CSV File '%s'\", self.setCSV)\n except:\n judgesExcelLogger.warning(\"createRatingCSV: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))"
]
| [
"0.6231963",
"0.6130325",
"0.6032329",
"0.58681035",
"0.58271366",
"0.57833856",
"0.57713455",
"0.5771236",
"0.57581323",
"0.5747821",
"0.5747056",
"0.5650063",
"0.56407607",
"0.55748767",
"0.55562955",
"0.54966843",
"0.5489312",
"0.5480623",
"0.54588765",
"0.54519194",
"0.5446363",
"0.5446149",
"0.5439641",
"0.5407361",
"0.540524",
"0.54028016",
"0.53997356",
"0.53856975",
"0.5361556",
"0.5359488"
]
| 0.70463216 | 0 |
Initialize the logger. The verbose_level should be in [0, 1, 2]. This won't return anything but will reconfigure the root logger. | def set_logger(verbose_level):
if verbose_level >= 2:
logging_level = logging.DEBUG
elif verbose_level == 1:
logging_level = logging.INFO
else:
logging_level = logging.ERROR
logging.basicConfig(level=logging_level,
stream=sys.stdout,
format='%(levelname)s - %(message)s') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_logger(verbosity):\n # Register our logging handler\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(verbosity)\n rootLogger = logging.getLogger('')\n rootLogger.addHandler(handler)\n\n # Decrease the log level of the root logger if needed\n if verbosity < rootLogger.level:\n rootLogger.setLevel(verbosity)",
"def init_logger(self):\n\n if self.args.log_level:\n log_level = getattr(logging, self.args.log_level)\n if coloredlogs:\n coloredlogs.install(level=log_level, fmt=LOG_FMT)\n else:\n logging.basicConfig(level=log_level)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FMT)\n ch.setFormatter(formatter)\n elif coloredlogs:\n coloredlogs.install(level='INFO', fmt=LOG_FMT)\n\n if coloredlogs:\n effective_level = coloredlogs.get_level()\n else:\n effective_level = logger.getEffectiveLevel()\n\n # make sure warning and error display at any effective level\n if effective_level > logging.WARNING:\n self.warning = logger.critical\n else:\n self.warning = logger.warning\n\n if effective_level > logging.ERROR:\n self.error = logger.critical\n else:\n self.error = logger.error\n\n self.info = logger.info\n self.debug = logger.debug\n self.exception = logger.exception\n self.critical = logger.critical",
"def set_logger(verbose):\n\n level = 0\n if verbose == 1:\n level = logging.INFO\n elif verbose == 2:\n level = logging.DEBUG\n elif verbose == 3:\n level = logging.WARNING\n elif verbose == 4:\n level = logging.ERROR\n elif verbose == 5:\n level = logging.CRITICAL\n\n format = '%(module)s::%(funcName)s() - %(levelname)s: %(message)s'\n datefmt = '%m-%d %H:%M'\n logging.basicConfig(level=level, format=format, datefmt=datefmt)",
"def setup_logging(verbose=False):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO if not verbose else logging.DEBUG)",
"def logging_init(level, logfile=None, verbose=False):\n # Get logging related arguments & the configure logging\n if logfile:\n logfile = os.path.abspath(logfile)\n\n # Don't bother with a file handler if we're not logging to a file\n handlers = ['console', 'filehandler'] if logfile else ['console', ]\n\n # If the main logging level is any of these, set librarys to WARNING\n lib_warn_levels = ('DEBUG', 'INFO', 'WARNING', )\n\n # The base logging configuration\n BASE_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'ConsoleFormatter': {\n '()': ColorFormatter,\n 'format': '%(levelname)s: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n 'FileFormatter': {\n '()': ColorStripper,\n 'format': (\"%(levelname)-8s: %(asctime)s '%(message)s' \"\n '%(name)s:%(lineno)s'),\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG' if verbose else level,\n 'class': 'logging.StreamHandler',\n 'formatter': 'ConsoleFormatter',\n },\n },\n 'loggers': {\n 'walleter': {\n 'handlers': handlers,\n 'level': 'DEBUG' if verbose else level,\n 'propagate': False,\n },\n 'requests': {\n 'handlers': handlers,\n 'level': 'WARNING' if level in lib_warn_levels else level,\n 'propagate': False,\n },\n }\n }\n\n # If we have a log file, modify the dict to add in the filehandler conf\n if logfile:\n BASE_CONFIG['handlers']['filehandler'] = {\n 'level': 'DEBUG' if verbose else level,\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': logfile,\n 'formatter': 'FileFormatter',\n }\n\n # Setup the loggers\n dictConfig(BASE_CONFIG)",
"def set_logger(verbose: [str, int] = 'info'):\n # Set 0 and None as no messages.\n if (verbose==0) or (verbose is None):\n verbose=60\n # Convert str to levels\n if isinstance(verbose, str):\n levels = {'silent': 60,\n 'off': 60,\n 'no': 60,\n 'debug': 10,\n 'info': 20,\n 'warning': 30,\n 'critical': 50}\n verbose = levels[verbose]\n\n # Show examples\n logger.setLevel(verbose)",
"def set_logger(verbose: [str, int] = 'info'):\n # Set 0 and None as no messages.\n if (verbose==0) or (verbose is None):\n verbose=60\n verbose = convert_verbose_to_new(verbose)\n # Convert str to levels\n if isinstance(verbose, str):\n levels = {'silent': 60,\n 'off': 60,\n 'no': 60,\n 'debug': 10,\n 'info': 20,\n 'warning': 30,\n 'error': 50,\n 'critical': 50}\n verbose = levels[verbose]\n\n # Show examples\n logger.setLevel(verbose)",
"def initialize(context, level):\n if not Log.initialized:\n Log.logger = logging.getLogger(context)\n Log.initialized = True\n logging.basicConfig(\n filename=CONST.APP_LOG_FILENAME,\n format=CONST.APP_LOG_FORMAT,\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n Log.logger.setLevel(level)\n Log.logger.log(50, 'Logging initialised, level={}'.format(level))\n return Log.logger",
"def _configure_logging(verbosity_lvl=logging.INFO):\n logging.basicConfig(level=verbosity_lvl, format='%(levelname)s: %(message)s')",
"def init_logger(config_path, verbosity):\n logging.config.fileConfig(config_path)\n logger = logging.getLogger()\n if verbosity:\n logger.setLevel(logging.DEBUG)",
"def reconfigure_logging(verbose_level):\n # Exit when nothing to do.\n if verbose_level == 0:\n return\n\n root = logging.getLogger()\n # Tune logging level.\n level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))\n root.setLevel(level)\n # Be verbose with messages.\n if verbose_level <= 3:\n fmt_string = '%(name)s: %(levelname)s: %(message)s'\n else:\n fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt=fmt_string))\n root.handlers = [handler]",
"def configure_logging(self):\n\n if self.options.debug:\n # --debug forces verbose_level 3\n # Set this here so cliff.app.configure_logging() can work\n self.options.verbose_level = 3\n\n super(Servizor, self).configure_logging()\n root_logger = logging.getLogger('')\n\n # Requests logs some stuff at INFO that we don't want\n # unless we have DEBUG\n requests_log = logging.getLogger(\"requests\")\n requests_log.setLevel(logging.ERROR)\n\n # Other modules we don't want DEBUG output for so\n # don't reset them below\n iso8601_log = logging.getLogger(\"iso8601\")\n iso8601_log.setLevel(logging.ERROR)\n\n # Set logging to the requested level\n self.dump_stack_trace = False\n if self.options.verbose_level == 0:\n # --quiet\n root_logger.setLevel(logging.ERROR)\n elif self.options.verbose_level == 1:\n # This is the default case, no --debug, --verbose or --quiet\n root_logger.setLevel(logging.WARNING)\n elif self.options.verbose_level == 2:\n # One --verbose\n root_logger.setLevel(logging.INFO)\n elif self.options.verbose_level >= 3:\n # Two or more --verbose\n root_logger.setLevel(logging.DEBUG)\n requests_log.setLevel(logging.DEBUG)\n\n if self.options.debug:\n # --debug forces traceback\n self.dump_stack_trace = True",
"def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)",
"def verbose_logger():\n lumigo_utils.get_logger().setLevel(logging.DEBUG)\n lumigo_utils.config(should_report=False, verbose=True)",
"def init(logfile = None, level = WARNING, verbose = True):\r\n\r\n # Debug information writes to log using SNSAPPLog.debug().\r\n # How do you debug the logger itself...?\r\n # Here it is...\r\n # We fall back to the print.\r\n # They should be comment out to make the screen clean.\r\n #print \"=== init log ===\"\r\n #print \"logfile:%s\" % logfile\r\n #print \"level:%s\" % level\r\n #print \"verbose:%s\" % verbose\r\n\r\n if logfile:\r\n logging.basicConfig(\\\r\n format='[%(levelname)s][%(asctime)s]%(message)s', \\\r\n datefmt='%Y%m%d-%H%M%S', \\\r\n level = level, \\\r\n filename = logfile\r\n )\r\n else:\r\n logging.basicConfig(\\\r\n format='[%(levelname)s][%(asctime)s]%(message)s', \\\r\n datefmt='%Y%m%d-%H%M%S', \\\r\n level = level\r\n )\r\n SNSAPPLog.VERBOSE = verbose",
"def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)",
"def init(level):\n Log.chosen_level = level\n logging.basicConfig(\n format=\"%(levelname)s\\t%(name)s\\t%(asctime)s\\t%(message)s\",\n level=level)",
"def set_verbosity():\n\n\tif conf.verbose is None:\n\t\tconf.verbose = 1\n\n\tconf.verbose = int(conf.verbose)\n\n\tif conf.verbose == 0:\n\t\tlogger.setLevel(logging.ERROR)\n\telif conf.verbose == 1:\n\t\tlogger.setLevel(logging.INFO)\n\telif conf.verbose == 2:\n\t\tlogger.setLevel(logging.DEBUG)\n\telif conf.verbose == 3:\n\t\tlogger.setLevel(CUSTOM_LOGGING.PAYLOAD)\n\telif conf.verbose == 4:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)\n\telif conf.verbose >= 5:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)",
"def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)",
"def init_logger():\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(f'[%(asctime)s] %(name)s level=%(levelname)s %(filename)s:%(lineno)d \"%(message)s\"')\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Silencing the noisy Kafka logger\n kafka_logger = logging.getLogger('kafka')\n kafka_logger.setLevel(logging.ERROR)",
"def init_logging(self, options):\n if options.quiet:\n loglevel = logging.ERROR\n elif options.debug:\n loglevel = logging.DEBUG\n else:\n loglevel = logging.INFO\n\n init_logging(loglevel=loglevel, color=options.color)\n\n self.logger = logging.getLogger(self.name)",
"def get_logger(self, verbose):\n log_levels = [logging.INFO, logging.DEBUG]\n\n log = logging.getLogger()\n log.setLevel(log_levels[int(verbose)])\n \n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(log_levels[int(verbose)])\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n ch.setFormatter(formatter)\n log.addHandler(ch)\n\n return log",
"def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap",
"def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)",
"def set_verbose(verbose=\"ERROR\"):\n if verbose == \"INFO\":\n logger.setLevel(logging.INFO)\n elif verbose == \"DEBUG\":\n logger.setLevel(logging.DEBUG)\n elif verbose == \"ERROR\":\n logger.setLevel(logging.ERROR)\n else:\n print('Incorrect verbose level, option:[\"INFO\",\"DEBUG\",\"ERROR\"], use \"ERROR instead.\"')\n logger.setLevel(logging.ERROR)",
"def init_logger():\n LOG_LEVEL = logging.INFO\n LOGFORMAT = \"%(log_color)s%(levelname)-1s: %(log_color)s%(message)s\"\n logging.root.setLevel(LOG_LEVEL)\n formatter = ColoredFormatter(LOGFORMAT)\n stream = logging.StreamHandler()\n stream.setLevel(LOG_LEVEL)\n stream.setFormatter(formatter)\n log = logging.getLogger('pythonConfig')\n log.setLevel(LOG_LEVEL)\n log.addHandler(stream)\n return log",
"def initialize_root_logger(log_level=INFO):\n formatter = Formatter(LOGGING_FORMAT)\n\n console_handler = StreamHandler()\n console_handler.setFormatter(formatter)\n\n root_logger = getLogger(__name__)\n root_logger.setLevel(log_level)\n root_logger.addHandler(console_handler)\n\n return root_logger",
"def setup_logging(log_file, verbose):\n if verbose:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n logger.setLevel(log_level)\n\n log_format = logging.Formatter('%(asctime)-15s %(message)s')\n\n console_log = logging.StreamHandler()\n console_log.setLevel(log_level)\n console_log.setFormatter(log_format)\n\n file_log = logging.FileHandler(log_file)\n file_log.setFormatter(log_format)\n file_log.setLevel(log_level)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(console_log)\n root_logger.addHandler(file_log)",
"def _init_logging(verbosity=0, log_filename=None):\n\n root_logger = logging.getLogger()\n root_logger.handlers = []\n root_logger.addHandler(logging.NullHandler())\n\n sats_logger = logging.getLogger('sats')\n\n # Have the logger itself set with the lowest possible level\n sats_logger.setLevel(logging.DEBUG)\n # Reset any handlers that might have been set accidentally\n sats_logger.handlers = []\n\n # Always at least INFO in .flog\n file_level = logging.INFO\n\n if verbosity <= -2:\n stdout_level = logging.CRITICAL\n elif verbosity <= -1:\n stdout_level = logging.ERROR\n elif verbosity >= 1:\n stdout_level = logging.DEBUG\n file_level = logging.DEBUG\n else:\n stdout_level = logging.INFO\n\n # add the file handler only if a name is given\n if log_filename is not None:\n file_handler = logging.FileHandler(log_filename)\n file_handler.setLevel(file_level)\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s '\n '<%(module)s.%(funcName)s> '\n '%(message)s',\n datefmt='%Y%m%d %H:%M:%S')\n file_handler.setFormatter(formatter)\n sats_logger.addHandler(file_handler)\n\n # Make these uniform widths\n logging.addLevelName(10, '--')\n logging.addLevelName(20, '>>')\n logging.addLevelName(30, '**')\n logging.addLevelName(40, '!!')\n logging.addLevelName(50, 'XX')\n\n # Use nice coloured console output\n console = ColouredConsoleHandler(stream=sys.stdout)\n console.setLevel(stdout_level)\n formatter = logging.Formatter('%(levelname)s %(message)s')\n console.setFormatter(formatter)\n # add the handler to the root logger\n sats_logger.addHandler(console)",
"def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )"
]
| [
"0.7625532",
"0.70968705",
"0.7083723",
"0.7014087",
"0.7007587",
"0.69671464",
"0.6960385",
"0.6925688",
"0.691053",
"0.685628",
"0.68291914",
"0.6796135",
"0.67866683",
"0.67789334",
"0.6759883",
"0.67246085",
"0.66928285",
"0.6676307",
"0.66545886",
"0.66129297",
"0.6597178",
"0.64832747",
"0.64767015",
"0.64731544",
"0.64729357",
"0.6469029",
"0.6436406",
"0.6416963",
"0.6403348",
"0.6397667"
]
| 0.72269374 | 1 |
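The negative examples that close the row above all deal with initializing Python's logging module. As a point of reference only — not part of the dataset itself — a minimal, self-contained setup along the lines these snippets assume could look like the sketch below; the format string, logger handling, and level are illustrative choices, not taken from any of the entries.

```python
import logging
import sys

def configure_logging(level=logging.INFO):
    """Minimal root-logger setup: one stream handler, one formatter (illustrative)."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)-8s %(name)s: %(message)s"))
    root = logging.getLogger()
    root.setLevel(level)
    root.handlers = [handler]  # replace any previously attached handlers
    return root

logger = configure_logging(logging.DEBUG)
logger.debug("logging is configured")
```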
Convert distributions to openTURNS. The list of distributions is converted to openTURNS objects. | def dists_to_ot(dists):
try:
dists = [eval('ot.' + dist, {'__builtins__': None},
{'ot': __import__('openturns')})
for dist in dists]
except (TypeError, AttributeError):
raise AttributeError('OpenTURNS distribution unknown.')
return dists | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convertToDistr(self, ws):\n alg = self.createChildAlgorithm('ConvertToDistribution')\n alg.setProperty('Workspace', ws)\n alg.execute()",
"def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()",
"def as_distribution_list(dists):\n if isinstance(dists, str):\n return [tuple(x.split(',')) for x in dists.split()]\n else:\n return dists",
"def get_distributions(config):\n url = \"{}/{}\".format(config['url_base'], \"distributions.json\")\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n distributions = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n return distributions",
"def convertmany(self, *args, **kwargs):\n return _coordsys.coordsys_convertmany(self, *args, **kwargs)",
"def to_deterministic(self) -> \"Distribution\":\n raise NotImplementedError",
"def display_distributions_over_positions(self, distributions):\n dists = []\n for dist in distributions:\n if dist is not None:\n if not isinstance(dist, util.Counter):\n raise Exception(\"Wrong type of distribution\")\n dists.append(dist)\n else:\n dists.append(util.Counter())\n\n if ((self.display is not None and\n 'update_distributions' in dir(self.display))):\n self.display.update_distributions(dists)\n else:\n self._distributions = dists # These can be read by pacclient.py",
"def iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n for dist in self._iter_distributions():\n # Make sure the distribution actually comes from a valid Python\n # packaging distribution. Pip's AdjacentTempDirectory leaves folders\n # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The\n # valid project name pattern is taken from PEP 508.\n project_name_valid = re.match(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\",\n dist.canonical_name,\n flags=re.IGNORECASE,\n )\n if not project_name_valid:\n logger.warning(\n \"Ignoring invalid distribution %s (%s)\",\n dist.canonical_name,\n dist.location,\n )\n continue\n yield dist",
"def sdc_to_distributions(self, mysdc):\n if \"right\" in mysdc[\"verb\"]:\n D_mat = transpose(self.T_mat_right)\n elif \"left\" in mysdc[\"verb\"]:\n D_mat = transpose(self.T_mat_left)\n else:\n D_mat = transpose(self.T_mat_str)\n \n T_mat = ones([len(D_mat), len(D_mat[0])])*1.0\n \n \n if mysdc[\"sr\"] != None and len(mysdc[\"landmarks\"]) > 0 and self.use_spatial_relations:\n\n sr_i = self.sr_class.engineToIdx(mysdc[\"sr\"])\n SR_mat = self.srel_mat[sr_i,:,:,:]\n L_mat = self.get_prob_landmark_given_sdc_modifiers(mysdc)\n L_mat_entropy = entropy(L_mat)\n print \"using spatial relations\", str(mysdc)\n if L_mat_entropy > 2 and False:\n SR_mat = None\n L_mat = None\n else:\n SR_mat = None\n L_mat = None\n \n if mysdc[\"landmark\"] != None:\n O_mat = self.O_mat[:,self.names_to_index[mysdc[\"landmark\"]]]\n else:\n O_mat = None\n \n return O_mat, T_mat, SR_mat, L_mat, D_mat",
"def build_distribution(entity, doc, focus):\n dist = {}\n\n def _correct_type(entity, focus):\n return entity.type == focus\n\n def _get_next(iterable, index, focus):\n if (index + 1) < len(iterable):\n if (focus == 'type') or _correct_type(iterable[index+1], focus):\n return iterable[index+1]\n return False\n\n def _get_item(obj, focus):\n if focus == 'type':\n return obj.type\n return obj.value\n\n def _val_to_freq(dist):\n total = sum(list(dist.values()))\n for key in dist:\n dist[key] *= 1/total\n \n vals = tuple(dist.keys())\n freq = tuple(dist[val] for val in vals)\n return Distribution(vals, freq)\n\n\n\n for i, item in enumerate(doc.entities):\n next_item = _get_next(doc.entities, i, focus)\n if next_item:\n if item.value == entity.value:\n _update_dict(dist, _get_item(next_item, focus), 1)\n\n return _val_to_freq(dist)",
"def transform(self, docs):\n return [doc for doc in docs]",
"def build(self, distribution):\n\t\t# get latets rpms from the latest distribution snapshot\n\t\ttry:\n\t\t\tartefact = StorageReader().retrieve({\n\t\t\t\t\"artefact\": ARTEFACT_GOLANG_DISTRIBUTION_SNAPSHOT,\n\t\t\t\t\"distribution\": distribution.json()\n\t\t\t})\n\t\texcept KeyError:\n\t\t\traise KeyError(\"Distribution snapshot for '%s' not found\" % distribution)\n\n\t\tcounter = 0\n\t\tbuilder = DatasetBuilder()\n\n\t\tbuilds = DistributionSnapshot().read(data).builds()\n\t\tbuilds_total = len(builds)\n\t\tbuilds_counter = 0\n\t\tfor pkg in builds:\n\t\t\tbuilds_counter = builds_counter + 1\n\t\t\tlogger.info(\"%s/%s Processing %s\" % (builds_counter, builds_total, builds[pkg][\"build\"]))\n\n\t\t\t# get artefact\n\t\t\tdata = {\n\t\t\t\t\"product\": distribution.product(),\n\t\t\t\t\"distribution\": distribution.version(),\n\t\t\t\t\"build\": {\n\t\t\t\t\t\"name\": builds[pkg][\"build\"],\n\t\t\t\t\t\"rpms\": map(lambda l: {\"name\": l}, builds[pkg][\"rpms\"])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor rpm in builds[pkg][\"rpms\"]:\n\t\t\t\tartefact_key = {\n\t\t\t\t\t\"artefact\": ARTEFACT_GOLANG_PROJECT_DISTRIBUTION_PACKAGES,\n\t\t\t\t\t\"product\": distribution.product(),\n\t\t\t\t\t\"distribution\": distribution.version(),\n\t\t\t\t\t\"build\": builds[pkg][\"build\"],\n\t\t\t\t\t\"rpm\": rpm,\n\t\t\t\t}\n\n\t\t\t\ttry:\n\t\t\t\t\tartefact = StorageReader().retrieve(artefact_key)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tWorker(\"scandistributionbuild\").setPayload({\n\t\t\t\t\t\t\"product\": product,\n\t\t\t\t\t\t\"distribution\": version,\n\t\t\t\t\t\t\"build\": {\n\t\t\t\t\t\t\t\"name\": builds[pkg][\"build\"],\n\t\t\t\t\t\t\t\"rpms\": builds[pkg][\"rpms\"],\n\t\t\t\t\t\t}\n\t\t\t\t\t}).do()\n\n\t\t\t\ttry:\n\t\t\t\t\tartefact = StorageReader().retrieve(artefact_key)\n\t\t\t\texcept KeyError as e:\n\t\t\t\t\tlogger.error(e)\n\t\t\t\t\tcontinue\n\n\t\t\t\tbuilder.addDistributionArtefact(artefact)\n\n\t\treturn builder.build().dataset()",
"def packages_distributions():\n pkg_to_dist = collections.defaultdict(list)\n for dist in metadata.distributions():\n for pkg in (dist.read_text(\"top_level.txt\") or \"\").split():\n pkg_to_dist[pkg].append(dist.metadata[\"Name\"])\n return dict(pkg_to_dist)",
"def convert():\n parser = _parser()\n args = parser.parse_args()\n\n logger.setLevel(args.log_level)\n\n with contextlib.ExitStack() as stack:\n data = [stack.enter_context(ctra.formats.oxstats_genotypes(*a))\n for a in ctra.algorithms.kwise(args.load_oxstats, 2)]\n samples = list(itertools.chain.from_iterable(s for _, _, s, _ in data))\n merged = ctra.formats.merge_oxstats([d for _, _, _, d in data])\n if args.num_samples > len(samples):\n logger.error('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n sys.exit(1)\n elif args.num_samples < len(samples):\n logger.warn('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n if os.path.exists(args.out) and not args.force:\n logger.error('Output file {} already exists. Not overwriting')\n sys.exit(1)\n outfile = stack.enter_context(h5py.File(args.out, 'w'))\n outfile.create_dataset('dosage', shape=(args.num_samples, args.num_variants), dtype='float32', chunks=args.chunk_size)\n outfile.create_dataset('info', shape=(1, args.num_variants), dtype='float32')\n for j, row in enumerate(merged):\n if j >= args.num_variants:\n logger.warn('{} variants processed, but additional variants are present'.format(j))\n break\n probs = numpy.array([float(x) for x in row[5:]])\n x, y = info(probs)\n outfile['dosage'][:, j] = x\n if not j % 1000:\n logger.debug('{} variants processed'.format(j))\n if j + 1 < args.num_variants:\n logger.error('{} variants present in OXSTATS data, but {} were specified'.format(j, args.num_variants))\n sys.exit(1)",
"def cast(*args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_cast(*args)",
"def generate_probabilities(self):\n dists = np.copy(self.distances)\n\n for i in range(self.number_towers):\n for j in range(self.number_towers):\n if self.method == 'distance_distribution':\n dists[i][j] = (\n -1 *\n (dists[i][j] ** 2) *\n xamtfos(dists[i][j] ** 2, self.sigma) *\n self.expander\n )\n elif self.method == 'distance_square':\n dists[i][j] = -1 * (dists[i][j] + 1) ** self.distance_power\n\n normalizer = dists.max().max() / 2\n dists -= normalizer\n\n return np.array([\n softmax(dists[i])\n for i in range(self.number_towers)\n ])",
"def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]",
"def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]",
"def cast(*args):\n return _itkContourDirectedMeanDistanceImageFilterPython.itkContourDirectedMeanDistanceImageFilterIUL2IUL2_cast(*args)",
"def publish_list(self, messages: list) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )",
"def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)",
"def makedist(dist_type, *pars, **kwards):\n a = 'sst.'\n b = dist_type\n c = a + b\n Scipy_stats_Obj = eval(c)\n dist = Scipy_stats_Obj(*pars, **kwards)\n \n return(dist, dist_type)",
"def _do_mapping(self):\n\n distro = None\n versions = None\n flavor = None\n\n try:\n distro = self._map_name(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n flavor = self._map_flavor(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n versions = self._map_version(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n return [DistroTuple(distro=distro, version=v, flavor=flavor) for v in versions]\n except:\n log.exception(\n 'Failed to fully construct the mapped distro from: {}, {}, {}'.format(self.from_distro,\n self.from_version,\n self.from_like_distro))\n raise",
"def generate_data(winbid_dist, save_path):\n\n winbid_dist = json.loads(winbid_dist)\n\n logger.info(f'Generate data for win-bid distributions...')\n\n data = [{'winbid_samples': generate_winbid_samples(**dist).tolist()}\n for dist in winbid_dist]\n\n logger.info(f'Store data in {save_path}.')\n with open(save_path, \"w\") as f:\n json.dump(data, f)",
"def test_output_create_dist(self):\n numbers_dist = sample2dist(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\"])\n VA_numbers = VoseAlias(numbers_dist)\n actual = VA_numbers.dist\n prob = Decimal(1)/Decimal(6)\n expected = {\"one\": prob, \"two\": prob, \"three\": prob,\n \"four\": prob, \"five\": prob, \"six\": prob}\n self.assertEqual(actual, expected)",
"def run_distgen(\n settings={},\n inputs='distgen.json',\n verbose=0):\n \n # Make distribution\n gen = Generator(inputs, verbose=verbose)\n\n #gen._input = update_nested_dict(gen._input, settings, verbose=verbose)\n\n for k,v in settings.items():\n vprint(f\"Replacing parameter {k} with value {v}.\", verbose>0, 0, True)\n gen[k] = v\n \n beam = gen.beam()\n\n # Write to file\n if 'file' in gen['output']:\n writer(gen['output']['type'], beam, gen['output']['file'],verbose)\n\n # Print beam stats\n if(verbose>0):\n beam.print_stats()\n\n return beam",
"def SetDistribution(self, dist=None):\n from mystic.math import Distribution\n if dist and Distribution not in dist.__class__.mro():\n dist = Distribution(dist) #XXX: or throw error?\n self._dist = dist\n return",
"def select_multiple_from_strategy(dist: dict, num: int) -> []:\n\n output = []\n dist_c = dist.copy()\n\n for _ in range(num):\n key = Tools.select_from_strategy(dist_c)\n output.append(key)\n\n del dist_c[key]\n dist_c = Tools.normalise(dist_c)\n\n return output",
"def generate_values(num_points: int, distribution: str, dist_params, sort: bool = False) -> np.ndarray:\n vals = method_map[distribution](*dist_params, size=num_points)\n if sort:\n vals.sort()\n return vals",
"def build_distribution_value(self):\n timeseries = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"type\": \"serviceruntime.googleapis.com/api/response_sizes\"\n }, \n \"points\": [\n {\n \"interval\": {\n \"endTime\": \"2019-02-19T04:00:00.841487Z\", \n \"startTime\": \"2019-02-19T03:00:00.841487Z\"\n }, \n \"value\": {\n \"distributionValue\": {\n \"count\": \"56\", \n \"mean\": 17,\n \"sumOfSquaredDeviation\": 1.296382457204002e-25,\n \"bucketCounts\": [\"56\"], \n \"bucketOptions\": {\n \"exponentialBuckets\": {\n \"scale\": 1, \n \"growthFactor\": 10, \n \"numFiniteBuckets\": 8\n }\n }\n }\n }\n }\n ], \n \"resource\": {\n \"labels\": {\n \"service\": \"monitoring.googleapis.com\", \n \"credential_id\": \"serviceaccount:106579349769273816070\", \n \"version\": \"v3\", \n \"location\": \"us-central1\", \n \"project_id\": \"ms-demo-app01\", \n \"method\": \"google.monitoring.v3.MetricService.ListMetricDescriptors\"\n }, \n \"type\": \"consumed_api\"\n }, \n \"valueType\": \"DISTRIBUTION\"}\n return timeseries"
]
| [
"0.60307014",
"0.5155594",
"0.49578184",
"0.49229342",
"0.47288564",
"0.46685475",
"0.46226797",
"0.46183845",
"0.4616375",
"0.45679697",
"0.45444563",
"0.44968802",
"0.44940984",
"0.44776946",
"0.44577244",
"0.44557306",
"0.4444763",
"0.44439286",
"0.44323635",
"0.44319963",
"0.4431021",
"0.4420535",
"0.4419624",
"0.44055703",
"0.44005916",
"0.43976885",
"0.4396647",
"0.43938565",
"0.43849048",
"0.4380655"
]
| 0.6566535 | 0 |
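As an aside on the `dists_to_ot` row above: a self-contained usage sketch is given below, assuming the `openturns` package is available. The converter is repeated so the snippet runs on its own, and the two distribution strings are illustrative.

```python
import openturns as ot

def dists_to_ot(dists):
    """Evaluate distribution strings such as 'Normal(0, 1)' into openTURNS objects."""
    try:
        return [eval('ot.' + dist, {'__builtins__': None}, {'ot': ot})
                for dist in dists]
    except (TypeError, AttributeError):
        raise AttributeError('OpenTURNS distribution unknown.')

dists = dists_to_ot(['Normal(0, 1)', 'Uniform(-1, 1)'])
print([dist.getMean() for dist in dists])
```

Passing `{'__builtins__': None}` as the globals mapping keeps the `eval` call from reaching anything other than the `openturns` namespace bound to `ot`, which is the point of the original converter's argument layout.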
Convert kernel to openTURNS. The kernel is converted to an openTURNS object. | def kernel_to_ot(kernel):
try:
kernel = eval('ot.' + kernel, {'__builtins__': None},
{'ot': __import__('openturns')})
except (TypeError, AttributeError):
raise AttributeError('OpenTURNS kernel unknown.')
return kernel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(context, cm_node, inputs, outputs):\n kernel_enum = {'linearKernel': 'LINEAR', 'polyKernel': 'POLY',\n 'rbfKernel': 'RBF', 'sigmoidKernel': 'SIGMOID', 'precomputedKernel': 'PRECOMPUTED'}\n kernel = cm_node.supportVectorClassifier.kernel\n kernel_val = kernel.WhichOneof('kernel')\n svc_kernel = kernel_enum[kernel_val]\n\n if kernel_val == 'rbfKernel':\n svc_kernel_params = [kernel.rbfKernel.gamma, 0.0, 0.0]\n elif kernel_val == 'polyKernel':\n svc_kernel_params = [kernel.polyKernel.gamma,\n kernel.polyKernel.coef0, kernel.polyKernel.degree]\n elif kernel_val == 'sigmoidKernel':\n svc_kernel_params = [kernel.sigmoidKernel.gamma,\n kernel.sigmoidKernel.coef0, 0.0]\n elif kernel_val == 'linearKernel':\n svc_kernel_params = [0.0, 0.0, 0.0]\n\n prob_a = cm_node.supportVectorClassifier.probA\n prob_b = cm_node.supportVectorClassifier.probB\n svc_vectors_per_class = cm_node.supportVectorClassifier.numberOfSupportVectorsPerClass\n n_supports, svc_support_vectors = extract_support_vectors_as_dense_tensor(cm_node.supportVectorClassifier)\n chain_coef = list(itertools.chain.from_iterable(\n [coef.alpha for coef in cm_node.supportVectorClassifier.coefficients]))\n svc_coefficients = chain_coef\n svc_rho = [-x for x in cm_node.supportVectorClassifier.rho]\n\n nb = NodeBuilder(context, 'SVMClassifier', op_domain='ai.onnx.ml')\n nb.add_attribute('kernel_type', svc_kernel)\n nb.add_attribute('kernel_params', svc_kernel_params)\n if prob_a:\n nb.add_attribute('prob_a', prob_a)\n if prob_b:\n nb.add_attribute('prob_b', prob_b)\n nb.add_attribute('vectors_per_class', svc_vectors_per_class)\n nb.add_attribute('support_vectors', svc_support_vectors)\n nb.add_attribute('coefficients', svc_coefficients)\n nb.add_attribute('rho', svc_rho)\n svc_classes = cm_node.supportVectorClassifier.WhichOneof('ClassLabels')\n if svc_classes == 'int64ClassLabels':\n class_labels = list(int(i) for i in cm_node.supportVectorClassifier.int64ClassLabels.vector)\n nb.add_attribute('classlabels_ints', class_labels)\n elif svc_classes == 'stringClassLabels':\n class_labels = list(str(s) for s in cm_node.supportVectorClassifier.stringClassLabels.vector)\n nb.add_attribute('classlabels_strings', class_labels)\n\n nb.extend_inputs(inputs)\n\n # Find the ONNX name for the predicted label in CoreML\n predicted_label_name = context.get_onnx_name(cm_node.description.predictedFeatureName)\n nb.add_output(predicted_label_name)\n\n # The variable used to store the class probabilities produced by ONNX linear classifier\n probability_tensor_name = context.get_unique_name('probability_tensor')\n nb.add_output(probability_tensor_name)\n\n nodes = [nb.make_node()]\n\n if cm_node.description.predictedProbabilitiesName != '':\n # Find the corresponding ONNX name for CoreML's probability output (a dictionary)\n predicted_probability_name = context.get_onnx_name(cm_node.description.predictedProbabilitiesName)\n # Create a ZipMap to connect probability tensor and probability dictionary\n nodes.append(model_util.make_zipmap_node(context, probability_tensor_name,\n predicted_probability_name, class_labels))\n\n return nodes",
"def kernel(self, kernel):\n self._context[\"kernel\"] = kernel",
"def kernel_to_skl(kernel):\n try:\n kernel = eval('kernels.' + kernel, {'__builtins__': None},\n {'kernels': __import__('sklearn.gaussian_process.kernels',\n fromlist=['kernels'])})\n except (TypeError, AttributeError):\n raise AttributeError('scikit-learn kernel unknown.')\n\n return kernel",
"def cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterIF2IF2_cast(*args)",
"def cast(*args):\n return _itkScalarConnectedComponentImageFilterPython.itkScalarConnectedComponentImageFilterIUS2IUS2_cast(*args)",
"def kernel(self, verbose=False):\n\n return self._action(FP_ModuleMorphism.kernel, verbose)",
"def intel_run(kernel_call, kernel_def, kernel='autosa.tmp/output/src/kernel_autosa_opencl.cpp'):\n\n # Load kernel call file\n module_calls = []\n fifo_decls = []\n with open(kernel_call, 'r') as f:\n add = False\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the fifo declaration and add to the list\n if add:\n line = line.strip()\n fifo_decls.append(line)\n if line.find('/* FIFO Declaration */') != -1:\n if add:\n fifo_decls.pop(len(fifo_decls) - 1)\n add = not add\n\n with open(kernel_call, 'r') as f:\n add = False\n module_call = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module call and add to the list\n if add:\n line = line.strip()\n module_call.append(line)\n if line.find('/* Module Call */') != -1:\n if add:\n module_call.pop(len(module_call) - 1)\n module_calls.append(module_call.copy())\n module_call.clear()\n add = not add\n\n module_defs = {}\n headers = []\n with open(kernel_def, 'r') as f:\n while True:\n line = f.readline()\n if not line:\n break\n if line.find('#include') != -1:\n line = line.strip()\n headers.append(line)\n\n with open(kernel_def, 'r') as f:\n add = False\n module_def = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module definition and add to the dict\n if add:\n module_def.append(line)\n # Extract the module name\n if (line.find('__kernel')) != -1:\n m = re.search('void (.+?)\\(', line)\n if m:\n module_name = m.group(1)\n if line.find('/* Module Definition */') != -1:\n if add:\n module_def.pop(len(module_def) - 1)\n module_defs[module_name] = module_def.copy()\n module_def.clear()\n add = not add\n\n # compose the kernel file\n kernel = str(kernel)\n generate_intel_kernel(kernel, headers, module_defs, module_calls, fifo_decls)",
"def get_kernel(self, kernel_id):",
"def cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterID2ID2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterISS2ISS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS2ISS2_cast(obj)",
"def cast(*args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2IUS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2IUS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIF2IUS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2IUS2_cast(obj)",
"def cast(*args):\n return _itkSpatialObjectWriterPython.itkSpatialObjectWriter2_cast(*args)",
"def cast(*args):\n return _itkLabelShapeOpeningImageFilterPython.itkLabelShapeOpeningImageFilterIUS2_cast(*args)",
"def cast(*args):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUS2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIUS2IUS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUS2IUS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUS2IUS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2IUS2_cast(obj)",
"def cast(*args):\n return _itkLabelShapeOpeningImageFilterPython.itkLabelShapeOpeningImageFilterIUC2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkMeshSourcePSUC2 *\":\n return _itkMeshSourcePython.itkMeshSourcePSUC2_cast(obj)",
"def cast(*args):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUC2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUC2IUS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC2IUS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)",
"def itkCosImageFilterID2ID2_cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterID2ID2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2ISS2_cast(obj)",
"def itkCosImageFilterIF2IF2_cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterIF2IF2_cast(*args)",
"def create_kernel(name: str) -> str:\n ...",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIUC2IUC2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUC2IUC2_cast(obj)"
]
| [
"0.56849927",
"0.5389978",
"0.5379721",
"0.52513784",
"0.5185308",
"0.51627207",
"0.5117263",
"0.51141346",
"0.50944746",
"0.50309515",
"0.5026859",
"0.50176793",
"0.49958104",
"0.49811846",
"0.49790224",
"0.49762222",
"0.49425617",
"0.49348697",
"0.49324325",
"0.49317148",
"0.4920655",
"0.4911933",
"0.49103186",
"0.4907164",
"0.49042198",
"0.4896445",
"0.48913434",
"0.48878986",
"0.4876827",
"0.48662424"
]
| 0.72218746 | 0 |
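Similarly, a minimal usage sketch for the `kernel_to_ot` row above, again assuming `openturns` is installed; the `SquaredExponential` covariance model and its scale/amplitude values are illustrative.

```python
import openturns as ot

def kernel_to_ot(kernel):
    """Evaluate a covariance-model string into an openTURNS object."""
    try:
        return eval('ot.' + kernel, {'__builtins__': None}, {'ot': ot})
    except (TypeError, AttributeError):
        raise AttributeError('OpenTURNS kernel unknown.')

cov_model = kernel_to_ot("SquaredExponential([0.5], [1.0])")
print(cov_model)
```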
Convert kernel to scikit-learn. The kernel is converted to a scikit-learn object. | def kernel_to_skl(kernel):
try:
kernel = eval('kernels.' + kernel, {'__builtins__': None},
{'kernels': __import__('sklearn.gaussian_process.kernels',
fromlist=['kernels'])})
except (TypeError, AttributeError):
raise AttributeError('scikit-learn kernel unknown.')
return kernel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterISS2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2ISS2_cast(obj)",
"def get_kernel(self, kernel_id):",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIF2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIUC2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2ISS2_cast(obj)",
"def itkLabelStatisticsImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterISS2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIF2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUC2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIUS2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_cast(obj)",
"def itkLabelStatisticsImageFilterIF2ISS2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIF2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterISS2IUC2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUC2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2IUC2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2IUC2_cast(obj)",
"def itkLabelStatisticsImageFilterIUC2ISS2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIUC2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterISS2IUS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUS2_cast(obj)",
"def create_kernel(name: str) -> str:\n ...",
"def itkScalarImageKmeansImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2ISS2_cast(obj)",
"def to_swc(self, contributors=\"\"):\n from . import __version__\n sx, sy, sz = np.diag(self.transform)[:3]\n\n swc_header = f\"\"\"# ORIGINAL_SOURCE CloudVolume {__version__}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {contributors}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHINKAGE_CORRECTION \n# VERSION_NUMBER {__version__}\n# VERSION_DATE {datetime.datetime.utcnow().isoformat()}\n# SCALE {sx:.6f} {sy:.6f} {sz:.6f}\n\"\"\"\n\n def generate_swc(skel, offset):\n if skel.edges.size == 0:\n return \"\"\n\n index = defaultdict(set)\n visited = defaultdict(bool)\n for e1, e2 in skel.edges:\n index[e1].add(e2)\n index[e2].add(e1)\n\n stack = [ skel.edges[0,0] ]\n parents = [ -1 ]\n\n swc = \"\"\n\n while stack:\n node = stack.pop()\n parent = parents.pop()\n\n if visited[node]:\n continue\n\n swc += \"{n} {T} {x:0.6f} {y:0.6f} {z:0.6f} {R:0.6f} {P}\\n\".format(\n n=(node + 1 + offset),\n T=skel.vertex_types[node],\n x=skel.vertices[node][0],\n y=skel.vertices[node][1],\n z=skel.vertices[node][2],\n R=skel.radii[node],\n P=parent if parent == -1 else (parent + 1 + offset),\n )\n\n visited[node] = True\n \n for child in index[node]:\n stack.append(child)\n parents.append(node)\n\n return swc\n\n skels = self.components()\n\n swc = swc_header + \"\\n\"\n offset = 0\n for skel in skels:\n swc += generate_swc(skel, offset) + \"\\n\"\n offset += skel.vertices.shape[0]\n\n return swc",
"def _traces_to_binary(self, X):\n return keras.utils.to_categorical(X, num_classes=self.max_n_span_types, dtype='int32')",
"def convert(context, cm_node, inputs, outputs):\n kernel_enum = {'linearKernel': 'LINEAR', 'polyKernel': 'POLY',\n 'rbfKernel': 'RBF', 'sigmoidKernel': 'SIGMOID', 'precomputedKernel': 'PRECOMPUTED'}\n kernel = cm_node.supportVectorClassifier.kernel\n kernel_val = kernel.WhichOneof('kernel')\n svc_kernel = kernel_enum[kernel_val]\n\n if kernel_val == 'rbfKernel':\n svc_kernel_params = [kernel.rbfKernel.gamma, 0.0, 0.0]\n elif kernel_val == 'polyKernel':\n svc_kernel_params = [kernel.polyKernel.gamma,\n kernel.polyKernel.coef0, kernel.polyKernel.degree]\n elif kernel_val == 'sigmoidKernel':\n svc_kernel_params = [kernel.sigmoidKernel.gamma,\n kernel.sigmoidKernel.coef0, 0.0]\n elif kernel_val == 'linearKernel':\n svc_kernel_params = [0.0, 0.0, 0.0]\n\n prob_a = cm_node.supportVectorClassifier.probA\n prob_b = cm_node.supportVectorClassifier.probB\n svc_vectors_per_class = cm_node.supportVectorClassifier.numberOfSupportVectorsPerClass\n n_supports, svc_support_vectors = extract_support_vectors_as_dense_tensor(cm_node.supportVectorClassifier)\n chain_coef = list(itertools.chain.from_iterable(\n [coef.alpha for coef in cm_node.supportVectorClassifier.coefficients]))\n svc_coefficients = chain_coef\n svc_rho = [-x for x in cm_node.supportVectorClassifier.rho]\n\n nb = NodeBuilder(context, 'SVMClassifier', op_domain='ai.onnx.ml')\n nb.add_attribute('kernel_type', svc_kernel)\n nb.add_attribute('kernel_params', svc_kernel_params)\n if prob_a:\n nb.add_attribute('prob_a', prob_a)\n if prob_b:\n nb.add_attribute('prob_b', prob_b)\n nb.add_attribute('vectors_per_class', svc_vectors_per_class)\n nb.add_attribute('support_vectors', svc_support_vectors)\n nb.add_attribute('coefficients', svc_coefficients)\n nb.add_attribute('rho', svc_rho)\n svc_classes = cm_node.supportVectorClassifier.WhichOneof('ClassLabels')\n if svc_classes == 'int64ClassLabels':\n class_labels = list(int(i) for i in cm_node.supportVectorClassifier.int64ClassLabels.vector)\n nb.add_attribute('classlabels_ints', class_labels)\n elif svc_classes == 'stringClassLabels':\n class_labels = list(str(s) for s in cm_node.supportVectorClassifier.stringClassLabels.vector)\n nb.add_attribute('classlabels_strings', class_labels)\n\n nb.extend_inputs(inputs)\n\n # Find the ONNX name for the predicted label in CoreML\n predicted_label_name = context.get_onnx_name(cm_node.description.predictedFeatureName)\n nb.add_output(predicted_label_name)\n\n # The variable used to store the class probabilities produced by ONNX linear classifier\n probability_tensor_name = context.get_unique_name('probability_tensor')\n nb.add_output(probability_tensor_name)\n\n nodes = [nb.make_node()]\n\n if cm_node.description.predictedProbabilitiesName != '':\n # Find the corresponding ONNX name for CoreML's probability output (a dictionary)\n predicted_probability_name = context.get_onnx_name(cm_node.description.predictedProbabilitiesName)\n # Create a ZipMap to connect probability tensor and probability dictionary\n nodes.append(model_util.make_zipmap_node(context, probability_tensor_name,\n predicted_probability_name, class_labels))\n\n return nodes",
"def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def itkLabelStatisticsImageFilterISS2IUC2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterISS2IUC2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUC2_cast(obj)",
"def itkLabelStatisticsImageFilterIUS2ISS2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIUS2ISS2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_cast(obj)",
"def modelKSVM():\n\n list_kernel_type = ['linear', 'poly', 'rbf']\n random_state = 20 # Do not change this random_state\n\n objs_KSVM = []\n\n # Create a list of objects for the classifier for each of the above \"kernel\" types\n for kernel in list_kernel_type:\n svm = SVC(kernel=kernel, random_state=random_state)\n objs_KSVM.append(svm)\n\n return objs_KSVM",
"def itkScalarImageKmeansImageFilterIF2ISS2_cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIF2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2ISS2_cast(obj)",
"def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def convert_topk(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get('axis', '-1'))\n k = int(attrs.get('k', '1'))\n ret_type = attrs.get('ret_typ')\n dtype = attrs.get('dtype')\n outputs = [name + '_output0']\n\n if ret_type and ret_type == 'both':\n if dtype and dtype == 'int64':\n outputs.append(name + '_output1')\n else:\n raise NotImplementedError(\"ONNX expects indices to be of type int64\")\n else:\n raise NotImplementedError(\"ONNX expects both value and indices as output\")\n\n export_nodes = []\n\n k = np.asarray([k], dtype=np.int)\n k_node = create_helper_tensor_node(k, name + '__k', kwargs)\n export_nodes.extend(k_node)\n k_node = k_node[-1].name\n\n input_node = input_nodes[0]\n topk_node = onnx.helper.make_node(\n \"TopK\",\n [input_node, k_node],\n outputs,\n axis=axis,\n name=name\n )\n export_nodes.extend([topk_node])\n\n return [topk_node]",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterISS2IUS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2IUS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIF2IUC2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_cast(obj)",
"def _convert_to_onehot_labels(seg_label, num_classes):\n\n batch_size = seg_label.size(0)\n onehot_labels = seg_label.new_zeros((batch_size, num_classes))\n for i in range(batch_size):\n hist = seg_label[i].float().histc(\n bins=num_classes, min=0, max=num_classes - 1)\n onehot_labels[i] = hist > 0\n return onehot_labels"
]
| [
"0.5159945",
"0.5121114",
"0.5094813",
"0.5053676",
"0.5037607",
"0.5019525",
"0.49861404",
"0.49394354",
"0.49363768",
"0.4923758",
"0.4917534",
"0.4887722",
"0.48826864",
"0.4859425",
"0.48550135",
"0.4851223",
"0.4845716",
"0.48123005",
"0.48054653",
"0.47690117",
"0.4768677",
"0.47426343",
"0.47235566",
"0.47197884",
"0.46874788",
"0.4674858",
"0.46707177",
"0.46664193",
"0.46651885",
"0.46614826"
]
| 0.59245807 | 0 |
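And the scikit-learn counterpart from the `kernel_to_skl` row above, shown as a runnable sketch; the simplified import, the `Matern` kernel string, and the follow-up `GaussianProcessRegressor` are illustrative, not taken from the dataset.

```python
from sklearn.gaussian_process import GaussianProcessRegressor, kernels

def kernel_to_skl(kernel):
    """Evaluate a kernel string into a scikit-learn kernel object."""
    try:
        return eval('kernels.' + kernel, {'__builtins__': None}, {'kernels': kernels})
    except (TypeError, AttributeError):
        raise AttributeError('scikit-learn kernel unknown.')

k = kernel_to_skl("Matern(length_scale=0.5, nu=1.5)")
gp = GaussianProcessRegressor(kernel=k)
print(gp.kernel)
```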
Performs Friedman's test. G is an array of arrays (groups). The first group is G[0]; each group must have the same number of elements! | def friedman(G, alpha = 0.05, ignoreties = False, onetailed = True, verbose = True):
    # Note: ignoreties and onetailed are accepted for API compatibility but are not used below.
    nclasses = len(G)      # number of groups (treatments)
    nblocks = len(G[0])    # number of observations per group (blocks)
    Rank = [0] * nclasses  # per-group rank sums
    for j in range(nblocks):
        # Rank the j-th observation of every group, averaging ranks over ties.
        row = [(G[i][j], i) for i in range(nclasses)]
        row.sort()
        start = 0
        while start < nclasses:
            end = start
            for k in range(start + 1, nclasses):
                if not isequalfloats(row[k - 1][0], row[k][0]):
                    end = k - 1
                    break
            else:
                # No break: the tie (if any) runs to the end of the row.
                end = nclasses - 1
            if end > start:
                rank = (start + end) / 2.0 + 1  # average rank across the tied block
            else:
                rank = start + 1
            for k in range(start, end + 1):
                index = row[k][1]
                Rank[index] += rank
            start = end + 1
    sumRankssqr = sum(rank * rank for rank in Rank)
    # Friedman statistic: Q = 12 / (n * k * (k + 1)) * sum(R_j^2) - 3 * n * (k + 1)
    Friedman = 12.0 / (nblocks * nclasses * (nclasses + 1)) * sumRankssqr - 3 * (nclasses + 1) * nblocks
    df = nclasses - 1
    if verbose:
        print("Friedman test at the " + str(alpha) + " level of significance")
        print("Test statistic:", Friedman)
        print("Class rank sums:", " ".join(str(rank) for rank in Rank))
        # stats is assumed to be scipy.stats, imported elsewhere in the module.
        pval = stats.chi2.sf(Friedman, df)
        print("p-value for", df, "degrees of freedom:", pval, pval < alpha)
        print("class ranks + avg + median")
        for i, rank in enumerate(Rank):
            # avg and getPercentile are helpers defined elsewhere in the module.
            print("%3d %6.1f %6.2f %6.2f" % (i + 1, rank, avg(G[i]), getPercentile(G[i], 50)))
    return Friedman | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ford_fulkerson_algo() -> np.ndarray:\r\n res = ford_fulkerson_algorithm(np.array(ex_sample_graph), 0, 10)\r\n print(\"Result: \")\r\n print(res)\r\n return res",
"def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. \n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black",
"def test_griewank(self):\n fun = get_problem('griewank', self.dimension)\n self.assertEqual(fun(self.array), 0.0)",
"def test_chung_reynolds(self):\n fun = get_problem('chung_reynolds', self.dimension, -100, 100)\n self.assertEqual(fun(self.array), 0.0)",
"def test_get_grid_face_edges(flopy_dis_mf6):\n mf6 = flopy_dis_mf6[1]\n mf6.initialize()\n\n with pytest.raises(NotImplementedError):\n mf6.get_grid_face_edges(1, np.zeros((1, 1)))",
"def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)",
"def test_gauss_array():\n x,g = cw04.gen_gaussian_array(-1,1,3)\n desired = np.array([0.24197072451914337, 0.3989422804014327, 0.24197072451914337])\n print(\"Obtained:\",g)\n print(\"Desired:\",desired)\n # Numpy has built-in testing functions to iterate over arrays and compare\n # values up to certain tolerances\n np.testing.assert_almost_equal(g, desired)",
"def which_group(list_of_elements):\n if is_Matrix(list_of_elements[-1]):\n R = PolynomialRing(list_of_elements[-1].base_ring(),'z')\n z = R.gen(0)\n G=[(t[0,0]*z+t[0,1])/(t[1,0]*z+t[1,1]) for t in list_of_elements]\n else:\n G = list_of_elements\n\n n = ZZ(len(G))\n\n # invalid input\n if n == 0:\n raise ValueError(\"group must have at least one element\")\n\n # define ground field and ambient function field\n rational_function = G[-1]\n\n if rational_function.parent().is_field():\n K = rational_function.parent()\n R = K.ring()\n else:\n R = rational_function.parent()\n K = R.fraction_field()\n\n z = R.gen(0)\n p = K.characteristic()\n\n # factor n = mp^e; set e = 0 and m = n if p = 0 (Sage sets 0^0 = 1)\n if p > 0:\n m = n.prime_to_m_part(p)\n e = ZZ(n/m).exact_log(p)\n else:\n m = n\n e = 0\n\n # Determine if G is cyclic or dihedral.\n # This determines the maximal cyclic subgroup and the maximal cyclic\n # p-regular subgroup. Algorithm terminates if the order of this subgroup agrees with\n # the order of the group.\n max_reg_cyclic = [1, z, [z]] # initialize order of cyclic p-regular subgroup and generator\n discard = [] # list of elements already considered\n\n for g in G:\n if g not in discard:\n H = [g]\n for i in range(n-1):\n h = g(H[-1])\n H.append(h)\n H = list(set(H))\n if len(H) == n:\n return 'Cyclic of order {0}'.format(n)\n if len(H) > max_reg_cyclic[0] and gcd(len(H), p) != p:\n max_reg_cyclic = [len(H), g, H]\n discard = list(set(discard +H)) # adjoin all new elements to discard\n\n n_reg = max_reg_cyclic[0]\n # Test for dihedral subgroup. A subgroup of index 2 is always normal, so the\n # presence of a cyclic subgroup H of index 2 indicates the group is either\n # H x Z/2Z or dihedral. The former occurs only if H has order 1 or 2, both of\n # which are dihedral.\n if 2*n_reg == n:\n for g in G:\n if g not in max_reg_cyclic[2]:\n return 'Dihedral of order {0}'.format(n)\n # Check the p-irregular cases. There is overlap in these cases when p^e = 2,\n # which is dihedral and so already dealt with above. By the classification theorem,\n # these are either p-semi-elementary, PGL(2,q), PSL(2,q), or A_5 when p=3. The latter\n # case is already covered by the remaining sporadic cases below.\n if e > 0:\n if n_reg == m: # p-semi-elementary\n return '{0}-semi-elementary of order {1}'.format(p, n)\n if n_reg == m / (p**e - 1) and m == p**(2*e) - 1: # PGL(2)\n return 'PGL(2,{0})'.format(p**e)\n if n_reg == m / (p**e - 1) and m == (1/2)*(p**(2*e) - 1): # PSL(2)\n return 'PSL(2,{0})'.format(p**e)\n\n # Treat sporadic cases\n if n == 12:\n return ['A_4']\n elif n == 24:\n return ['S_4']\n else:\n return ['A_5']",
"def test_run_grouped_correlation(self):\r\n # hand calculation of spearman and pearson for 01\r\n # md_g1 = array([6.1, 0.0, 14.2, 6.5, 21])\r\n # md_g2 = array([.3, 9.1, .8, 5.0, 11])\r\n # o1_g1 = array([22, 48, 34, 0, 0])\r\n # o1_g2 = array([0, 15, 0, 76, 74])\r\n # c1_g1 = -0.6155870112510925 #spearman(md_g1, o1_g1)\r\n # c2_g2 = 0.66688592885535025 #spearman(md_g2, o1_g2)\r\n # fisher_population_correlation([-0.6155870112510925,\r\n # 0.66688592885535025], [5,5])\r\n # fpc, h = (0.043595171909468329, 0.12776325359984511)\r\n g1_rhos = [corrcoef(self.otus1[0][i], self.mds1[0])[0][1]\r\n for i in range(10)]\r\n g2_rhos = [corrcoef(self.otus1[1][i], self.mds1[1])[0][1]\r\n for i in range(10)]\r\n exp_rhos = [g1_rhos, g2_rhos]\r\n g1_pvals = [assign_correlation_pval(g1_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n g2_pvals = [assign_correlation_pval(g2_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n exp_pvals = [g1_pvals, g2_pvals]\r\n exp_f_pvals = [fisher([g1_pvals[i], g2_pvals[i]]) for i in range(10)]\r\n\r\n tmp = [fisher_population_correlation([g1_rhos[i], g2_rhos[i]], [5, 5])\r\n for i in range(10)]\r\n exp_f_rhos = [x[0] for x in tmp]\r\n exp_f_hs = [x[1] for x in tmp]\r\n\r\n obs_rhos, obs_pvals, obs_f_pvals, obs_f_rhos, obs_f_hs = \\\r\n run_grouped_correlation(self.mds1, self.otus1, 'pearson',\r\n CORRELATION_TEST_CHOICES, 'parametric_t_distribution')\r\n\r\n assert_almost_equal(obs_rhos, exp_rhos)\r\n assert_almost_equal(obs_pvals, exp_pvals)\r\n assert_almost_equal(obs_f_pvals, exp_f_pvals)\r\n assert_almost_equal(obs_f_rhos, exp_f_rhos)\r\n assert_almost_equal(obs_f_hs, exp_f_hs)",
"def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered",
"def testgradsdim(self):\r\n assert self.data.grads.shape == (len(self.data.geovalues),self.data.natom,3)",
"def test_f_uni(self):\n s = np.array([100.0, 0, 0, 0, 0, 0])\n e = np.array([0.1, -0.05, -0.05, 0, 0, 0])\n f_direct = self.model.f(s, e, self.t, self.T)\n \n sdev = s - np.array([1,1,1,0,0,0]) * np.sum(s[:3]) / 3.0\n se = np.sqrt(3.0/2.0) * la.norm(sdev)\n ee = np.sqrt(2.0/3.0) * la.norm(e)\n\n g_direct = self.smodel.g(se, ee, self.t, self.T)\n \n self.assertTrue(np.isclose(g_direct, f_direct[0]))\n\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[1]))\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[2]))\n\n self.assertTrue(np.allclose([0,0,0], f_direct[3:]))",
"def test_schwefel222(self):\n fun = get_problem('schwefel222', self.dimension)\n self.assertEqual(fun(self.array), 0.0)",
"def count_accuracy(G_true, G):\n B_true = G_true != 0# nx.to_numpy_array(G_true) != 0\n B = G != 0# nx.to_numpy_array(G) != 0\n d = B.shape[0]\n # linear index of nonzeros\n pred = np.flatnonzero(B)\n cond = np.flatnonzero(B_true)\n cond_reversed = np.flatnonzero(B_true.T)\n cond_skeleton = np.concatenate([cond, cond_reversed])\n # true pos\n true_pos = np.intersect1d(pred, cond, assume_unique=True)\n\n # false pos\n false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)\n # reverse\n extra = np.setdiff1d(pred, cond, assume_unique=True)\n reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)\n # compute ratio\n pred_size = len(pred)\n cond_neg_size = 0.5 * d * (d - 1) - len(cond)\n fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)\n tpr = float(len(true_pos)) / max(len(cond), 1)\n fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)\n # structural hamming distance\n B_lower = np.tril(B + B.T)\n pred_lower = np.flatnonzero(B_lower)\n cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))\n extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)\n missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)\n shd = len(extra_lower) + len(missing_lower) + len(reverse)\n return shd, tpr, fpr, fdr, pred_size",
"def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0",
"def test_schwefel221(self):\n fun = get_problem('schwefel221', self.dimension)\n self.assertEqual(fun(self.array), 0.0)",
"def test_score_groups(test_input, expected):\n score = sp.score_groups(test_input)\n assert score == expected",
"def run_tests(groups=None,fn='candy_out'):\n if groups is None: groups = read_groups()\n ev = np.array([group_tests(g) for g in groups])\n fields = 'time dm snr rank env dsG dsL ws1 ws2 td1 td2'\n dt = [(f,np.float64) for f in fields.split()]\n ev = ev.view(dt).view(np.recarray)\n np.save(fn, ev)",
"def test_group(self):\n # leave out particle 0\n group = hoomd.group.tags(1,2)\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))",
"def test_stress():\n\n group = Group({\n \"a\": Numerical(),\n \"b\": Numerical(),\n \"c\": Categorical(list(range(5))),\n \"d\": Hashed(buckets=5),\n \"e\": Hashed(buckets=5,\n random_sign=True),\n })\n\n for i in range(100):\n group.set_a(random())\n group.set_b(random())\n group.set_c(randint(0, 4))\n for i in range(10):\n group.set_d(randstr())\n group.set_e(randstr())\n group.push()\n\n array = group.array()\n assert array.shape == (100, 17)",
"def test_GrangerAnalyzer():\r\n\r\n # Start by generating some MAR processes (according to Ding and Bressler),\r\n a1 = np.array([[0.9, 0],\r\n [0.16, 0.8]])\r\n\r\n a2 = np.array([[-0.5, 0],\r\n [-0.2, -0.5]])\r\n\r\n am = np.array([-a1, -a2])\r\n\r\n x_var = 1\r\n y_var = 0.7\r\n xy_cov = 0.4\r\n cov = np.array([[x_var, xy_cov],\r\n [xy_cov, y_var]])\r\n\r\n L = 1024\r\n z, nz = utils.generate_mar(am, cov, L)\r\n\r\n # Move on to testing the Analyzer object itself:\r\n ts1 = ts.TimeSeries(data=z, sampling_rate=np.pi)\r\n g1 = gc.GrangerAnalyzer(ts1)\r\n\r\n # Check that things have the right shapes:\r\n npt.assert_equal(g1.frequencies.shape[-1], g1._n_freqs // 2 + 1)\r\n npt.assert_equal(g1.causality_xy[0, 1].shape, g1.causality_yx[0, 1].shape)\r\n\r\n # Test inputting ij:\r\n g2 = gc.GrangerAnalyzer(ts1, ij=[(0, 1), (1, 0)])\r\n\r\n # x => y for one is like y => x for the other:\r\n npt.assert_almost_equal(g1.causality_yx[1, 0], g2.causality_xy[0, 1])",
"def test_check_gff():\n gene_list = []\n gene, gene_list= check_gff(INPUT_ok, gene_list)\n # print(gene, gene_list)\n assert_equal(gene, \"GPLIN_000000100\")\n assert_equal(gene_list, [\"GPLIN_000000100\"])",
"def test_2x2_gf(z, eps0, eps1, hopping):\n assume(abs(z.imag) > 1e-6)\n assume(abs(eps0 - eps1) > 1e-16 or abs(hopping) > 1e-16)\n ham = np.array([[eps0, hopping],\n [hopping, eps1]])\n dec = gt.matrix.decompose_hamiltonian(ham)\n gf_num = dec.reconstruct(1/(z - dec.eig), kind='diag')\n assert_allclose(gt.matrix.gf_2x2_z(z, eps0=eps0, eps1=eps1, hopping=hopping),\n gf_num, rtol=1e-5, atol=1e-14)\n g0 = partial(gt.bethe_hilbert_transform, half_bandwidth=1)\n gf_num = dec.reconstruct(g0(z - dec.eig), kind='diag')\n gf_2x2 = gt.matrix.gf_2x2_z(z, eps0=eps0, eps1=eps1, hopping=hopping, hilbert_trafo=g0)\n assert_allclose(gf_2x2, gf_num, rtol=1e-5, atol=1e-14)",
"def test_fprop_matrix(self):\n r = []\n for i in xrange(self.N):\n max_input_sequence_len = self.rng.random_integers(300)\n sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)\n embd_dim = self.rng.random_integers(10000)\n batch_size, output_dim = self.rng.random_integers(2000, size=2)\n W = self.get_orthogonal_matrix(embd_dim, output_dim)\n row_idxs = self.rng.randint(embd_dim, size=(batch_size, max_input_sequence_len)).astype(np.int32)\n\n output = {}\n for processor_type in ['gpu', 'cpu']:\n quagga.processor_type = processor_type\n qrow_idxs = Connector(Matrix.from_npa(row_idxs))\n qW = Connector(Matrix.from_npa(W))\n row_slicing_block = RowSlicingBlock(qW, qrow_idxs)\n qW.fprop()\n qrow_idxs.ncols = sequence_len\n qrow_idxs.fprop()\n row_slicing_block.fprop()\n output[processor_type] = row_slicing_block.output.to_host()\n\n for output_gpu, output_cpu in izip(output['gpu'], output['cpu']):\n r.append(np.allclose(output_gpu, output_cpu))\n\n self.assertEqual(sum(r), len(r))",
"def return_MatchUpTest___w():\n\n ####################################################################################################################\n # 1. Initialise test data\n ####################################################################################################################\n\n w1 = array([[0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])\n w2 = array([[0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])\n\n u1 = array([1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0])\n u2 = array([2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0])\n\n values = array([5.0, 3.0, 3.0, 2.5, 6.0, 3.0, 2.0, 4.0, 3.0, 4.0])\n unc = [Uncertainty(3, (0, 0)),\n Uncertainty(3, (1, 1))]\n ks = array([1.2, 1.7, 1.3, 1.4, 1.3])\n unck = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25]))]\n idx = {\"Nm\": [5],\n \"cNm\": [0, 5, 10],\n \"Im\": [[1, 2]],\n \"sensors\": [1, 2],\n \"sensor_ms\": [1],\n \"n_sensor\": [1, 2],\n \"n_mu\": [1, 1],\n \"n_cov\": [1, 1],\n \"N_var\": [5, 5],\n \"idx\": [0, 5, 10],\n \"Ia\": [1, 1, 1, 2, 2, 2]}\n a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])\n w_matrices = [csr_matrix(w1), csr_matrix(w2)]\n u_matrices = [u1, u2]\n\n ####################################################################################################################\n # 3. Initialise MatchUp object\n ####################################################################################################################\n\n MatchUpTest = MatchUp()\n MatchUpTest.values = values\n MatchUpTest.unc = unc\n MatchUpTest.ks = ks\n MatchUpTest.unck = unck\n MatchUpTest.idx = idx\n MatchUpTest.a = a\n MatchUpTest.w_matrices = w_matrices\n MatchUpTest.u_matrices = u_matrices\n\n return MatchUpTest",
"def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)",
"def test_schwefel(self):\n fun = get_problem('schwefel', self.dimension)\n self.assertAlmostEqual(fun(self.array3), 0.0, places=3)",
"def split_training_testing(X, Y, gnd, negative=10000, per=0.05):\n df_x = pd.DataFrame(X)\n df_x['y'] = Y\n df_x['gnd'] = gnd\n df_x.sort_values(by=['y'], inplace=True, ascending=False)\n frac_positive = (df_x[df_x['y'] == 1].shape[0])/float(df_x.shape[0])\n split = int(frac_positive * per * df_x.shape[0])\n df_x.reset_index(drop=True, inplace=True)\n fraud = df_x[df_x['y'] == 1]\n # Shuffle inplace\n fraud = fraud.sample(frac=1, random_state=0).reset_index(drop=True)\n test = fraud.iloc[:split]\n train_ = fraud.iloc[split:]\n train = pd.concat([train_, df_x.iloc[fraud.shape[0]:].sample(n = negative, random_state=0)], ignore_index=True)\n # Shuffle inplace\n train = train.sample(frac=1, random_state=0).reset_index(drop=True)\n #train = randomSample(train, negative)\n y_train = train['y'].as_matrix()\n y_train_gnd = train['gnd'].as_matrix()\n train = train.drop(['y'], axis=1)\n train = train.drop(['gnd'], axis=1)\n \n y_test = test['y'].as_matrix()\n y_test_gnd = test['gnd'].as_matrix()\n test = test.drop(['y'], axis=1)\n test = test.drop(['gnd'], axis=1)\n return train.as_matrix(), y_train, y_train_gnd, test.as_matrix(), y_test, y_test_gnd",
"def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)",
"def test_30_test_init_array(self, persons_gi):\n example = Example(groups=7, origins=5)\n\n example.init_array('param_g', 7)\n assert example.param_g.shape == (7, )"
]
| [
"0.6315162",
"0.55101943",
"0.5471397",
"0.53878236",
"0.5365093",
"0.5364887",
"0.5359619",
"0.5357744",
"0.5303135",
"0.529447",
"0.5270481",
"0.52081114",
"0.5182559",
"0.5176437",
"0.516502",
"0.5108524",
"0.50582075",
"0.50428694",
"0.50377256",
"0.503524",
"0.50346583",
"0.5010726",
"0.50073725",
"0.49962556",
"0.49783233",
"0.4974028",
"0.49595374",
"0.49394473",
"0.49381047",
"0.49298394"
]
| 0.63828975 | 0 |
Deletes the old file from the filesystem when the corresponding `ProductImage` object is updated with a new file. | def auto_delete_file_on_change(sender, instance, **kwargs):
if not instance.pk:
return False
try:
old_image = ProductImage.objects.get(pk=instance.pk).image
except ProductImage.DoesNotExist:
return False
new_image = instance.image
if not old_image == new_image:
if pathlib.Path(old_image.path).is_file():
pathlib.Path(old_image.path).unlink() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auto_delete_file_on_change(sender, instance, **kwargs):\n if not instance.pk:\n return False\n try:\n old_file = Image.objects.get(pk=instance.pk).img_png\n except Image.DoesNotExist:\n return False\n new_file = instance.img_png\n if old_file and not old_file == new_file:\n if os.path.isfile(old_file.path):\n os.remove(old_file.path)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\r\n if instance.image:\r\n if pathlib.Path(instance.image.path).is_file():\r\n pathlib.Path(instance.image.path).unlink()",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.img:\n if os.path.isfile(instance.img.path):\n os.remove(instance.img.path)",
"def __on_delete(self):\n self.image.delete()",
"def __on_delete(self):\n self.image.delete()",
"def auto_delete_file_on_change(sender, instance, **kwargs):\n if not instance.pk:\n return False\n\n try:\n old_file = sender.objects.get(pk=instance.pk).image\n except sender.DoesNotExist:\n return False\n new_file = instance.image\n if not old_file == \"\":\n if not old_file == new_file:\n if os.path.isfile(old_file.path):\n os.remove(old_file.path)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.img_png:\n if os.path.isfile(instance.img_png.path):\n os.remove(instance.img_png.path)",
"def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)",
"def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)",
"def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)",
"def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)",
"def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)",
"def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)",
"def eject_image(self, identity, device):\n device_info = self._get_device(identity, device)\n\n device_info['Image'] = ''\n device_info['ImageName'] = ''\n device_info['Inserted'] = False\n device_info['WriteProtected'] = False\n device_info['UserName'] = ''\n device_info['Password'] = ''\n\n self._devices.update({(identity, device): device_info})\n\n local_file = device_info.pop('_local_file', None)\n if local_file:\n try:\n os.unlink(local_file)\n\n self._logger.debug(\n 'Removed local file %(file)s for %(identity)s' % {\n 'identity': identity, 'file': local_file})\n except FileNotFoundError:\n # Ignore error as we are trying to remove the file anyway\n pass",
"def delete(self, image_path=None):\n current_app.mnt_mutex.acquire()\n unmount_image(image_path)\n current_app.mnt_mutex.release()",
"def cleanup_old_images(self):\n \n logging.debug(\"%s cleanup_old_images entered\" % self.footprint_name)\n active_imgs = self.images.values()\n old_images = self.old_images[:]\n for img_id in old_images:\n logging.info(\"Deleting image %s from footprint %s\" % (img_id, self.footprint_name))\n if img_id not in active_imgs:\n notify(\"Deleting image %s from footprint %s\" % (img_id, self.footprint_name))\n img = cs.images.get(img_id)\n if img:\n try:\n img.delete()\n except novaclient.exceptions.NotFound:\n notify(\"Couldn't find image %s\" % img_id)\n self.old_images.remove(img_id)\n else:\n notify(\"Not deleting active image %s\" % img_id)\n # save changes\n self.save()",
"def auto_delete_file_on_change(sender, instance, **kwargs):\n \n if not instance.pk:\n return False\n\n try:\n old_file = Company.objects.get(pk=instance.pk).company_logo\n except Company.DoesNotExist:\n return False\n\n new_file = instance.company_logo\n if not old_file == new_file and old_file.name != Company.DEFAULT_LOGO_FILE:\n if os.path.isfile(old_file.path):\n logger.info(\"removing old file \" + old_file.path)\n os.remove(old_file.path)",
"def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)",
"def delete_file(self, instance, sender, **kwargs):\n super(AutoImageField, self).delete_file(instance, sender)\n if getattr(instance, self.attname):\n # Get full path and the base directory that contains the file\n file_name = getattr(instance,self.name).name\n basedir = os.path.dirname(file_name)\n \n # Look for thumbnails created from filters for the current filename\n # and delete the files\n mask = add_to_basename(file_name, '_*')\n [os.remove(os.path.join(basedir, f)) for f in glob.glob(mask)]",
"def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)",
"def auto_delete_image_lecture_on_delete(sender, instance, **kwargs):\n if instance.file:\n instance.file.delete(save=False)",
"def auto_delete_file_on_change(sender, instance, **kwargs):\r\n if not instance.pk:\r\n return False\r\n\r\n try:\r\n old_file = Attachment.objects.get(pk=instance.pk).path\r\n except Attachment.DoesNotExist:\r\n return False\r\n\r\n new_file = instance.path\r\n if not old_file == new_file:\r\n if os.path.isfile(old_file.path):\r\n os.remove(old_file.path)",
"def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()",
"def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass",
"def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)",
"def delete_image(self):\n Image.objects.get(id = self.id).delete()",
"def auto_delete_file_on_change(sender, instance, **kwargs):\n if not instance.pk:\n return False\n\n try:\n old_file = SkyLabFile.objects.get(pk=instance.pk).file\n except SkyLabFile.DoesNotExist:\n return False\n\n if old_file:\n new_file = instance.file\n if not old_file == new_file:\n if os.path.isfile(old_file.path):\n os.remove(old_file.path)",
"def purge(self):\n from models.accounts import Account\n\n # Make sure we have access to the associated account frame\n if not isinstance(self.account, Account):\n self.account = Account.one(Q._id == self.account)\n\n # Get the backend required to delete the asset\n backend = self.account.get_backend_instance()\n\n # Delete the original file\n backend.delete(self.store_key)\n\n # Delete all variation files\n for variation in self.variations:\n backend.delete(variation.store_key)\n\n self.delete()",
"def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)"
]
| [
"0.6883194",
"0.6871514",
"0.67930377",
"0.67210275",
"0.67210275",
"0.6711644",
"0.6692425",
"0.6659921",
"0.66383386",
"0.65901065",
"0.65727085",
"0.6569942",
"0.6560771",
"0.6389942",
"0.6378528",
"0.63677853",
"0.634398",
"0.63360244",
"0.6323306",
"0.6315152",
"0.6294705",
"0.62815124",
"0.6277927",
"0.6262759",
"0.62380105",
"0.62341064",
"0.62154",
"0.62138987",
"0.6173552",
"0.6173467"
]
| 0.75113237 | 0 |
Initiates a scan request to the scanning thread; calculates the scan position based on self.num_points_done. | def send_request(self):
x_pos = self.x_pos_list[self.num_points_done % self.x_num]
y_pos = self.y_pos_list[self.num_points_done // self.x_num]
# zigzag scanning to minimize backlash
if np.where(self.y_pos_list == y_pos)[0][0] % 2 == 1: # for even-numbered rows
original_index = self.num_points_done % self.x_num
new_index = -1 * (original_index + 1) # counting from the end of the list
x_pos = self.x_pos_list[new_index] # overwriting x_pos
self.scan_request.emit(x_pos, y_pos, self.pmt_exposure_time_in_ms) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scan(self):\n for angle in range(self.MIDPOINT-400, self.MIDPOINT+401, 100):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))",
"def process_scan(self, msg):\n lidarPoints = msg.ranges\n minIndex = lidarPoints.index(min(lidarPoints))\n self.POI = (lidarPoints[minIndex], math.pi*minIndex/180)",
"def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 35):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))",
"def run(self):\n self.scanner.scan_progress = 0\n while self.scanner.scanning and self.scanner.scan_progress < 100:\n response_future = self.scanner.client.ScanProgress.future(scanner_commands_pb2.ScannerRequest(request=1))\n progress = response_future.result()\n self.scanner.scan_progress = progress.ScanProgress\n self.scanner.scanning = not progress.ScanCompleted\n self.progress_update.emit()",
"def _start(self):\n def process_response(future):\n response = future.result()\n self._window.qtlog.append(response.ErrorResponse.Name(response.error_response)[14:])\n self.scanning = True\n self.subthread.start()\n self._window.qtlog.append(\"Scanner Start\")\n\n param = scanner_commands_pb2.ScanParam(\n ScannerIP=self._window.scan_ip.text(),\n ScanMode=self._window.scan_mode.currentText(),\n VerticalAngleMin=int(self._window.scan_v_min.text()),\n VerticalAngleMax=int(self._window.scan_v_max.text()),\n HorizontalAngleMin=int(self._window.scan_h_min.text()),\n HorizontalAngleMax=int(self._window.scan_h_max.text()),\n Resolution=int(self._window.scan_resolution.currentText()),\n MeasurementRate=int(self._window.scan_measure_rate.currentText()),\n NoiseCompression=int(self._window.scan_noise_comp.currentText()),\n ScanFileNumber=int(self._window.scan_f_num.text()),\n ScanBaseName=self._window.scan_f_name.text(),\n StorageMode=self._window.scan_storage.currentText(),\n RemoteScanStoragePath=self._window.scan_path.text()\n )\n response_future = self.client.StartScan.future(param)\n response_future.add_done_callback(process_response)",
"def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 50):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()",
"def process_scan(self, data):\n # check to see if bot far from a wall\n if min(data.ranges) > distance + distance/2:\n self.find_wall(data)\n\n # bot is close enough to corner to turn\n elif data.ranges[90] < 1.5* distance and data.ranges[0] < 1.5 * distance:\n self.turn(data)\n \n else:\n self.follow_wall(data)\n \n\n # Publish msg to cmd_vel.\n self.twist_pub.publish(self.twist)",
"def process_scan(self, msg):\n print(msg)\n self.last_scan = msg",
"def on_scanner_start(self, scanner):",
"def main(self):\n \n\tsession = db.Session()\n\tspeed = -1\n\n\t#self.checkResponseStatus()\n\n\t#secondsBetween = random.uniform(config.MIN_SCAN_DELAY, config.MIN_SCAN_DELAY + 2)\n #time.sleep(secondsBetween)\n\t\n \tstartTime = time.time()\n#\tlogger.info(\"Starting scanning at: %s\", time.asctime( time.localtime(startTime) ) )\n\n\tself.minorFailCount = 0\n for i, point in enumerate(self.points):\n\t self.minorFailCount = 0\n\t self.performMapOperations(i, point, session)\n\n endTime = time.time()\n# logger.info(\"Stopped scanning at: %s\", time.asctime( time.localtime(endTime) ) )\n\ttimeElapsed = endTime - startTime\n\tminutes = timeElapsed/60\n\tminutesRounded = math.floor(minutes)\n\tseconds = math.floor(60*(minutes-minutesRounded))\n\tlogger.info(\"Time elapsed: %d:%d\", minutesRounded, seconds)\t \n logger.info(\"Total pokemon seen: %d (average per cycle: %f)\", self.seen_per_cycle, (self.seen_per_cycle/len(self.points))) \n \n session.close()\n if self.seen_per_cycle == 0:\n self.error_code = 'NO POKEMON'",
"def do(self):\n device = self.target\n\n # if aborted from SCANNING, needs to set VCC and PSS subarray \n # to READY state otherwise when \n if device.scanID != 0:\n self.logger.info(\"scanning\")\n device._group_vcc.command_inout(\"EndScan\")\n device._group_fsp_corr_subarray.command_inout(\"EndScan\")\n device._group_fsp_pss_subarray.command_inout(\"EndScan\")\n device._group_fsp_pst_subarray.command_inout(\"EndScan\")\n \n (result_code,message)=super().do()\n \n return (result_code,message)",
"def scan_callback(self, msg):\n ranges = numpy.array(msg.ranges)\n angle_min = msg.angle_min\n angle_inc = msg.angle_increment\n\n # find the closest obstacle in viewing range\n min_reading = msg.range_max\n for i in range(len(ranges)):\n if -VIEW_RANGE < angle_min + angle_inc * i < VIEW_RANGE:\n if MIN_SCAN < ranges[i] < min_reading:\n min_reading = ranges[i]\n\n # is the obstacle within the turtlebot's safety area\n self.is_obstacle = False\n if min_reading < SAFE_DISTANCE:\n self.is_obstacle = True",
"async def run(self):\n\n\t\tawait asyncio.sleep(self.delay)\n\t\tR_load = self.lock.mag/(self.sense - self.lock.mag)*self.R_ref\n\t\tawait self.resistance.push(R_load)\n\t\tawait self.current.push(self.lock.dc/(self.R_ref+R_load))\n\t\tawait self.voltage.push(self.lock.dc*R_load/(self.R_ref+R_load))\n\n\t\tlogger.debug(\"Stream has filled {} of {} points\".format(self.resistance.points_taken,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.resistance.num_points() ))\n\n\t\t#await asyncio.sleep(2*self.integration_time) # Give the filters some time to catch up?",
"def process_scan(self, msg):\n if len(msg.ranges) <= 330:\n # throw out scans that don't have more than 90% of the data\n return\n # get pose according to the odometry\n p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id=\"base_link\"), pose=Pose())\n self.odom_pose = self.tf_listener.transformPose(\"odom\", p)\n self.base_pose = self.tf_listener.transformPose(\"base_laser_link\", p)\n # convert the odom pose to the tuple (x,y,theta)\n self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)\n #(-0.0069918, 0.000338577, 0.048387097)\n #(1.0208817, 0.04827240, 0.048387)\n self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)\n for i in range(len(msg.ranges)):\n if 0.0 < msg.ranges[i] < 5.0: #for any reding within 5 meters\n #Using the pose and the measurement nd the angle, find it in the world\n map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])\n map_y = self.odom_pose[1] + msg.ranges[i] * -sin(i * pi / 180.0 + self.odom_pose[2])\n\n #Relate that map measure with a place in the picture\n x_detect = int((map_x - self.origin[0]) / self.resolution)\n y_detect = int((map_y - self.origin[1]) / self.resolution)\n\n\n #Determine how to mark the location in the map, along with the stuff inbetween\n u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])\n magnitude = sqrt(u[0] ** 2 + u[1] ** 2)\n n_steps = max([1, int(ceil(magnitude / self.resolution))])\n u_step = (u[0] / (n_steps - 1), u[1] / (n_steps - 1))\n marked = set()\n for i in range(n_steps):\n curr_x = self.odom_pose[0] + i * u_step[0]\n curr_y = self.odom_pose[1] + i * u_step[1]\n if not (self.is_in_map(curr_x, curr_y)):\n break\n\n x_ind = int((curr_x - self.origin[0]) / self.resolution)\n y_ind = int((curr_y - self.origin[1]) / self.resolution)\n if x_ind == x_detect and y_ind == y_detect:\n break\n if not ((x_ind, y_ind) in marked):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_ind, y_ind] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_miss\n marked.add((x_ind, y_ind))\n if self.is_in_map(map_x, map_y):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_detect, y_detect] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_hit\n\n self.seq += 1\n # to save time, only publish the map every 10 scans that we process\n if self.seq % 10 == 0:\n # make occupancy grid\n map = OccupancyGrid()\n map.header.seq = self.seq\n self.seq += 1\n map.header.stamp = msg.header.stamp\n map.header.frame_id = \"map\" # the name of the coordinate frame of the map\n map.info.origin.position.x = self.origin[0]\n map.info.origin.position.y = self.origin[1]\n map.info.width = self.n\n map.info.height = self.n\n map.info.resolution = self.resolution\n map.data = [0] * self.n ** 2 # map.data stores the n by n grid in row-major order\n for i in range(self.n):\n for j in range(self.n):\n idx = i + self.n * j # this implements row major order\n if self.odds_ratios[i, j] < 1 / 5.0: # consider a cell free if odds ratio is low enough\n map.data[idx] = 0\n elif self.odds_ratios[i, j] > 5.0: # consider a cell occupied if odds ratio is high enough\n map.data[idx] = 100\n else: # otherwise cell is unknown\n map.data[idx] = -1\n self.pub.publish(map)\n\n # create the image from the probabilities so we can visualize using opencv\n im = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))\n for i in range(im.shape[0]):\n for j in range(im.shape[1]):\n if self.odds_ratios[i, 
j] < 1 / 5.0:\n im[i, j, :] = 1.0\n elif self.odds_ratios[i, j] > 5.0:\n im[i, j, :] = 0.0\n else:\n im[i, j, :] = 0.5\n\n # compute the index of the odometry pose so we can mark it with a circle\n x_odom_index = int((self.odom_pose[0] - self.origin[0]) / self.resolution)\n y_odom_index = int((self.odom_pose[1] - self.origin[1]) / self.resolution)\n\n x_base_index = int((self.base_pose[0] - self.origin[0] - 1) / self.resolution)\n y_base_index = int((self.base_pose[1] - self.origin[1]) / self.resolution)\n\n\n # computer the ball locations so we can mark with a colored circle\n #TODO Track and relate the robot's angle pose for accuracy\n\n if self.depth_red > 0:\n self.y_camera_red = int(x_odom_index - self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_red = int(y_odom_index - self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n real_red_y = self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])\n real_red_x = self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])\n\n self.rcoor_pub.publish(Vector3(-real_red_x, -real_red_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n if self.depth_blue > 0:\n self.y_camera_blue = int(x_odom_index - self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_blue = int(y_odom_index - self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n real_blue_y = self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])\n real_blue_x = self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])\n\n self.bcoor_pub.publish(Vector3(-real_blue_x, -real_blue_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n if self.depth_green > 0:\n self.y_camera_green = int(x_odom_index - self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_green = int(y_odom_index - self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_green, self.y_camera_green), 1, self.green)\n \n real_green_y = self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])\n real_green_x = self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])\n\n self.gcoor_pub.publish(Vector3(-real_green_x, -real_green_y/2, 0))\n\n if self.depth_yellow > 0:\n self.y_camera_yellow = int(x_odom_index - self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_yellow = int(y_odom_index - self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n \n real_yellow_y = self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])\n real_yellow_x = self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])\n\n self.ycoor_pub.publish(Vector3(-real_yellow_x, -real_yellow_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n\n # draw the robot\n cv2.circle(im, (y_odom_index, x_odom_index), 2, (255, 0, 0))\n \n # display the image resized\n cv2.imshow(\"map\", cv2.resize(im, (500, 500)))\n cv2.waitKey(20)",
"def discover(self) -> int:\n\n # Get sensor data\n no_wall_in_front = not self.is_wall_in_front()\n no_wall_in_left = not self.is_wall_in_left()\n no_wall_in_right = not self.is_wall_in_right()\n\n # get neighboring tiles\n # Get current and neighboring points\n this_point: tuple = self.stack[-1]\n front_point = self.tile_in_the_direction(self.direction)\n right_point = self.tile_in_the_direction((self.direction + 1) % 4)\n left_point = self.tile_in_the_direction((self.direction - 1) % 4)\n\n # mark this point as discovered\n self.visited.add(this_point)\n\n # Check if this is center tile\n if self.is_ground_center():\n self.center = this_point\n\n # Record all possible turns and add to the graph\n if no_wall_in_front:\n self.add_edge_between(front_point, this_point)\n if no_wall_in_left:\n self.add_edge_between(left_point, this_point)\n if no_wall_in_right:\n self.add_edge_between(right_point, this_point)\n\n # For each choice it can take\n for choice in self.graph[this_point]:\n # If choice was not discovered before, do it\n if choice not in self.visited:\n self.stack.append(choice)\n break\n else:\n # No undiscovered nodes near robot (No choice to make)\n # Start to backtrack\n self.stack.pop()\n if not self.stack:\n # Came back to initial position\n # Start second half\n return SimulationRunStatus.RESUME_SIMULATION\n choice = self.stack[-1]\n\n if choice == front_point:\n self.go_forward()\n elif choice == left_point:\n self.go_to_left()\n elif choice == right_point:\n self.go_to_right()\n else:\n self.go_backward()",
"def scan(self):\n # send here something to verify sonar is connected?\n if not self.initialized:\n raise SonarNotConfigured(self.initialized)\n # Timeout count\n timeout_count = 0\n MAX_TIMEOUT_COUNT = 5\n\n # Scan until stopped\n self.preempted = False\n while not self.preempted:\n # Preempt on ROS shutdown\n if rospy.is_shutdown():\n self.preempt()\n return\n # Ask sonar to send a single measurement\n self.conn.send(Message.MEASURE)\n\n # Get the scan data\n try:\n data = self.get(Message.DATA,wait = 1).payload\n self.range = float(data)\n timeout_count = 0\n except TimeoutError:\n timeout_count += 1\n rospy.logdebug(\"Timeout count: %d\", timeout_count)\n if timeout_count >= MAX_TIMEOUT_COUNT:\n # Try to resend paramenters\n self.conn.send(Message.MEASURE)\n timeout_count = 0\n # Try again\n continue\n # Publish extracted data in personalised msg\n pub2 = rospy.Publisher('range',Range, queue_size=10)\n try:\n pub2.publish(range = self.range, min_range = self.min_range, max_range = self.max_range)\n except:\n pass",
"def scan(self, param):\n\t\tself.left(355)",
"def _monitor_move_start(self, start_pos):\n self._move_started = threading.Event()\n queue = Queue.Queue()\n\n dmov = self.get_pvobj(\"done_moving\")\n if dmov.isinitialized:\n def cb(e=None):\n if e is None:\n if not dmov.value:\n self._move_started.set()\n dmov.del_monitor_callback(queue.get())\n id = dmov.add_monitor_callback(cb)\n else:\n rbv = self.get_pvobj(\"readback\")\n res = self.get_par(\"resolution\")\n low = start_pos - res\n high = start_pos + res\n def cb(e=None):\n if e is None:\n if not low < rbv.value < high:\n self._move_started.set()\n rbv.del_monitor_callback(queue.get())\n id = rbv.add_monitor_callback(cb)\n\n queue.put(id)",
"def begin(self):\r\n self.queue.append((self.start, 0.0))\r\n self.cost_to_pos[self.start] = 0\r\n self.loop()",
"def search_parking_lot(self):\n\n self.start_driving()\n self.velocity = 8\n self.distance = 250 # maximum searching distance\n self.angle = 1.5 # TODO\n self.drive_thread.reset()\n\n vacant_distance = 0\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n if self.sensor_manager.right > 25:\n vacant_distance += 1\n else:\n vacant_distance = 0\n\n if vacant_distance >= 35:\n while self.sensor_manager.right > 25:\n time.sleep(0.1)\n\n distance_right = self.sensor_manager.right\n\n if 14 <= distance_right <= 18:\n self.angle = 0\n self.distance = 35\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n elif distance_right > 18:\n self.adjust_starting_position(\"left\")\n elif distance_right < 14:\n self.adjust_starting_position(\"right\")\n \n break\n\n self.stop_driving()",
"def test_start_scan(self):\n pass",
"def do_scans_with_ref(self, nr_runs):\n print()\n print('do_scan')\n print()\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n dev_to_scan = scan['axis']['device']['name']\n output = scan['axis']['device']['property']\n approx_time_to_scan = (laser.params['stop_wavelength']-laser.params['start_wavelength'])/laser.params['wavelength_speed']\n # Scan the laser and the values of the given device\n if output != 'time':\n dev_range = scan['axis']['device']['range']\n start = Q_(dev_range[0])\n units = start.u\n stop = Q_(dev_range[1])\n step = Q_(dev_range[2])\n \n num_points_dev = ((stop-start)/step).to('')\n else:\n dev_range = scan['axis']['device']['range']\n start = 1\n stop = dev_range[1]\n num_points_dev = stop\n\n num_points_dev += 1 # So the last bit of information is true.\n\n for value in np.linspace(start, stop, num_points_dev, endpoint=True):\n if output != 'time':\n self.set_value_to_device(dev_to_scan, {output: value * units})\n dev = self.devices[dev_to_scan]\n time.sleep(0.1)\n while not dev.driver.finished_moving:\n time.sleep(0.2)\n for i in range(nr_runs):\n print('run number = ', i)\n self.do_line_scan_shutter_closed()\n \n return True",
"def do_scan(self):\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n dev_to_scan = scan['axis']['device']['name']\n output = scan['axis']['device']['property']\n approx_time_to_scan = (laser.params['stop_wavelength']-laser.params['start_wavelength'])/laser.params['wavelength_speed']\n # Scan the laser and the values of the given device\n if output != 'time':\n dev_range = scan['axis']['device']['range']\n start = Q_(dev_range[0])\n units = start.u\n stop = Q_(dev_range[1])\n step = Q_(dev_range[2])\n \n num_points_dev = ((stop-start)/step).to('')\n else:\n dev_range = scan['axis']['device']['range']\n start = 1\n stop = dev_range[1]\n num_points_dev = stop\n\n num_points_dev += 1 # So the last bit of information is true.\n\n for value in np.linspace(start, stop, num_points_dev, endpoint=True):\n if output != 'time':\n self.set_value_to_device(dev_to_scan, {output: value * units})\n dev = self.devices[dev_to_scan]\n time.sleep(0.1)\n while not dev.driver.finished_moving:\n time.sleep(0.2)\n\n self.do_line_scan()\n \n return True",
"def tracking(self) -> None:\n dist, delta_angle, timestamp = self.vision.get_vision_data()\n # collect data only once per loop\n if timestamp is None:\n # self.next_state(\"searching\")\n # print(f\"tracking -> searching {self.vision.get_vision_data()}\")\n self.state = self.searching\n else:\n if abs(delta_angle) > self.find_allowable_angle(dist):\n # print(f\"Telling turret to slew by {delta_angle}\")\n self.turret.slew(delta_angle)\n if self.ready_to_spin():\n # self.next_state(\"firing\")\n # print(f\"tracking -> spining_up {self.vision.get_vision_data()}\")\n self.distance = dist\n self.state = self.spining_up",
"def _calc_ball_scan(self):\n # Default scan to false.\n self._scan_line = [False] * Stella.FRAME_WIDTH\n\n if self._enabled:\n for x in range(self._x_min, self._x_max):\n self._scan_line[x % Stella.FRAME_WIDTH] = True",
"def scan_callback(self, scan):\n # Fill some cells in the map just so we can see that something is\n # being published.\n Lresol = 1 / myRes\n r = scan.ranges[0]\n xt = [self.position[0] + 1, self.position[1] + 1, self.position[2]]\n # for k in range(0,len(scan.ranges)-1):\n scanAngles = np.linspace(scan.angle_max, scan.angle_min, len(scan.ranges))\n lidar_local = np.array(\n [xt[0] + scan.ranges * np.cos(scanAngles + xt[2]), xt[1] - (scan.ranges * np.sin(scanAngles + xt[2]))])\n\n # print len(lidar_local[1])\n xtg = [int(np.ceil(xt[0] * Lresol)), int(np.ceil(xt[1] * Lresol))]\n self._map.grid[xtg[1], xtg[0]] = 0 # set the robot position grid as empty\n\n for k in range(0, len(scan.ranges) - 1):\n if scan.ranges[k] < scan.range_max:\n rtl = np.ceil(lidar_local[:, k] * Lresol)\n rtli = [0, 0]\n rtli[0] = int(rtl[0])\n rtli[1] = int(rtl[1])\n l = bresenham(xtg, rtli)\n self.EISM(l.path, scan.ranges[k])\n # Now that the map is updated, publish it!\n rospy.loginfo(\"Scan is processed, publishing updated map.\")\n self.publish_map()",
"def next_point(self):\n if self.verbose:\n print(\"Computing acquisition function...\")\n if self.acquisition_function == 'cb':\n acq, pred = acqfunc.confidence_bound(\n self.surrogate_model, self.X_full,\n alpha=self.alpha, beta=self.beta)\n elif self.acquisition_function == 'ei':\n acq, pred = acqfunc.expected_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif self.acquisition_function == 'poi':\n acq, pred = acqfunc.probability_of_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif isinstance(self.acquisition_function, types.FunctionType):\n acq, pred = self.acquisition_function(\n self.surrogate_model, self.X_full, self.X_sparse)\n else:\n raise NotImplementedError(\n \"Choose between 'cb', 'ei', and 'poi' acquisition functions or define your own\")\n self.gp_predictions.append(pred)\n if self.mask is None:\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list][::-1][:self.batch_size].tolist()\n indices_list = np.dstack(indices_list)[0][::-1][:self.batch_size].tolist()\n else:\n acq = self.mask*acq\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list]\n vals_list = vals_list[~np.isnan(vals_list)][::-1]\n indices_list = np.dstack(indices_list)[0]\n indices_list = indices_list[:len(vals_list)][::-1]\n vals_list = vals_list[:self.batch_size].tolist()\n indices_list = indices_list[:self.batch_size].tolist()\n if not self.batch_update:\n return vals_list, indices_list\n if self.batch_dscale is None:\n batch_dscale_ = self.surrogate_model.model.kernel.lengthscale.mean().item()\n else:\n batch_dscale_ = self.batch_dscale\n vals_list, indices_list = self.update_points(\n vals_list, indices_list, batch_dscale_)\n return vals_list, indices_list",
"def start_processing(self):",
"def start_scan(self):\n self.ui.log_message.setText('Starting acquisition')\n self.overshoot = False\n self.plot_2D_ini=False\n self.plot_1D_ini = False\n res = self.set_scan()\n if res:\n\n # save settings from move modules\n move_modules_names = [mod.title for mod in self.move_modules_scan]\n for ind_move, move_name in enumerate(move_modules_names):\n move_group_name = 'Move{:03d}'.format(ind_move)\n if not self.h5saver.is_node_in_group(self.h5saver.current_scan_group, move_group_name):\n self.h5saver.add_move_group(self.h5saver.current_scan_group, title='',\n settings_as_xml=custom_tree.parameter_to_xml_string(\n self.move_modules_scan[ind_move].settings),\n metadata=dict(name=move_name))\n\n # save settings from detector modules\n detector_modules_names = [mod.title for mod in self.det_modules_scan]\n for ind_det, det_name in enumerate(detector_modules_names):\n det_group_name = 'Detector{:03d}'.format(ind_det)\n if not self.h5saver.is_node_in_group(self.h5saver.current_scan_group, det_group_name):\n settings_str = custom_tree.parameter_to_xml_string(self.det_modules_scan[ind_det].settings)\n try:\n if 'Data0D' not in [viewer.viewer_type for viewer in\n self.det_modules_scan[ind_det].ui.viewers]: # no roi_settings in viewer0D\n settings_str = b'<All_settings title=\"All Settings\" type=\"group\">' + settings_str\n for ind_viewer, viewer in enumerate(self.det_modules_scan[ind_det].ui.viewers):\n if '0D' not in viewer.viewer_type:\n settings_str += '<Viewer{:0d}_ROI_settings title=\"ROI Settings\" type=\"group\">'.format(\n ind_viewer).encode()\n settings_str += custom_tree.parameter_to_xml_string(\n viewer.roi_manager.settings) + '</Viewer{:0d}_ROI_settings>'.format(ind_viewer).encode()\n settings_str += b'</All_settings>'\n except Exception as e:\n self.update_status(getLineInfo() + str(e), wait_time=self.wait_time, log_type='log')\n\n self.h5saver.add_det_group(self.h5saver.current_scan_group,\n settings_as_xml=settings_str,\n metadata=dict(name=det_name))\n\n\n #mandatory to deal with multithreads\n if self.scan_thread is not None:\n self.command_DAQ_signal.disconnect()\n if self.scan_thread.isRunning():\n self.scan_thread.exit()\n while not self.scan_thread.isFinished():\n QThread.msleep(100)\n self.scan_thread = None\n\n self.scan_thread = QThread()\n\n scan_acquisition = DAQ_Scan_Acquisition(self.settings, self.scanner.settings, self.h5saver.settings,\n self.scan_moves,\n self.scan_saves,\n [mod.command_stage for mod in self.move_modules_scan],\n [mod.command_detector for mod in self.det_modules_scan],\n [mod.move_done_signal for mod in self.move_modules_scan],\n [mod.grab_done_signal for mod in self.det_modules_scan],\n [mod.settings.child('main_settings', 'Naverage').value() for mod in self.det_modules_scan],\n move_modules_names,\n detector_modules_names,\n [mod.settings for mod in self.move_modules_scan],\n [mod.settings for mod in self.det_modules_scan],\n )\n scan_acquisition.moveToThread(self.scan_thread)\n\n self.command_DAQ_signal[list].connect(scan_acquisition.queue_command)\n scan_acquisition.scan_data_tmp[OrderedDict].connect(self.update_scan_GUI)\n scan_acquisition.status_sig[list].connect(self.thread_status)\n\n self.scan_thread.scan_acquisition = scan_acquisition\n self.scan_thread.start()\n\n self.ui.set_scan_pb.setEnabled(False)\n self.ui.set_ini_positions_pb.setEnabled(False)\n self.ui.start_scan_pb.setEnabled(False)\n QtWidgets.QApplication.processEvents()\n self.ui.scan_done_LED.set_as_false()\n\n\n\n self.command_DAQ_signal.emit([\"start_acquisition\"])\n\n 
self.ui.log_message.setText('Running acquisition')",
"def move_start(self, event):\n self.canvas.scan_mark(event.x, event.y)"
]
| [
"0.6134527",
"0.6115277",
"0.60924757",
"0.602439",
"0.5998884",
"0.5907415",
"0.5609799",
"0.5566453",
"0.5554528",
"0.5535642",
"0.5516124",
"0.54980385",
"0.5470819",
"0.5463966",
"0.5421192",
"0.538856",
"0.53775305",
"0.5353817",
"0.53472614",
"0.53278494",
"0.5295174",
"0.5280348",
"0.52644336",
"0.525334",
"0.52496964",
"0.52495265",
"0.5228121",
"0.521966",
"0.51894134",
"0.5171484"
]
| 0.7576418 | 0 |
Helper for netstring_read and netstring_readfd. | def _netstring_read(read_func, max_length):
length_str = read_length(read_func)
length = int(length_str)
if max_length and length > max_length:
raise ValueError("Payload is too large: %s" % length)
payload = read_func(length)
tag_byte = read_func(1)
# dump_line can emit a newline.
if tag_byte != ',' and tag_byte != '\n':
raise ValueError('Got tag %r, expected comma or newline' % tag_byte)
return payload | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def netstring_readfd(fd, max_length=0):\n read_func = lambda length: os.read(fd, length)\n return _netstring_read(read_func, max_length)",
"def netstring_read(f, max_length=0):\n read_func = f.read\n return _netstring_read(read_func, max_length)",
"def readNetstring(sock):\n # First attempt to read the length.\n size = ''\n while True:\n try:\n c = sock.recv(1)\n except socket.error, e:\n if e[0] == errno.EAGAIN:\n select.select([sock], [], [])\n continue\n else:\n raise\n if c == ':':\n break\n if not c:\n raise EOFError\n size += c\n\n # Try to decode the length.\n try:\n size = int(size)\n if size < 0:\n raise ValueError\n except ValueError:\n raise ProtocolError, 'invalid netstring length'\n\n # Now read the string.\n s, length = recvall(sock, size)\n\n if length < size:\n raise EOFError\n\n # Lastly, the trailer.\n trailer, length = recvall(sock, 1)\n\n if length < 1:\n raise EOFError\n\n if trailer != ',':\n raise ProtocolError, 'invalid netstring trailer'\n\n return s",
"def readNetstring(self, size=2048):\n # Read until we have at least 4 bytes\n while not self.recvLength():\n self.__data += self.sock.recv(size)\n while not self.recvData():\n self.__data += self.sock.recv(size)\n while not self.recvComma():\n self.__data += self.sock.recv(size)\n string = self.__buffer\n self.__buffer = ''\n if self.verbose:\n print \"controller:\",string\n return string",
"def _read(self, string=\"\", fname=\"\"):\n if string:\n self.handle = gv.readstring(string)\n elif fname == \"stdin\":\n data = sys.stdin.read()\n self.handle = gv.readstring(data)\n else:\n self.handle = gv.read(fname)\n # gv returns None if eg. the input does not exist\n if not self.handle:\n raise ValueError(\"Error with file \" + fname)",
"def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]",
"def test_read_net_namespace(self):\n pass",
"def read(self, s):\n pass",
"def read_string(data, s_len):\n return struct.unpack(\"=%ds\" % s_len, data.read(s_len))[0].decode(\"utf-8\")",
"def read_c_string(fd: BinaryIO) -> bytes:\n string = bytearray()\n while True:\n byte = fd.read(1)\n if not byte or byte == b'\\0':\n return bytes(string)\n string += byte",
"def _read_fixed(buf, length):\n result = buf.read(length)\n actual = len(result)\n if actual != length:\n raise EndOfMessage(False if actual == 0 else True)\n return result",
"def recvStr(self, socket, bufferSize=1024):\n return self.recv(socket, bufferSize).decode()",
"def _read_wrapper(data):\n # Paramiko (strangely) in PY3 returns an int here.\n if isinstance(data, int):\n data = chr(data)\n # Ensure unicode\n return str(data)",
"def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result",
"def test_read_strips(connection, reader, loop):\n reader.push(\" a b c | @#$ d \\n\")\n loop.run_until_complete(connection.connect())\n value = loop.run_until_complete(connection.read())\n assert value == \"a b c | @#$ d\"\n assert reader.has_read(\" a b c | @#$ d \\n\")",
"def read(self, nbytes, /) -> bytes | None:",
"def getnodenamed(self, nodename):\n # nodename = create_string_buffer(nodename)\n\n # Return the input if it is not a string (for node_p checks)\n if not isinstance(nodename, types.StringType):\n return nodename\n\n # (const char* name, const net_bn* net)\n cnetica.GetNodeNamed_bn.argtypes = [c_char_p, c_void_p]\n cnetica.GetNodeNamed_bn.restype = c_void_p\n node_p = cnetica.GetNodeNamed_bn(ccharp(nodename), self.net)\n if node_p is None:\n logger.warning('Node with name \"%s\" does not exist' % nodename)\n return node_p",
"def Read_String(self, size = 0, encoding='UTF-8', errors='strict'):\r\n if size == 0: size = self.Port.inWaiting()\r\n data = self.Port.read(size)\r\n return data.decode(encoding = encoding, errors = errors)",
"def read_str(self, timeout = 0):\n len = self.read_uint32(timeout)\n return self.read(len, timeout)",
"def __synchronise():\n # FIXME: We should match some pattern rather than single byte\n ch = endpoint.read()\n while not ch or ord(ch) not in DGTL.descriptors.keys():\n ch = endpoint.read()\n\n return ch",
"def _read(self):\n # because protocol has no termination chars the read reads the number\n # of bytes in the buffer\n bytes_in_buffer = self.visa_handle.bytes_in_buffer\n # a workaround for a timeout error in the pyvsia read_raw() function\n with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):\n mes = self.visa_handle.visalib.read(\n self.visa_handle.session, bytes_in_buffer)\n mes = str(mes[0].decode()) # cannot be done on same line for some reason\n # if mes[1] != 0:\n # # see protocol descriptor for error codes\n # raise Exception('IVVI rack exception \"%s\"' % mes[1])\n return mes",
"def __tcp_recv(self):\n total_data = []\n bs = 1024\n try:\n data = self.__sock.recv(bs)\n total_data.append(data)\n while True and data:\n if not re.search(\"L: (\\d+)\",data) and not data[-4:] == '\\r\\n\\r\\n':\n data = self.__sock.recv(bs)\n total_data.append(data)\n elif not re.search(\"L: (\\d+)\",data) and data[-4:] == '\\r\\n\\r\\n':\n return total_data\n else:\n break\n \n\n while re.search(\"L: (\\d+)\",data):\n n = len(data)\n L = int(re.findall(\"L: (\\d+)\",data)[-1])\n p = data.rfind('\\r\\n\\r\\n')\n abc = data\n data = ''\n\n p1 = data.rfind(str(L))\n if p < p1:\n log(\"rn before L\")\n left = L + n - (p1 + len(str(L))) + 4\n\n else:\n left = L - (n - p -4)\n if left == L:\n log(\"It happened!\")\n break\n\n #if more bytes then last L\n #come across another command: BN etc.\n #read until another L come\n if left < 0:\n log('abc')\n d = ''\n left = 0\n while True:\n d = self.__sock.recv(bs)\n data += d\n if re.search(\"L: (\\d+)\",d):\n break\n log(\"read left bytes\")\n log('data:'+data)\n total_data.append(data)\n\n #read left bytes in last L\n while left:\n data = self.__sock.recv(left)\n n = len(data)\n left = left - n\n\n if not data:\n break\n total_data.append(data)\n\n except socket.error,e:\n #self.__sock.close()\n raise PyFetionSocketError(e)\n\n return self.__split(''.join(total_data))\n\n #return ''.join(total_data)",
"def readString(stream):\n # read the string length (4-byte int, network byte order)\n buf = stream.read(4)\n if len(buf) < 4:\n raise RuntimeError(\"found %d bytes (expected: 4)\" % len(buf))\n n_bytes = struct.unpack(\"!i\", buf)[0]\n if n_bytes < 0:\n return None\n buf = stream.read(n_bytes)\n if len(buf) < n_bytes:\n raise RuntimeError(\"found %d bytes (expected: %d)\" % (\n len(buf), n_bytes\n ))\n return unicode(buf, 'UTF-8')",
"def _read_string(bs):\n result = bs.readto('0x00', bytealigned=True).bytes.decode(\"utf-8\")[:-1]\n return result if result else None",
"def rcvString(self, num=1):\r\n\t\t# verifico quanti caratteri ci sono gia' nel buffer\r\n\t\tcou = self.ser.inWaiting()\r\n\t\tif cou > 0:\r\n\t\t\tif cou >= num:\r\n\t\t\t\t# provo la ricezione\r\n\t\t\t\tdat = self.ser.read(num)\r\n\t\t\telse:\r\n\t\t\t\tdat = self.ser.read(cou)\r\n\t\t\t\t# dati letti\r\n\t\t\t\tnum = cou\r\n\t\telse:\r\n\t\t\tdat = None\r\n\t\t\tnum = 0\r\n\t\treturn (num, dat)",
"def read_str(self, p, offset, default_, additional_size):\n if p == 0:\n return default_\n assert ptr.kind(p) == ptr.LIST\n assert ptr.list_size_tag(p) == ptr.LIST_SIZE_8\n start = ptr.deref(p, offset)\n end = start + ptr.list_item_count(p) + additional_size\n return self.buf[start:end]",
"def _get_string():\n result = sys.stdin.readline().rstrip('\\n')\n return result",
"def cstringio_refill(self, partialread, reqlen):\r\n pass",
"def read():\n # TODO",
"def read(self, nbytes: int, /) -> bytes | None:"
]
| [
"0.6789914",
"0.6663811",
"0.63082224",
"0.60322523",
"0.58078915",
"0.5723228",
"0.5602307",
"0.557225",
"0.55436134",
"0.5517197",
"0.5407884",
"0.53974515",
"0.5347857",
"0.5190395",
"0.51745874",
"0.51651764",
"0.51445425",
"0.51276857",
"0.51222485",
"0.5116174",
"0.509744",
"0.50838983",
"0.5083367",
"0.50664365",
"0.5050373",
"0.5042208",
"0.50269365",
"0.500113",
"0.49969047",
"0.4993145"
]
| 0.6922542 | 0 |
Read a byte string from a file descriptor. | def netstring_readfd(fd, max_length=0):
read_func = lambda length: os.read(fd, length)
return _netstring_read(read_func, max_length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_c_string(fd: BinaryIO) -> bytes:\n string = bytearray()\n while True:\n byte = fd.read(1)\n if not byte or byte == b'\\0':\n return bytes(string)\n string += byte",
"def readbyte(f):\n return struct.unpack(\">B\", f.read(1))[0]",
"def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')",
"def bfh(s):\n return io.BytesIO(bytes.fromhex(s))",
"def read_string(stream, size):\n\t\n\tvalue = ''\n\tif size > 0:\n\t\tvalue = stream.read(size)\n\t\tvalue = value.partition(chr(0))[0]\n\treturn value",
"def read_utf8_string(self, length):\n return self.read(length).decode(\"utf-8\")",
"def read_io_byte(self, address):\n return self.io.read(self.regs.resolve_address(address))",
"def read_file_bytes(filepath: str):\n with open(filepath, \"rb\") as reader:\n return reader.read()",
"def read_bytes(stream, writer_schema=None, reader_schema=None): # noqa\n size = read_long(stream)\n if reader_schema == 'string':\n # Schema Resolution: promote to unicode string\n return stream.read(size).decode('utf-8')\n else:\n return stream.read(size)",
"def Read_String(self, size = 0, encoding='UTF-8', errors='strict'):\r\n if size == 0: size = self.Port.inWaiting()\r\n data = self.Port.read(size)\r\n return data.decode(encoding = encoding, errors = errors)",
"def netstring_read(f, max_length=0):\n read_func = f.read\n return _netstring_read(read_func, max_length)",
"def read_string(data, s_len):\n return struct.unpack(\"=%ds\" % s_len, data.read(s_len))[0].decode(\"utf-8\")",
"def _read_next_bytes(\n fid, num_bytes, format_char_sequence, endian_character=\"<\"\n ):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)",
"def read_fd_decode_safely(fd, size=4096):\n data = os.read(fd.fileno(), size)\n for _ in range(3):\n try:\n return data, data.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n if e.reason != \"unexpected end of data\":\n raise\n data += os.read(fd.fileno(), 1)\n\n return data, data.decode(\"utf-8\")",
"def read_char(data):\n s_type = \"=%s\" % get_type(\"char\")\n return struct.unpack(s_type, data.read(1))[0]",
"def read_char(self):\n return self._packers[\"b\"].unpack(self.read(1))[0]",
"def read_byte(self, addr):\n raise NotImplementedError()",
"def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)",
"def readString(stream):\n # read the string length (4-byte int, network byte order)\n buf = stream.read(4)\n if len(buf) < 4:\n raise RuntimeError(\"found %d bytes (expected: 4)\" % len(buf))\n n_bytes = struct.unpack(\"!i\", buf)[0]\n if n_bytes < 0:\n return None\n buf = stream.read(n_bytes)\n if len(buf) < n_bytes:\n raise RuntimeError(\"found %d bytes (expected: %d)\" % (\n len(buf), n_bytes\n ))\n return unicode(buf, 'UTF-8')",
"def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:\n raise OSError(errno.EIO, os.strerror(errno.EIO))\n return buf",
"def read(num_bytes):\n # If file cannot be mmap'd (e.g. is stdin, or a fifo), fall back\n # to doing an actual read from the file.\n if not can_be_mmapd(fd):\n return fd.read(num_bytes)\n\n bytes_available = max(file_size - offset, 0)\n if bytes_available == 0:\n return b\"\"\n\n return mmap.mmap(fd.fileno(), min(num_bytes, bytes_available), offset=offset, access=mmap.ACCESS_READ)",
"def _read_string(bs):\n result = bs.readto('0x00', bytealigned=True).bytes.decode(\"utf-8\")[:-1]\n return result if result else None",
"def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()",
"def _decode_byte(fp):\n return struct.unpack('b', fp.read(1))[0]",
"def read_str(self, timeout = 0):\n len = self.read_uint32(timeout)\n return self.read(len, timeout)",
"def read_string(self):\n size = self.read_int32()\n\n if size == 0:\n return \"\"\n\n is_unicode = size < 0\n\n if is_unicode:\n size *= -2\n return self.read_bytes(size)[:-2].decode('utf-16')\n\n stream_bytes = self.read_bytes(size)\n string = stream_bytes[:-1]\n if stream_bytes[-1] != 0:\n raise ReadStringException('End of string not zero')\n\n try:\n return string.decode('utf-8')\n except UnicodeDecodeError:\n return string.decode('latin-1')",
"def load_bytes_from_fd(fd, start=None, end=None):\n if start:\n fd.seek(start)\n\n if end and start:\n batch = end - start\n elif end:\n batch = end\n else:\n batch = -1\n\n binary = fd.read(batch)\n syntax = str(int(len(binary) / 4)) + \"f\"\n try:\n data = struct.unpack(syntax, binary)\n return data\n except struct.error: # not enough bytes to unpack, end of binary\n return None",
"def read_string(self):\n\n # length may be -1, 0, or a positive integer\n length = self.read_and_unpack('l')[0]\n if length > 0:\n return self.read(length).decode(self.utf_16_decoder)\n else:\n return ''",
"def _readBytes(self, len):\n return self.stream.read(len)",
"def read_byte(fd, reg):\n b, = write_read_i2c(fd, bytes([reg]), 1)\n return b"
]
| [
"0.7474642",
"0.64740103",
"0.64529747",
"0.6127014",
"0.6118157",
"0.61152637",
"0.6086926",
"0.60721207",
"0.6066179",
"0.60622627",
"0.6013826",
"0.6012827",
"0.5981773",
"0.5960392",
"0.59422165",
"0.59304714",
"0.59214187",
"0.5903875",
"0.5879864",
"0.58540636",
"0.5853574",
"0.58427775",
"0.58357817",
"0.58142126",
"0.5812337",
"0.5811544",
"0.58096963",
"0.5797393",
"0.57897776",
"0.57788897"
]
| 0.6790993 | 1 |
CCG = Column-and-Constraint Generation. Column-and-Constraint Generation algorithm: iterates between the MP and SP until the convergence criterion is reached. | def ccg_algo(dir:str, tol: float, gamma: int, pv_min: np.array, pv_max: np.array, engagement: np.array, solver_param: dict, day:str, log:bool=False, printconsole:bool=False, warm_start:bool=False, M_neg:float=None):
# Compute the maximal deviation between the max and min PV uncertainty set bounds
max_dev = pv_max - pv_min # (kW)
max_dev[max_dev < 0] = 0
nb_periods = max_dev.shape[0]
# ------------------------------------------------------------------------------------------------------------------
# CCG initialization: build the initial MP
# ------------------------------------------------------------------------------------------------------------------
# Building the MP
MP = CCG_MP()
MP.model.update()
print('MP initialized: %d variables %d constraints' % (len(MP.model.getVars()), len(MP.model.getConstrs())))
MP.export_model(dir + day + '_ccg_MP_initialized')
# ------------------------------------------------------------------------------------------------------------------
# CCG loop until convergence criteria is reached
# ------------------------------------------------------------------------------------------------------------------
if printconsole:
print('-----------CCG ITERATION STARTING-----------')
t_solve = time.time()
objectives = []
computation_times = []
mipgap = []
SP_dual_status = []
SP_primal_status = []
alpha_neg_list = []
epsilon = 1e20
# With CCG the convergence is stable.
epsilon_list = [epsilon] * 2
iteration = 1
BESS_count_list = []
BESS_charge_discharge_list = []
max_iteration = 50
while all(i < tol for i in epsilon_list) is not True and iteration < max_iteration:
logfile = ""
if log:
logfile = dir + 'logfile_' + str(iteration) + '.log'
if printconsole:
print('i = %s solve SP dual' % (iteration))
# ------------------------------------------------------------------------------------------------------------------
# 1. SP part
# ------------------------------------------------------------------------------------------------------------------
# 1.1 Solve the SP and get the worst PV trajectory to add the new constraints of the MP
SP_dual = BD_SP(pv_forecast=pv_max, max_dev=max_dev, engagement=engagement, gamma=gamma, heuristic=solver_param['heuristic'], M_neg=M_neg)
SP_dual.solve(logfile=logfile, Threads=solver_param['Threads'], MIPFocus=solver_param['MIPFocus'], TimeLimit=solver_param['TimeLimit'])
SP_dual_sol = SP_dual.store_solution()
SP_dual_status.append(SP_dual_sol['status'])
mipgap.append(SP_dual.model.MIPGap)
alpha_neg_list.append(SP_dual_sol['alpha_neg'])
# 1.2 Compute the worst PV trajectory from the SP dual solution
pv_worst_case_from_SP = [pv_max[i] - SP_dual_sol['z_neg'][i] * max_dev[i] for i in range(nb_periods)]
if printconsole:
print(' i = %s : SP dual status %s solved in %.1f s MIPGap = %.6f' % (iteration, SP_dual_sol['status'], SP_dual_sol['time_total'], SP_dual.model.MIPGap))
# 1.3 Solve the primal of the SP to check if the objectives of the primal and dual are equal to each other
SP_primal = SP_primal_LP(pv_forecast=pv_worst_case_from_SP, engagement=engagement)
SP_primal.solve()
SP_primal_sol = SP_primal.store_solution()
SP_primal_status.append(SP_primal_sol['status'])
if printconsole:
print(' i = %s : SP primal status %s' % (iteration, SP_primal_sol['status']))
print(' i = %s : SP primal %.1f € SP dual %.1f € -> |SP primal - SP dual| = %.2f €' % (iteration, SP_primal_sol['obj'], SP_dual_sol['obj'], abs(SP_primal_sol['obj'] - SP_dual_sol['obj'])))
# 1.4 SP solved to optimality ? -> Check if there is any simultaneous charge and discharge in the SP primal solution
if SP_primal_sol['status'] == 2 or SP_primal_sol['status'] == 9: # 2 = optimal, 9 = timelimit has been reached
nb_count = check_BESS(SP_primal_sol=SP_primal_sol)
if nb_count > 0:
BESS_charge_discharge_list.append([iteration, SP_primal_sol['y_charge'], SP_primal_sol['y_discharge']])
else: #
nb_count = float('nan')
BESS_count_list.append(nb_count)
if printconsole:
print(' i = %s : %s simultaneous charge and discharge' % (iteration, nb_count))
# ------------------------------------------------------------------------------------------------------------------
# 2. MP part
# ------------------------------------------------------------------------------------------------------------------
# Check Sub Problem status -> bounded or unbounded
if SP_dual_sol['status'] == 2 or SP_dual_sol['status'] == 9: # 2 = optimal, 9 = timelimit has been reached
# Add an optimality cut to MP and solve
MP.update_MP(pv_trajectory=pv_worst_case_from_SP, iteration=iteration)
if printconsole:
print('i = %s : MP with %d variables and %d constraints' % (iteration, len(MP.model.getVars()), len(MP.model.getConstrs())))
# MP.export_model(dir + 'MP_' + str(iteration))
if printconsole:
print('i = %s : solve MP' % (iteration))
MP.solve()
MP_sol = MP.store_solution()
MP.update_sol(MP_sol=MP_sol, i=iteration)
if MP_sol['status'] == 3 or MP_sol['status'] == 4:
print('i = %s : WARNING MP status %s -> Create a new MP, increase big-M value and compute a new PV trajectory from SP' % (iteration, MP_sol['status']))
# MP unbounded or infeasible -> increase big-M's value to get another PV trajectory from the SP
SP_dual = BD_SP(pv_forecast=pv_max, max_dev=max_dev, engagement=engagement, gamma=gamma, heuristic=solver_param['heuristic'], M_neg=M_neg+50)
SP_dual.solve(logfile=logfile, Threads=solver_param['Threads'], MIPFocus=solver_param['MIPFocus'],
TimeLimit=solver_param['TimeLimit'])
SP_dual_sol = SP_dual.store_solution()
# Compute a new worst PV trajectory from the SP dual solution
pv_worst_case_from_SP = [pv_max[i] - SP_dual_sol['z_neg'][i] * max_dev[i] for i in range(nb_periods)]
# Create a new MP
MP = CCG_MP()
MP.model.update()
MP.update_MP(pv_trajectory=pv_worst_case_from_SP, iteration=iteration)
if printconsole:
print('i = %s : MP with %d variables and %d constraints' % (iteration, len(MP.model.getVars()), len(MP.model.getConstrs())))
# MP.export_model(dir + 'MP_' + str(iteration))
if printconsole:
print('i = %s : solve new MP' % (iteration))
MP.solve()
MP_sol = MP.store_solution()
MP.update_sol(MP_sol=MP_sol, i=iteration)
computation_times.append([SP_dual_sol['time_total'], MP_sol['time_total']])
else: # 4 = Model was proven to be either infeasible or unbounded.
print('SP is unbounded: a feasibility cut is required to be added to the Master Problem')
objectives.append([iteration, MP_sol['obj'], SP_dual_sol['obj'], SP_primal_sol['obj']])
# ------------------------------------------------------------------------------------------------------------------
# 3. Update: the engagement, lower and upper bounds using the updated MP
# ------------------------------------------------------------------------------------------------------------------
# Solve the MILP with the worst case trajectory
planner = Planner_MILP(pv_forecast=pv_worst_case_from_SP)
planner.solve()
sol_planner = planner.store_solution()
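        # The deterministic MILP on the worst-case trajectory is used below only as a sanity check of the MP objective; the engagement itself is taken from the MP solution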
# Update engagement
engagement = MP_sol['x']
# Update the lower and upper bounds
        # MP -> gives the lower bound
        # SP dual -> gives the upper bound
epsilon = abs(MP_sol['obj'] - SP_dual_sol['obj'])
print('i = %s : |MP - SP dual| = %.2f €' % (iteration, epsilon))
abs_err = abs(MP_sol['obj'] - sol_planner['obj'])
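        # epsilon_list keeps a fixed-length rolling window of the most recent MP-SP gaps (append the newest value, drop the oldest)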
epsilon_list.append(epsilon)
epsilon_list.pop(0)
if printconsole:
print('i = %s : MP %.2f € SP dual %.2f € -> |MP - SP dual| = %.2f €' % (iteration, MP_sol['obj'], SP_dual_sol['obj'], epsilon))
print('i = %s : MP %.2f € MILP %.2f € -> |MP - MILP| = %.2f €' % (iteration, MP_sol['obj'], sol_planner['obj'], abs_err))
print(epsilon_list)
print(' ')
iteration += 1
# ------------------------------------------------------------------------------------------------------------------
# CCG loop terminated
# ------------------------------------------------------------------------------------------------------------------
if printconsole:
print('-----------CCG ITERATION TERMINATED-----------')
print('Final iteration = %s : MP %.2f € SP dual %.2f € -> |MP - SP dual| = %.2f €' % (iteration-1, MP_sol['obj'], SP_dual_sol['obj'], epsilon))
# Export last MP
MP.export_model(dir + day + '_MP_' + str(warm_start) + '_' + str(int(100 * PARAMETERS['tol_penalty'])) + '_' + str(PARAMETERS['penalty_factor']))
# MP.model.printStats()
# Dump last engagement plan at iteration
dump_file(dir=dir, name=day+'_x_' + str(warm_start)+ '_' + str(int(100 * PARAMETERS['tol_penalty'])) + '_' + str(PARAMETERS['penalty_factor']), file=engagement)
    # Print the total CPU time
t_total = time.time() - t_solve
computation_times = np.asarray(computation_times)
SP_dual_status = np.asarray(SP_dual_status)
SP_primal_status = np.asarray(SP_primal_status)
if printconsole:
print('Total CCG loop t CPU %.1f min' % (t_total / 60))
        print('T CPU (s): Sub Problem max %.1f Master Problem max %.1f' % (computation_times[:, 0].max(), computation_times[:, 1].max()))
        print('nb Sub Problem status 2 %d status 9 %d' % (SP_dual_status[SP_dual_status == 2].shape[0], SP_dual_status[SP_dual_status == 9].shape[0]))
# Store data
objectives = np.asarray(objectives)
df_objectives = pd.DataFrame(index=objectives[:, 0], data=objectives[:, 1:], columns=['MP', 'SP', 'SP_primal'])
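    # one row per iteration: MP objective (lower bound), SP dual objective (upper bound) and SP primal objective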
# store convergence information
conv_inf = dict()
conv_inf['mipgap'] = mipgap
conv_inf['computation_times'] = computation_times
conv_inf['SP_status'] = SP_dual_status
conv_inf['SP_primal_status'] = SP_primal_status
conv_inf['alpha_neg'] = alpha_neg_list
conv_inf['BESS_count'] = BESS_count_list
conv_inf['BESS_charge_discharge'] = BESS_charge_discharge_list
return engagement, df_objectives, conv_inf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compSBM_CGP(N, Nc, M, K, lbde, burn, L_test):\n\n # comp the Adjacency matrix\n A = compSBM_A(Nc, N, lbde)\n\n # comp the polynomial coefficients\n C = compCGP_C(M)\n P = [compCoeff_CGP(i, A, C, N) for i in np.arange(2, M + 1)]\n\n # ----------------------------------------------------------------------- #\n # simulate the time series\n x0 = np.zeros((N, M))\n x = simulate_CGP(x0, P, A, N, M, K + burn + L_test)\n\n # isolate in- and out-of-sample sets\n x = x[:, burn:]\n if L_test != 0:\n x_test = x[:, -L_test:]\n x = x[:, :-L_test]\n\n out = {'A': A,\n 'C': C,\n 'P': P,\n 'x': x,\n 'x_test': x_test}\n\n return out",
"def update_concentrations_batch(self): \n #--- Update the cell concentrations ---\n # dX_i/dt = mu_i*(1-rmp/100)*X_i*(1 - sum(i,(1-rmp/100)X(i))/carrying_capacity) or \n # (X_i(t+dt) - X_i(t))/dt = mu*(1-rmp/100)*X_i(t)*(1 - sum(i,(1-rmp/100)*X_i(t))/carrying_capacity)\n # where rmp is the random mortality percentage\n # If concentration is negative set it to zero\n members_gDW_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_gDW_per_ml = 1 - members_gDW_per_ml_total/self.carrying_capacity['gDW_per_ml']\n if len([member for member in self.community_members if member.organism.cells_per_ml != None]) == len(self.community_members):\n members_cells_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_cells_per_ml = 1 - members_cells_per_ml_total/self.carrying_capacity['cells_per_ml']\n\n for member in self.community_members:\n # We always need gDW_per_ml to update compound concentrations but\n # providing cells_per_ml is optional\n member.organism.gDW_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1-self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t]*self._logisticG_factor_gDW_per_ml*self._dt + member.organism.gDW_per_ml[self._t],0)\n\n if member.organism.cells_per_ml is not None:\n member.organism.cells_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t]*self._logisticG_factor_cells_per_ml*self._dt + member.organism.cells_per_ml[self._t],0)\n\n # Total death rate (** newly added for DMMM_mortality **)\n if member.organism.mu[self._t] < 0:\n # In thise case random_mortality_rate has already been incorporated into mu\n # (see DMMM.py)\n member.organism.total_death_rate[self._t] = member.organism.mu[self._t] \n else:\n member.organism.total_death_rate[self._t] = member.organism.random_mortality_rate\n\n\n #--- Update shared compound concentrations ---\n # dC/dt = f where, f = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool_rate\n # (C(t+dt) - c(t))/dt = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool \n # where, dead_pool_rate = sum(k,-self.cell_pool_factor*cell_pool_concentration_k*total_death_rate_k*X_k)\n # Here, cell_pool_concentration is the concentration of the compound pool \n # per gDW or per cells, which should have already been assigned to each \n # shared compound. The minus sign is because total_death_rate is negative\n # while dead_pool_rate must be non-negative. 
Here, self.cell_pool_factor is\n # the factor that should be multiplied by concentration of that compound \n # (this is because sometimes we want to explore how the growth is affected if \n # the cell pool is higher than the ones reported experimentally)\n total_cmps_conc = sum([cmp.concentration[self._t] for cmp in self.shared_compounds])\n self._logisticG_factor_cmps = 1 - total_cmps_conc/self.carrying_capacity['compounds_mM']\n self._f = dict([(cmp,None) for cmp in self.shared_compounds])\n\n if not hasattr(self,'cell_pool_factor'):\n self.cell_pool_factor = 1\n\n for shared_cmp in self.shared_compounds:\n dead_pool_rate = -sum([self.cell_pool_factor*shared_cmp.cell_pool_concentration[member.organism.id]*member.organism.total_death_rate[self._t]*1000*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n if dead_pool_rate < 0:\n raise userError('dead_pool_rate is negative')\n\n f = sum([r.flux[self._t]*1000*r.model.organism.gDW_per_ml[self._t] for r in shared_cmp.reactions]) + dead_pool_rate\n self._f[shared_cmp] = f\n\n conc = f*self._logisticG_factor_cmps*self._dt + shared_cmp.concentration[self._t]\n\n if conc >= 0 or (conc < 0 and abs(conc) <= 1e-9):\n conc = max(conc,0)\n\n shared_cmp.concentration[self._t + self._dt] = conc",
"def make_mp_forward_particles(cbc_path, min_part=1, max_part=None):\n # particles must have a label as a string otherwise it terminates\n # mass weighted particles for each of the models for the sfr/well(races/rivers) and rch\n\n print('reading cbc')\n # calculate the flow into each top layer cell\n indata = flopy.utils.CellBudgetFile(cbc_path)\n rch = indata.get_data(kstpkper=(0, 0), text='recharge', full3D=True)[0][0].filled(0) # take only the first row\n # take only the first row, no particles in southwertern boundary for wells\n well = indata.get_data(kstpkper=(0, 0), text='wells', full3D=True)[0][0].filled(0)\n sfr = indata.get_data(kstpkper=(0, 0), text='stream leakage', full3D=True)[0][0].filled(\n 0) # take only the first row\n well[well < 0] = 0\n sfr[sfr < 0] = 0\n flow = rch + well + sfr\n flow[smt.get_no_flow(0) != 1] = 0\n\n # generate particles (minimum of 1 per cell)\n num_parts = np.round(flow / flow[flow > 0].min() * min_part).astype(int)\n if max_part is not None:\n num_parts[num_parts > max_part] = int(max_part)\n\n # identify boundary condition types\n bd_type = smt.get_empty_model_grid() # 0=rch,1=well,2==sfr\n temp = np.concatenate((rch[np.newaxis], well[np.newaxis], sfr[np.newaxis]), axis=0).max(axis=0)\n bd_type[np.isclose(rch, temp)] = 0\n bd_type[np.isclose(well, temp)] = 1\n bd_type[np.isclose(sfr, temp)] = 2\n bd_type[(smt.get_no_flow(0) != 1) | np.isclose(temp, 0)] = -1\n\n outdata = flopy.modpath.mpsim.StartingLocationsFile.get_empty_starting_locations_data(num_parts.sum())\n outdata['label'] = 's'\n outdata['k0'] = 0 # I think that particles in flopy are 1 indexed, but 0 means active most layer\n js, iss = np.meshgrid(range(1, smt.cols + 1), range(1, smt.rows + 1)) # this is passed to the file as 1 indexed\n\n idx = bd_type.flatten() != -1\n group_dict = _part_group_cell_mapper(bd_type)\n start_idx = 0\n print('generating particles')\n for l, (num, i, j, bt) in enumerate(\n zip(num_parts.flatten()[idx], iss.flatten()[idx], js.flatten()[idx], bd_type.flatten()[idx])):\n if num == 0:\n raise ValueError('unexpected zero points')\n end_idx = start_idx + num\n # set particle starting location\n outdata['particlegroup'][start_idx:end_idx] = l + 1\n outdata['groupname'][start_idx:end_idx] = '{:03d}_{:03d}'.format(*group_dict[l + 1])\n outdata['i0'][start_idx:end_idx] = i\n outdata['j0'][start_idx:end_idx] = j\n outdata['xloc0'][start_idx:end_idx] = np.random.uniform(size=num)\n outdata['yloc0'][start_idx:end_idx] = np.random.uniform(size=num)\n outdata['zloc0'][start_idx:end_idx] = 1 if bt == 0 else np.random.uniform(size=num)\n start_idx = end_idx\n return outdata, bd_type, num_parts",
"def ACmomentConstraint(p, var):\n \n #extract the polynomial and variables \n x = var[0]\n th = var[1]\n\n\n #Identify support set, prepare for polytope reduction\n #A_pre = np.array(P.monoms())\n #b = np.array(P.coeffs())\n \n # #function to generate parameter coefficients\n # if len(var) == 1:\n # fb = lambda p: p\n # else:\n # fb = sp.lambdify(x, b, \"numpy\")\n if type(p) == list:\n fout = extract_monom(var, p)\n else:\n fout = extract_monom(var,[p])\n fb = fout[\"fb\"]\n A_pre = fout[\"A_pre\"]\n monom_poly = fout[\"monom_poly\"]\n geom = fout[\"geom\"]\n \n #add in constant term?\n z_blank = np.zeros([1, A_pre.shape[1]])\n z_list = [int(z) for z in z_blank.tolist()[0]] #probably a better way to do this\n add_z = []\n \n if z_blank not in A_pre:\n A_pre= np.append(A_pre, z_blank, axis = 0)\n b = np.append(b, 0)\n add_z = z_list\n \n #always add the constant term to the monomial set\n monom_all = A_pre.tolist()\n A = np.ones((A_pre.shape[1] + 1, A_pre.shape[0]), dtype = int)\n A[1:,:] = A_pre.T \n \n #find the support and generators of all monomials \n support = np.array(polytope.interior(A, strict = False))\n half_support = [list(v // 2) for v in support if v.any() and not (v % 2).any()]\n #once again, add back the constant\n\n\n #augmented support set, 1 + half_support + current support\n #TODO: This is incorrect, breaks the lexicographic ordering and many assumptions. Fix this\n #aug_support = monom_all + add_z + [i for i in half_support if i not in monom_all]\n monom_data = np.array(sum([[list(m) for m in monom_poly[i]] for i in range(len(geom)) if not geom[i]], []))\n\n keys_classify = np.lexsort(np.flipud(monom_data.T))\n\n #monom_classify = monom_data[keys_classify, :].tolist()\n monom_classify = monom_data.tolist()\n #for i = range(monom_poly):\n # if geom[i]:\n\n\n all_support = half_support + add_z + monom_classify\n aug_support = np.flip(np.unique(np.array(all_support), axis=0), axis=0).tolist()\n \n \n #lookup table to associate generating indices with monomials\n #fill out the moment constraints\n lookup = {} \n for vi in range(len(aug_support)):\n v = aug_support[vi]\n for ui in range(vi, len(aug_support)):\n u = aug_support[ui]\n s = tuple([u[i] + v[i] for i in range(len(v))])\n if s in lookup:\n lookup[s] += [(ui, vi)]\n else:\n lookup[s] = [(ui, vi)]\n \n M_out = {\"supp\": aug_support, \"monom_all\": monom_all, \"monom_poly\": monom_poly, \"monom_classify\":monom_classify,\n \"cons\": lookup, \"fb\": fb, \"geom\":geom}\n #M_out = {\"supp\" : aug_support, \"half_supp\" : half_support, \"monom\": monom, \"cons\" : lookup, \"fb\": fb} \n \n return M_out",
"def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. < numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3",
"def bcp(self):\r\n logger.info('--\\tbcp')\r\n global gen_debug_info\r\n gen_debug_info.cnt_bcp += 1\r\n logger.info('\\t\\tcnt_bcp: %d' % gen_debug_info.cnt_bcp)\r\n\r\n conflict, ccindex = self.c_array.init_state(self.local_vars.vs)\r\n if conflict is True:\r\n logger.info('\\t\\tfind conflict in c_array.init_state()')\r\n return True, ccindex, -1\r\n\r\n self.need_bcp = True\r\n while self.need_bcp:\r\n self.need_bcp = False\r\n c_array = self.c_array\r\n unitc_i = self.c_array.find_unitc(self.local_vars.vs)\r\n for i, j in unitc_i:\r\n c = self.c_array.clauses[i] # unit clause\r\n vs = self.local_vars.vs[j]\r\n mindex = c_array.c_max_lvl_i[i]\r\n mvs = self.local_vars.vs[mindex]\r\n\r\n # 当出现一个文字出现多个推理时可以有几种不同的实现方式\r\n if vs.value != 0: # 选择第一个推理的\r\n continue\r\n # 选择最小层级推理的\r\n # if vs.value != 0 and self.local_vars.vs[mindex] > vs.level:\r\n # continue\r\n\r\n vs.value = c[j] # the free lit\r\n vs.level = mvs.level\r\n vs.implied = True\r\n self.local_vars.reason[j] = i + 1\r\n c_array.c_isreason[i] = True\r\n\r\n str1 = '\\t\\tc%d ' % (i + 1)\r\n str1 += 'var %d gvar %d '\\\r\n % (j + 1, self.local_vars.global_var[j] + 1)\r\n str1 += 'value %d level %d' % (c[j], vs.level)\r\n logger.info(str1)\r\n logger.debug(gen_debug_info.one_clause(self.c_array.clauses[i],\r\n self.local_vars,\r\n '\\t\\t'))\r\n conflict, ccindex = \\\r\n c_array.update_state(j, self.local_vars.vs)\r\n\r\n self.need_bcp = True\r\n if conflict is True:\r\n # find conflict\r\n return True, ccindex, j\r\n return False, 0, 0",
"def _formulate_as_vc(G, mipgap=0, threads=1, timelimit=None, memlimit=None):\n\n # Calculate number of nodes\n n = len(G.nodes())\n\n # Create a new cplex problem and set parameters\n prob = cplex.Cplex()\n prob.parameters.mip.tolerances.mipgap.set(mipgap)\n prob.parameters.emphasis.numerical.set(True)\n prob.parameters.parallel.set(\n prob.parameters.parallel.values.opportunistic\n )\n prob.parameters.threads.set(threads)\n if timelimit is not None:\n prob.parameters.timelimit.set(timelimit)\n if memlimit is not None:\n prob.parameters.workmem.set(memlimit)\n\n # Throw away default cplex output\n prob.set_results_stream(os.devnull)\n\n # Construct node variables\n node_vars = {\n node: 'c{}'.format(node)\n for node in G.nodes()\n }\n node_vars_values = list(node_vars.values())\n\n # Add vertices as variables to the LP\n prob.variables.add(\n names=node_vars_values,\n types=([prob.variables.type.integer] * n)\n )\n\n # Add constraints for edges\n constraints = [\n cplex.SparsePair(\n ind=(node_vars[e[0]], node_vars[e[1]]),\n val=[1, 1]\n )\n for e in G.edges()\n ]\n num_constraints = len(constraints)\n prob.linear_constraints.add(\n lin_expr=constraints,\n senses=['G'] * num_constraints,\n rhs=[1] * num_constraints\n )\n\n # Set LP objective\n prob.objective.set_sense(prob.objective.sense.minimize)\n prob.objective.set_linear(list(zip(node_vars_values, [1] * n)))\n\n # Return\n return prob, node_vars_values",
"def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))",
"def run(self, C, p0 = None):\n global algorithm \n algorithm = AdaptiveMM(self.g, C, p0 = p0, lambda0 = 2000)\n solve()",
"def compCGP_C(M):\n # we store the coedd as a lower triangular matrix\n # random polynomial coefficients\n c = 0.5 * np.random.uniform(-1.0, -0.45, size=(M + 1, M + 1)) +\\\n 0.5 * np.random.uniform(0.45, 1.0, size=(M + 1, M + 1))\n for i in np.arange(M + 1):\n c[i, :] /= 2**(np.arange(M + 1) + i)\n c /= 1.5\n c = np.tril(c)\n c[0, 0] = 0\n c[1, 0] = 0\n c[1, 1] = 1\n\n return c",
"def semi_analytical_marginal_probj(param_inst, cash_tt, solu_dict,\n cur_col_prefix_space='_'):\n\n choice_set_list = param_inst.model_option['choice_set_list']\n choice_names_use = param_inst.model_option['choice_names_full_use']\n choice_names_use = param_inst.model_option['choice_names_use']\n\n each_j_prob = solu_dict['each_j_prob']\n ktp_opti_allJ = solu_dict['ktp_opti_allJ']\n btp_opti_allJ = solu_dict['btp_opti_allJ']\n consumption_opti_allJ = solu_dict['consumption_opti_allJ']\n\n # btp_fb_opti_allJ = solu_dict['btp_fb_opti_allJ']\n # btp_ib_opti_allJ = solu_dict['btp_ib_opti_allJ']\n # btp_fs_opti_allJ = solu_dict['btp_fs_opti_allJ']\n # btp_il_opti_allJ = solu_dict['btp_il_opti_allJ']\n\n trans_prob_list = []\n simu_output_pd_allj = 0\n\n for ctr, choicej in enumerate(choice_set_list):\n\n cur_col_prefix = choice_names_use[ctr]\n logger.info('ctr,choicej,cur_col_prefix:\\n%s,%s,%s',\n str(ctr), str(choicej), str(cur_col_prefix))\n\n btp_opti = btp_opti_allJ[:, ctr]\n ktp_opti = ktp_opti_allJ[:, ctr]\n prob_cur = each_j_prob[:, ctr]\n consumption_opti = consumption_opti_allJ[:, ctr]\n\n '''\n Get columns at centered interpolating grid points for:\n 'cash_grid_centered'\n 'marginal_dist'\n 'btp_opti_grid',\n 'ktp_opti_grid',\n 'consumption_opti_grid' \n '''\n logger.info('Solve, P(COH|j), P(COH|COH,j), ctr:%s, name:%s', str(ctr), cur_col_prefix)\n simu_output_pd_curj, trans_prob_curj = condianaly.semi_analytical_marginal(\n param_inst,\n cash_tt, ktp_opti, btp_opti, consumption_opti,\n each_j_prob=prob_cur,\n trans_prob_only=True,\n cur_col_prefix=cur_col_prefix + cur_col_prefix_space)\n\n '''Store Choice J Transition Prob'''\n trans_prob_list.append(trans_prob_curj)\n\n '''Update Column Names'''\n if (ctr == 0):\n simu_output_pd_allj = simu_output_pd_curj\n else:\n '''Cumulate'''\n simu_output_pd_allj = pd.concat([simu_output_pd_allj,\n simu_output_pd_curj], axis=1)\n\n \"\"\"\n D. Add columns for each j of J for fbibfsil\n \"\"\"\n # D1. Get Columns from just created panda files, j specific columns\n steady_var_suffixes_dict = hardstring.get_steady_var_suffixes()\n btp_opti_grid_allJ_cols = [col for col in simu_output_pd_allj.columns if\n cur_col_prefix_space + steady_var_suffixes_dict['btp_opti_grid'] in col]\n ktp_opti_grid_allJ_cols = [col for col in simu_output_pd_allj.columns if\n cur_col_prefix_space + steady_var_suffixes_dict['ktp_opti_grid'] in col]\n\n # D2. Matrix from columns\n btp_opti_grid_allJ = simu_output_pd_allj[btp_opti_grid_allJ_cols].to_numpy()\n ktp_opti_grid_allJ = simu_output_pd_allj[ktp_opti_grid_allJ_cols].to_numpy()\n\n # D3. Get fb ib fs il specific matrixes\n btp_fb_opti_allJ, btp_ib_opti_allJ, btp_fs_opti_allJ, btp_il_opti_allJ = \\\n fbibfsis.genfibs_btpstack(choice_set_list, btp_opti_grid_allJ, ktp_opti_grid_allJ, param_inst)\n\n # D4. Add to simu_output_pd_allj panda\n fb_ib_fs_il_steady_var_key_list = ['btp_fb_opti_grid', 'btp_ib_opti_grid',\n 'btp_fs_opti_grid', 'btp_il_opti_grid']\n varnames_list = [choice_names_use[ctr] + cur_col_prefix_space + steady_var_suffixes_dict[fbibfsil_stdykey]\n for fbibfsil_stdykey in fb_ib_fs_il_steady_var_key_list\n for ctr, choicej in enumerate(choice_set_list)]\n\n # D5. Additional Panda columns with fb ib fs il information\n varnames = \",\".join(map(str, varnames_list))\n varmat = np.column_stack((btp_fb_opti_allJ, btp_ib_opti_allJ, btp_fs_opti_allJ, btp_il_opti_allJ))\n simu_output_pd_allj_fbibfsil = proj_sys_sup.debug_panda(varnames, varmat, export_panda=False, log=False)\n\n # D6. 
Concatenate together, join more columns together. \n simu_output_pd_allj = pd.concat([simu_output_pd_allj, simu_output_pd_allj_fbibfsil], axis=1)\n\n \"\"\"\n E0. Grid Column\n \"\"\"\n cash_grid_centered_cols = [col for col in simu_output_pd_allj.columns\n if steady_var_suffixes_dict['cash_grid_centered'] in col]\n simu_output_pd_allj['cash_grid_centered'] = simu_output_pd_allj[cash_grid_centered_cols[0]]\n\n \"\"\"\n E1. Adjust Probabilities due to Interpolation issue over J choices\n \"\"\"\n logger.info('simu_output_pd_allj.columns:\\n%s', simu_output_pd_allj.columns)\n prob_cols = [col for col in simu_output_pd_allj.columns if 'probJ_opti_grid' in col]\n logger.info('prob_cols:\\n%s', prob_cols)\n probJ_matrix = simu_output_pd_allj[prob_cols].to_numpy()\n logger.info('probJ_matrix:\\n%s', probJ_matrix)\n\n '''These are actually not needed, perfect symmatery'''\n probJ_matrix_rowsum = np.reshape(np.sum(probJ_matrix, axis=1), (-1, 1))\n logger.info('probJ_matrix_rowsum:\\n%s', probJ_matrix_rowsum)\n probJ_matrix_rescale_sum1 = probJ_matrix / probJ_matrix_rowsum\n logger.info('probJ_matrix_rescale_sum1:\\n%s', probJ_matrix_rescale_sum1)\n\n \"\"\"\n E2. Overall Conditional probabilities\n \"\"\"\n trans_prob_wgtJ = 0\n trans_prob_dict_allj = {}\n for ctr, choicej in enumerate(choice_set_list):\n '''E2a. Transition Probability current j'''\n trans_prob_curj = trans_prob_list[ctr]\n\n '''E2b. Choice Probability over J'''\n prob_opti_grid = probJ_matrix_rescale_sum1[:, ctr]\n\n '''E2c. Update current column with reweighted sum to 1 choice J prob'''\n simu_output_pd_allj[prob_cols[ctr]] = prob_opti_grid\n\n '''E2d. Weighted Discrete Transition Probability'''\n trans_prob_curj_wgted = trans_prob_curj * np.reshape(prob_opti_grid, (-1, 1))\n logger.debug('trans_prob_curj:\\n%s', trans_prob_curj)\n logger.debug('prob_opti_grid:\\n%s', prob_opti_grid)\n logger.debug('trans_prob_curj_wgted:\\n%s', trans_prob_curj_wgted)\n\n '''E2e. Update Column Names'''\n trans_prob_dict_allj[choicej] = trans_prob_curj\n if (ctr == 0):\n trans_prob_wgtJ = trans_prob_curj_wgted\n else:\n '''Cumulate'''\n trans_prob_wgtJ = trans_prob_wgtJ + trans_prob_curj_wgted\n\n logger.info('trans_prob_wgtJ:\\n%s', trans_prob_wgtJ)\n logger.info('np.sum(trans_prob_wgtJ):\\n%s', np.sum(trans_prob_wgtJ, axis=1))\n\n return simu_output_pd_allj, trans_prob_wgtJ, trans_prob_dict_allj",
"def calc_Cinv_CCGT(CC_size_W, CCGT_cost_data):\n\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if CC_size_W < CCGT_cost_data['cap_min'][0]:\n CC_size_W = CCGT_cost_data['cap_min'][0]\n CCGT_cost_data = CCGT_cost_data[\n (CCGT_cost_data['cap_min'] <= CC_size_W) & (CCGT_cost_data['cap_max'] > CC_size_W)]\n\n\n #costs of connection\n connection_costs = ngas.calc_Cinv_gas(CC_size_W)\n\n Inv_a = CCGT_cost_data.iloc[0]['a']\n Inv_b = CCGT_cost_data.iloc[0]['b']\n Inv_c = CCGT_cost_data.iloc[0]['c']\n Inv_d = CCGT_cost_data.iloc[0]['d']\n Inv_e = CCGT_cost_data.iloc[0]['e']\n Inv_IR = CCGT_cost_data.iloc[0]['IR_%']\n Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']\n Inv_OM = CCGT_cost_data.iloc[0]['O&M_%'] / 100\n\n InvC = Inv_a + Inv_b * (CC_size_W) ** Inv_c + (Inv_d + Inv_e * CC_size_W) * log(CC_size_W)\n\n Capex_a_CCGT_USD = calc_capex_annualized((InvC+connection_costs), Inv_IR, Inv_LT)\n Opex_fixed_CCGT_USD = InvC * Inv_OM\n Capex_CCGT_USD = InvC\n\n return Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD",
"def FindCA(e,PV,F,w,m):\n global MAX\n le=len(e)\n if le > MAX:\n MAX = le\n #print the new best columns and how many they are\n system('clear')\n print \"So far, the best columns are %s and MAX is %d\" % (e,MAX)\n PV=ComputeCand(e,PV,F,w,m)\n lp=len(PV)\n if le+lp > MAX:\n for i in range(le+lp-MAX):\n newe=e+[PV[i]]\n test=collections.deque(columns2bin(newe, w))\n if is_necklace(test):\n FindCA(newe,PV[i+1:],F,w,m)\n else:\n break",
"def generate_all_constraints(traj,policy,mdp):\n #print('generating all constraints')\n constraints = []\n traj_tmp = list(traj)\n #print(traj_tmp)\n #compute halfspace normals for all (s,a) pairs until terminal\n while(len(traj_tmp)>1):\n constraints += generate_half_space_normals(traj_tmp,policy,mdp)\n #print(constraints)\n traj_tmp.pop(0)\n #print('after pop',traj_tmp)\n return constraints",
"def cvstem(self):\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun1 = self.Cfun\n self.Cfun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Gw).parameters) == 1):\n fun2 = self.Gw\n self.Gw = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n self.c_over = self.matrix_2bound(self.Cfun)\n self.g_over = self.matrix_2bound(self.Gw)\n if (len(sig(self.Bw).parameters) == 1):\n fun3 = self.Bw\n self.Bw = lambda x,p: fun3(x)\n self.b_over = self.matrix_2bound(self.Bw)\n self.linesearch()\n alp = self.alp_opt\n Nx = self.Nx\n Nsplit = 1\n Np = int(Nx/Nsplit)\n Nr = np.remainder(Nx,Nsplit)\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Nx,Nxp))\n xs_opt,ps_opt,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n Ws_opt = []\n chi_opt = 0\n nu_opt = 0\n print(\"========================================================\")\n print(\"====== SAMPLING OF CONTRACTION METRICS BY CV-STEM ======\")\n print(\"========================================================\")\n for p in range(Np):\n if np.remainder(p,int(Np/10)) == 0:\n print(\"# sampled metrics: \",p*Nsplit,\"...\")\n xs_p = xs_opt[Nsplit*p:Nsplit*(p+1),:]\n ps_p = ps_opt[Nsplit*p:Nsplit*(p+1),:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n if Nr != 0:\n print(\"# samples metrics: \",Nx,\"...\")\n xs_p = xs_opt[Nsplit*(p+1):Nx,:]\n ps_p = ps_opt[Nsplit*(p+1):Nx,:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n self.xs_opt = xs_opt\n self.ps_opt = ps_opt\n self.Ws_opt = Ws_opt\n self.chi_opt = chi_opt\n self.nu_opt = nu_opt\n if self.iEC == \"est\":\n self.Jcv_opt = (self.d1_over*self.b_over*np.sqrt(chi_opt)\\\n +self.d2_over*self.c_over*self.g_over*nu_opt)/alp\n print(\"Optimal steady-state estimation error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n elif self.iEC == \"con\":\n self.Jcv_opt = self.d1_over*self.b_over*np.sqrt(chi_opt)/alp\n print(\"Optimal steady-state tracking error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.M2cholM()\n path = \"models/optvals/\"+self.fname\n if os.path.exists(path) == False:\n try:\n os.makedirs(path)\n except: \n raise OSError(\"Creation of directory %s failed\" %path)\n else:\n print (\"Successfully created directory %s \" %path)\n else:\n print (\"Directory %s already exists\" %path)\n np.save(path+\"/alp_opt.npy\",alp)\n np.save(path+\"/chi_opt.npy\",self.chi_opt)\n np.save(path+\"/nu_opt.npy\",self.nu_opt)\n np.save(path+\"/Jcv_opt.npy\",self.Jcv_opt)\n print(\"========================================================\")\n print(\"==== SAMPLING OF CONTRACTION METRICS BY CV-STEM END ====\")\n print(\"========================================================\\n\\n\")\n pass",
"def __addColumnConstraints(self):\n for x in range(self.width):\n plusTarget = self.columnPlusCounts[x]\n minusTarget = self.columnMinusCounts[x]\n plusTotal = 0\n minusTotal = 0\n for y in range(self.height):\n g = self.grid[(x, y)]\n plusTotal = plusTotal + If(g == Magnets.PLUS, 1, 0)\n minusTotal = minusTotal + If(g == Magnets.MINUS, 1, 0)\n if plusTarget != None:\n self.solver.add(plusTotal == plusTarget)\n if minusTarget != None:\n self.solver.add(minusTotal == minusTarget)",
"def process_component(COMP, G, max_k, min_length, max_CV, SEQS, bamfile, pool, use_scores=False, use_genes=False, num_procs=1):\n\n ###############MOVED FROM OUTER CODE ON WHOLE G\n if use_scores: remove_hi_confidence_chromosome(COMP) ##################################\n\n # initialize shortest path set considered\n path_count = 0\n seen_unoriented_paths = set([])\n paths_set = set([]) #the set of paths found\n\n\n # first look for paths starting from the nodes annotated with plasmid genes\n if use_genes:\n plasmid_gene_nodes = get_plasmid_gene_nodes(COMP)\n potential_plasmid_mass_tuples = [(get_spades_base_mass(COMP,nd),nd) for nd in plasmid_gene_nodes]\n potential_plasmid_mass_tuples.sort(key = lambda n: n[0])\n while potential_plasmid_mass_tuples: # could be removing other nodes from the list\n top_node = potential_plasmid_mass_tuples.pop() # highest mass node\n top_node_name = top_node[1]\n path = get_high_mass_shortest_path(top_node_name,COMP,use_scores,use_genes) #######\n if path is None: continue\n # check coverage variation\n path_CV = get_wgtd_path_coverage_CV(path,G,SEQS,max_k_val=max_k)\n logger.info(\"Plasmid gene path: %s, CV: %4f\" % (str(path),path_CV))\n if path_CV <= max_CV and is_good_cyc(path,G,bamfile):\n logger.info(\"Added plasmid gene path %s\" % (str(path)))\n\n # prevent checking nodes that have been removed\n i = 0\n while i < len(potential_plasmid_mass_tuples):\n if potential_plasmid_mass_tuples[i][1] in path or \\\n rc_node(potential_plasmid_mass_tuples[i][1]) in path:\n potential_plasmid_mass_tuples.pop(i)\n else: i += 1\n\n seen_unoriented_paths.add(get_unoriented_sorted_str(path))\n before_cov, _ = get_path_mean_std(path, G, SEQS, max_k)\n covs = update_path_coverage_vals(path, G, SEQS, max_k)\n update_path_with_covs(path, COMP, covs)\n path_count += 1\n paths_set.add((path,before_cov))\n\n else:\n logger.info(\"Did not add plasmid gene path: %s\" % (str(path)))\n\n # then look for circular paths that start from hi confidence plasmid nodes\n if use_scores:\n potential_plasmid_nodes = get_hi_conf_plasmids(COMP)\n potential_plasmid_mass_tuples = [(get_spades_base_mass(COMP,nd),nd) for nd in potential_plasmid_nodes]\n potential_plasmid_mass_tuples.sort(key = lambda n: n[0])\n while potential_plasmid_mass_tuples: # could be removing other nodes from the list\n top_node = potential_plasmid_mass_tuples.pop() # highest mass node\n top_node_name = top_node[1]\n path = get_high_mass_shortest_path(top_node_name,COMP,use_scores,use_genes)\n if path is None: continue\n # check coverage variation\n path_CV = get_wgtd_path_coverage_CV(path,G,SEQS,max_k_val=max_k)\n logger.info(\"Hi conf path: %s, CV: %4f\" % (str(path),path_CV))\n\n if path_CV <= max_CV and is_good_cyc(path,G,bamfile):\n logger.info(\"Added hi conf path %s\" % (str(path)))\n\n # prevent checking nodes that have been removed\n i = 0\n while i < len(potential_plasmid_mass_tuples):\n if potential_plasmid_mass_tuples[i][1] in path or \\\n rc_node(potential_plasmid_mass_tuples[i][1]) in path:\n potential_plasmid_mass_tuples.pop(i)\n else: i += 1\n\n seen_unoriented_paths.add(get_unoriented_sorted_str(path))\n before_cov, _ = get_path_mean_std(path, G, SEQS, max_k)\n #before_cov, _ = get_path_mean_std(path, COMP, SEQS, max_k)\n covs = update_path_coverage_vals(path, G, SEQS, max_k)##########################\n update_path_with_covs(path, COMP, covs) ####################################\n path_count += 1\n paths_set.add((path,before_cov))\n\n else:\n logger.info(\"Did not add hi-conf path: %s\" % 
(str(path)))\n\n # 3rd step. Run Recycler algorithm that looks for circular high mass shortest\n # paths and accept them as plasmid predictions if the coverages and mate pairs\n # match the required thresholds\n#######################################################################################\n#######################################################################################\n\n\n paths = enum_high_mass_shortest_paths(COMP, pool, use_scores,use_genes,seen_unoriented_paths)\n last_path_count = 0\n last_node_count = 0\n\n # continue as long as you either removed a low mass path\n # from the component or added a new path to final paths\n while(path_count!=last_path_count or\\\n len(COMP.nodes())!=last_node_count):\n\n last_node_count = len(COMP.nodes())\n last_path_count = path_count\n\n # make tuples of (CV, path)\n path_tuples = []\n for p in paths:\n if len(get_seq_from_path(p, SEQS, max_k_val=max_k)) < min_length:\n seen_unoriented_paths.add(get_unoriented_sorted_str(p))\n logger.info(\"Num seen paths: %d\" % (len(seen_unoriented_paths)))\n continue\n path_tuples.append((get_wgtd_path_coverage_CV(p,G,SEQS,max_k_val=max_k), p))\n\n logger.info(\"Num path tuples: %d\" % (len(path_tuples)))\n if(len(path_tuples)==0): break\n\n # sort in ascending CV order\n path_tuples.sort(key=lambda path: path[0])\n\n for pt in path_tuples:\n curr_path = pt[1]\n curr_path_CV = pt[0]\n logger.info(\"Path: %s\" % (\",\".join(curr_path)))\n if get_unoriented_sorted_str(curr_path) not in seen_unoriented_paths:\n\n ## only report if low CV and matches mate pair info\n if (curr_path_CV <= (max_CV) and \\\n is_good_cyc(curr_path,G,bamfile)):\n\n logger.info(\"Added path %s\" % \", \".join(curr_path))\n logger.info(\"\\tCV: %4f\" % curr_path_CV)\n seen_unoriented_paths.add(get_unoriented_sorted_str(curr_path))\n #before_cov, _ = get_path_mean_std(curr_path, COMP, SEQS, max_k)\n before_cov, _ = get_path_mean_std(curr_path, G, SEQS, max_k)\n covs = update_path_coverage_vals(curr_path, G, SEQS, max_k)\n update_path_with_covs(curr_path, COMP, covs)\n path_count += 1\n paths_set.add((curr_path,before_cov))\n break\n\n else:\n logger.info(\"Did not add path: %s\" % (\", \".join(curr_path)))\n logger.info(\"\\tCV: %4f\" % curr_path_CV)\n if curr_path_CV > max_CV:\n break # sorted by CV\n else: # not good mate pairs\n seen_unoriented_paths.add(get_unoriented_sorted_str(curr_path))\n\n # recalculate paths on the component\n print(str(len(COMP.nodes())) + \" nodes remain in component\")\n logger.info(\"Remaining nodes: %d\" % (len(COMP.nodes())))\n paths = enum_high_mass_shortest_paths(COMP, pool, use_scores,use_genes,seen_unoriented_paths)\n\n #end while\n return paths_set",
"def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin",
"def main(N,N_p,T,lb,ub,prob,N_vars,F_min,F_const,P_c_min,P_c_max):\n\n lb,ub,f,fu,D,U,P = initDE(N_p,lb,ub,prob)\n if N_p < 4:\n raise Exception(\"Sorry, there must be atleast a population of 4. Reccomended 20\")\n for t in np.arange(T):\n for i in np.arange(N_p):\n V = mutation(i,N_p,t,T,P,N_vars,F_min,F_const)\n\n U=crossover(f,P_c_min,P_c_max,i,D,V,P,U)\n\n for j in np.arange(N_p): \n N,f,P = boundgreed(N,j,U,P,f,fu,ub,lb,prob)\n\t\n\t\t#if N == 500:\n\t\t\t#break\n best_of_f= min(f)\n globopt = P[f.argmin()]\n return N,best_of_f, globopt[:N_vars]",
"def prepare_each(self, model, wngrid):\n\n self._total_cia = len(self.ciaPairs)\n self._nlayers = model.nLayers\n self._ngrid = wngrid.shape[0]\n self.info('Computing CIA ')\n\n sigma_cia = np.zeros(shape=(model.nLayers, wngrid.shape[0]))\n\n chemistry = model.chemistry\n\n for pairName in self.ciaPairs:\n cia = self._cia_cache[pairName]\n sigma_cia[...] = 0.0\n\n cia_factor = chemistry.get_gas_mix_profile(cia.pairOne) * \\\n chemistry.get_gas_mix_profile(cia.pairTwo)\n\n for idx_layer, temperature in enumerate(model.temperatureProfile):\n _cia_xsec = cia.cia(temperature, wngrid)\n sigma_cia[idx_layer] += _cia_xsec*cia_factor[idx_layer]\n self.sigma_xsec = sigma_cia\n yield pairName, sigma_cia",
"def P2G_func(self, dt, P):\n p_C = ti.static(self.p_C)\n p_v = ti.static(self.p_v)\n p_x = ti.static(self.p_x)\n g_m = ti.static(self.g_m)\n g_v = ti.static(self.g_v)\n p_F = ti.static(self.p_F)\n p_Jp = ti.static(self.p_Jp)\n\n base = ti.floor(g_m.getG(p_x[P] - 0.5 * g_m.dx)).cast(Int)\n fx = g_m.getG(p_x[P]) - base.cast(Float)\n\n # Here we adopt quadratic kernels\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n # dw = [fx - 1.5, -2.0 * (fx - 1), fx - 0.5]\n\n # # TODO affine would do this in P2G.. why\n # p_F[P] = (ti.Matrix.identity(Int, self.dim) + dt * p_C[P]) @ p_F[P]\n\n force = ti.Matrix.zero(Float, self.dim, self.dim)\n # want to decrease branching\n if self.p_material_id[P] == MaType.elastic:\n force = self.elasticP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.liquid:\n force = self.liquidP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.snow:\n force = self.snowP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.sand:\n force = self.sandP2Gpp(P, dt)\n\n affine = force + self.cfg.p_mass * p_C[P]\n for offset in ti.static(ti.grouped(self.stencil_range3())):\n # print(\"P2G: \", offset)\n dpos = g_m.getW(offset.cast(Float) - fx)\n\n weight = 1.0\n for d in ti.static(range(self.dim)):\n weight *= w[offset[d]][d]\n\n # dweight = ts.vecND(self.dim, self.cfg.inv_dx)\n # for d1 in ti.static(range(self.dim)):\n # for d2 in ti.static(range(self.dim)):\n # if d1 == d2:\n # dweight[d1] *= dw[offset[d2]][d2]\n # else:\n # dweight[d1] *= w[offset[d2]][d2]\n\n # force = - self.cfg.p_vol * kirchoff @ dweight\n # TODO ? AFFINE\n # g_v[base + offset] += self.cfg.p_mass * weight * (p_v[P] + p_C[P] @ dpos) # momentum transfer\n # TODO Got lots of simultaneous atomic here\n g_v[base + offset] += weight * (self.cfg.p_mass * self.p_v[P] + affine @ dpos)\n g_m[base + offset] += weight * self.cfg.p_mass\n\n # g_v[base + offset] += dt * force",
"def collapsed_gibbs_sample(self, X):\n K = self.K # number of topics\n M, V = X.shape\n alpha = self.alpha\n lmda = self.lmda\n topics = np.arange(0, K)\n\n #initialize everything uniformly\n # KxV dense matrix (used like beta)\n\n C = np.zeros(shape=(K, V), dtype=float)\n props = np.zeros(shape=(M, K), dtype=float)\n\n #Current state\n Ns = np.array(range(M), dtype=object)\n #Running sum\n MC_z = np.array(range(M), dtype=object)\n MC_c = np.zeros(shape=(K, V), dtype=float)\n\n for d in range(M):\n #allocate topics from prior\n word_indices = X[d, :].nonzero()[1]\n p = np.random.dirichlet(np.ones(K) * alpha)\n #TODO: Remove the p argument to get completely random topic assignments\n random_ks = np.random.choice(topics, size=len(word_indices), p=p)\n N_d = sp.coo_matrix((np.ones(len(word_indices)),\n (word_indices, random_ks)), shape=(V, K)).tolil()\n C = C + N_d.A.T\n Ns[d] = N_d\n MC_z[d] = sp.coo_matrix((V, K)).tolil()\n\n log_Xs = []\n perplexities = []\n for epoch in xrange(self.nr_em_epochs):\n print \"Epoch\", epoch\n for d in np.random.permutation(np.arange(M)):\n x = X[d]\n N_d = Ns[d]\n for v in np.nonzero(x)[1]:\n old_z_n = N_d[v, :].nonzero()[1][0]\n\n except_v = [v_prime for v_prime in np.nonzero(x)[1] if v_prime != v]\n N_d_except_v = N_d[except_v, :]\n C_except_v = np.copy(C[:, v])\n C_except_v[old_z_n] = C_except_v[old_z_n] - 1\n\n p = (np.sum(N_d_except_v.A, axis=0) + alpha) *\\\n ((C_except_v + lmda) / (np.sum(C, axis=1) -1 + V*lmda))\n p = p/np.sum(p)\n z_n = np.random.choice(topics, p = p)\n N_d[v, old_z_n] = 0\n N_d[v, z_n] = 1\n C[old_z_n, v] -= 1\n C[z_n, v] += 1\n Ns[d] = N_d\n MC_z[d] += N_d\n\n MC_c += C\n word_props = (MC_c.T / np.sum(MC_c, axis=1)).T\n log_X = 0\n for d in range(M):\n props_d = np.sum(MC_z[d].A, axis=0)\n props_d /= float(np.sum(props_d))\n props[d] = props_d\n ixw = np.nonzero(X[d, :])[1]\n log_X += np.sum(_doc_probability_from_p_of_z(props_d, word_props[:, ixw]))\n\n log_Xs.append(log_X)\n print log_X\n perplexities.append(self._perplexity(X, log_X))\n\n\n return props, word_props, log_Xs, perplexities",
"def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1",
"def bfgs_method(x0, eps=1e-6, H0=np.eye(18),c1=1e-4):\n k = 0 # initialize num of outer iterations.\n inner_k = 0 # initialize inner k iteration.\n old_xk = None\n alpha_original = 1\n alpha = np.copy(alpha_original)\n xk = x0 # intitialize x.\n Hk = H0 # initialize H, positive definite matrix.\n I = np.eye(len(x0)) # idenitity matrix of 2 by 2.\n\n alpha_vec = []\n f_vec = []\n grad_vec = []\n inner_k = []\n conv_c = []\n\n while np.linalg.norm(rosen_der(xk)) > eps:\n pk = -Hk @ rosen_der(xk)\n\n xk_next = xk + alpha * pk\n ink = 0\n print(xk)\n while rosen(xk_next) > rosen(xk) + c1 * alpha * (pk.T @ rosen_der(xk)):\n \"\"\" find a step size that will satisfy Armijo-Goldstein inequality. Modify alpha. \"\"\"\n alpha = 0.1* alpha\n xk_next = xk + alpha * pk\n ink += 1\n\n inner_k.append(abs(int(ink)))\n\n xk_next = xk + alpha * pk\n\n sk = xk_next - xk\n\n yk = rosen_der(xk_next) - rosen_der(xk)\n\n rho = 1 / (yk.T @ sk)\n\n Hk = np.copy((I - rho * sk @ yk.T) @ Hk @ (I - rho * yk @ sk.T) + rho * sk @ sk.T)\n\n old_xk = np.copy(xk)\n xk = np.copy(xk_next)\n\n alpha_vec.append(alpha)\n f_vec.append(rosen(xk))\n grad_vec.append(np.linalg.norm(rosen_der(xk)))\n alpha = np.copy(alpha_original)\n print(f_vec[-1])\n\n k += 1\n\n return xk, k, inner_k, alpha_vec, f_vec, grad_vec",
"def DC(s,theta=0,grid='eq',num_corr=None):\n if num_corr is None:\n num_corr = s\n\n # Choose the grid:\n if grid=='eq':\n t=snp.arange(s+1)/s # Equispaced\n elif grid=='cheb':\n t=0.5*(np.cos(np.arange(0,s+1)*np.pi/s)+1.) #Chebyshev\n t=t[::-1]\n\n dt=np.diff(t)\n\n alpha=snp.zeros([s*(num_corr+1)+1,s*(num_corr+1)])\n beta=snp.zeros([s*(num_corr+1)+1,s*(num_corr+1)])\n\n w=dcweights(t) #Get the quadrature weights for our grid\n #w[i,j] is the weight of node i for the integral\n #over [x_j,x_j+1]\n\n #first iteration (k=1)\n for i in range(1,s+1):\n alpha[i,i-1] = 1\n beta[i ,i-1] = dt[i-1]\n\n #subsequent iterations:\n for k in range(1,num_corr+1):\n beta[s*k+1,0]=w[0,0]\n for i in range(1,s+1):\n alpha[s*k+1,0]=1\n beta[s*k+1,s*(k-1)+i]=w[i,0]\n\n for m in range(1,s):\n alpha[s*k+m+1,s*k+m] = 1\n beta[s*k+m+1,s*k+m] = theta*dt[m]\n beta[s*k+m+1,0]=w[0,m]\n for i in range(1,s+1):\n beta[s*k+m+1,s*(k-1)+i]=w[i,m]\n if i==m:\n beta[s*k+m+1,s*(k-1)+i]-=theta*dt[m]\n\n name='Deferred correction method of order '+str(s+1)\n return ExplicitRungeKuttaMethod(alpha=alpha,beta=beta,name=name,order=s+1).dj_reduce()",
"def choice_C(data, L_plus, delta_min, wdw_length, scale, start=1, stop=10,\n step=1, delay=0, L_minus=None, k=None, n=36000, n_series=500, \n epsilon=0.001, block_length=None, BB_method='MBB', confusion=False, \n verbose=True): \n assert BB_method in ['MBB', 'NBB', 'CBB', 'MABB'], \"Undefined block bootstrap procedure\"\n if BB_method == 'MBB': \n blocks = bb.MBB(data, block_length) \n elif BB_method == 'NBB':\n blocks = bb.NBB(data, block_length) \n elif BB_method == 'CBB':\n blocks = bb.CBB(data, block_length) \n \n if 'blocks' in locals():\n n_blocks = int(np.ceil(n_series/blocks.shape[1]))\n \n wdw_length = int(np.ceil(wdw_length)) #should be integer\n \n delay = int(delay)\n n = int(n)\n assert n > 0, \"n must be strictly positive\"\n if n % 3 == 2: #n should be multiple of 3\n n += 1\n if n % 3 == 1: \n n += 2\n \n if L_minus is None:\n L_minus = -L_plus\n if k is None:\n k = delta_min/2\n \n sign = 1\n n_test = int(n/5) #n testing instances\n n_train = n - n_test #n training instances\n \n n_C = int(np.ceil((stop-start)/step))\n MAPE = np.zeros((n_C)); MSE = np.zeros((n_C)); accuracy = np.zeros((n_C))\n count = 0\n C_values = np.arange(start, stop, step)\n for C in np.arange(start, stop, step):\n \n ### training\n input_train = np.zeros((n_train, wdw_length))\n size_train = np.zeros((n_train))\n form_train = np.zeros((n_train))\n rnd = halfnorm(scale=scale).rvs(size=n_train) + delta_min #size of shifts\n delay_rnd = 0\n for b in range(0, n_train-2, 3):\n \n shift = rnd[b]*sign\n if BB_method == 'MABB': \n series = bb.resample_MatchedBB(data, block_length, n=n_series)\n else:\n series = resample(blocks, replace=True, n_samples=n_blocks).flatten()[:n_series]\n \n #simulate a random delay\n if delay > 0 :\n delay_rnd = np.random.randint(delay) \n\n \n for rnd_form in range(3):\n \n boot = np.copy(series)\n \n if rnd_form == 0: \n boot[wdw_length:] = boot[wdw_length:] + shift\n form_train[b] = 0\n elif rnd_form == 1:\n power = np.random.uniform(1.5,2)\n boot = shift/(n_series) * (np.arange(0,n_series)**power) + boot\n form_train[b] = 1\n else:\n eta = np.random.uniform(np.pi/(wdw_length), 3*np.pi/wdw_length)\n boot = np.sin(eta*np.pi*np.arange(n_series))*shift*boot\n form_train[b] = 2\n \n size_train[b] = shift\n \n input_plus = boot[wdw_length:wdw_length*2]\n C_plus = np.zeros((n_series, 1))\n for i in range(wdw_length + delay_rnd, n_series): #start the monitoring after random delay \n C_plus[i] = max(0, C_plus[i-1] + boot[i] - k)\n if C_plus[i] > L_plus:\n input_plus = boot[i+1-wdw_length:i+1] \n break \n \n input_minus = boot[wdw_length:wdw_length*2]\n C_minus = np.zeros((n_series, 1)) \n for j in range(wdw_length + delay_rnd, n_series):\n C_minus[j] = min(0, C_minus[j-1] + boot[j] + k)\n if C_minus[j] < L_minus:\n input_minus = boot[j+1-wdw_length:j+1] \n break\n \n if i > j: #save first alert recorded\n input_train[b,:] = input_minus\n else:\n input_train[b,:] = input_plus\n \n b += 1\n sign = -sign\n \n ### train the models\n regressor = SVR(C=C, epsilon=epsilon)\n regressor.fit(input_train, size_train)\n clf = svm.SVC(C=C)\n clf.fit(input_train, form_train)\n \n ###testing \n input_test = np.zeros((n_test, wdw_length))\n label_test = np.zeros((n_test))\n form_test = np.zeros((n_test))\n rnd = halfnorm(scale=scale).rvs(size=n_test) + delta_min\n delay_rnd = 0\n for b in range(0, n_test-2, 3):\n \n shift = rnd[b]*sign\n if BB_method == 'MABB': \n series = bb.resample_MatchedBB(data, block_length, n=n_series)\n else:\n series = resample(blocks, replace=True, 
n_samples=n_blocks).flatten()[:n_series]\n \n #simulate a random delay\n if delay > 0 :\n delay_rnd = np.random.randint(delay) \n \n for rnd_form in range(3):\n \n boot = np.copy(series)\n \n if rnd_form == 0:\n boot[wdw_length:] = boot[wdw_length:] + shift\n form_test[b] = 0\n elif rnd_form == 1:\n power = np.random.uniform(1.5,2)\n boot = shift/(n_series) * (np.arange(0,n_series)**power) + boot\n form_test[b] = 1\n else:\n eta = np.random.uniform(np.pi/(wdw_length), 3*np.pi/wdw_length)\n boot = np.sin(eta*np.pi*np.arange(n_series))*shift*boot\n form_test[b] = 2\n label_test[b] = shift\n \n input_plus = boot[wdw_length:wdw_length*2]\n C_plus = np.zeros((n_series, 1))\n for i in range(wdw_length + delay_rnd, n_series):\n C_plus[i] = max(0, C_plus[i-1] + boot[i] - k)\n if C_plus[i] > L_plus:\n input_plus = boot[i+1-wdw_length:i+1] \n break \n \n input_minus = boot[wdw_length:wdw_length*2]\n C_minus = np.zeros((n_series, 1)) \n for j in range(wdw_length + delay_rnd, n_series):\n C_minus[j] = min(0, C_minus[j-1] + boot[j] + k)\n if C_minus[j] < L_minus:\n input_minus = boot[j+1-wdw_length:j+1]\n break\n \n if i > j: #first alert recorded\n input_test[b,:] = input_minus\n else:\n input_test[b,:] = input_plus\n \n b += 1 \n sign = -sign\n \n ### compute accuracy and other precision measures \n label_pred = regressor.predict(input_test)\n label_pred_clf = clf.predict(input_test)\n \n #regressor\n MAPE[count] = (1/len(label_pred)) * sum(np.abs((np.abs(label_test) - np.abs(label_pred))/np.abs(label_test)))*100\n MSE[count] = (1/len(label_pred)) * sum((label_test - label_pred)**2)\n #classifier\n accuracy[count] = sum(label_pred_clf == form_test)*100 / len(label_pred_clf)\n \n ### compute the confusion matrix \n if confusion : \n class_names = ['jump', 'drift', 'oscill.']\n titles_options = [(\"Confusion matrix, without normalization\", None),\n (\"Normalized confusion matrix\", 'true')]\n for title, normalize in titles_options:\n disp = plot_confusion_matrix(clf, input_test, form_test,\n display_labels=class_names,\n cmap=plt.cm.Blues,\n normalize=normalize)\n disp.ax_.set_title(title)\n print(title)\n print(disp.confusion_matrix)\n plt.show()\n \n count += 1\n \n min_MAPE = C_values[np.argmin(MAPE)]\n min_MSE = C_values[np.argmin(MSE)] \n max_accuracy = C_values[np.argmax(accuracy)]\n \n if verbose:\n print('C value that minimizes the MAPE:', min_MAPE)\n print('C value that minimizes the MSE:', min_MSE)\n print('C value that maximizes the accuracy:', max_accuracy)\n \n return min_MAPE, min_MSE, max_accuracy",
"def __init__(self, nb_iter: int, linear_op: LinearOperatorFromMatrix, toeplitz_op: ToeplitzificationOperator,\n rank: int, nb_cadzow_iter: int = 20, denoise_verbose: bool = False, rho: float = np.Inf,\n tol: float = 1e-6, eig_tol: float = 1e-8, init_sol: np.ndarray = None, tau: float = None,\n tau_init_type: str = 'safest', tau_weight: float = 1.5, beta: Optional[float] = None,\n nb_init: int = 1, random_state: int = 1, cadzow_backend: str = 'scipy'):\n super(CPGDAlgorithm, self).__init__(nb_iter=nb_iter, nb_init=nb_init, name='CPGD', random_state=random_state)\n if not isinstance(linear_op, LinearOperatorFromMatrix):\n raise ValueError(\"Argument linear_op must be an instance of LinearOperatorFromMatrix class.\")\n self.linear_op = linear_op\n if not isinstance(toeplitz_op, ToeplitzificationOperator):\n raise ValueError(\"Argument toeplitz_op must be an instance of ToeplitzificationOperator class.\")\n self.toeplitz_op = toeplitz_op\n self.rank = rank\n self.rho = rho\n self.tol = tol\n self.eig_tol = eig_tol\n self.provided_init_sol = init_sol\n self.beta = beta\n if tau is None:\n self.tau_weight = tau_weight\n self.init_tau(type=tau_init_type, weight=self.tau_weight)\n else:\n self.tau_weight = None\n self.tau = tau\n\n self.min_error = np.infty\n self.best_estimate = None\n\n # Initialize Cadzow denoising algorithm\n self.denoise_verbose = denoise_verbose\n self.nb_cadzow_iter = nb_cadzow_iter\n self.cadzow_backend = cadzow_backend\n self.preweight = 1 / np.sqrt(toeplitz_op.gram)\n self.postweight = np.sqrt(toeplitz_op.gram)\n self.denoising_algorithm = CadzowAlgorithm(nb_iter=self.nb_cadzow_iter, toeplitz_op=self.toeplitz_op,\n rank=self.rank, rho=self.rho, tol=self.eig_tol,\n backend=self.cadzow_backend)",
"def calc_cop_CCGT(GT_size_W, T_sup_K, fuel_type):\n\n it_len = 50\n\n # create empty arrays\n range_el_output_CC_W = np.zeros(it_len)\n range_q_output_CC_W = np.zeros(it_len)\n range_eta_el_CC = np.zeros(it_len)\n range_eta_thermal_CC = np.zeros(it_len)\n range_q_input_CC_W = np.zeros(it_len)\n\n # create range of electricity output from the GT between the minimum and nominal load\n range_el_output_from_GT_W = np.linspace(GT_size_W * GT_MIN_PART_LOAD, GT_size_W, it_len)\n\n # calculate the operation data at different electricity load\n for i in range(len(range_el_output_from_GT_W)):\n el_output_from_GT_W = range_el_output_from_GT_W[i]\n\n # combine cycle operation\n CC_operation = calc_CC_operation(el_output_from_GT_W, GT_size_W, fuel_type, T_sup_K)\n range_el_output_CC_W[i] = CC_operation['el_output_W'] # Electricity output from the combined cycle\n range_q_output_CC_W[i] = CC_operation['q_output_ST_W'] # Thermal output from the combined cycle\n range_eta_el_CC[i] = CC_operation['eta_el'] # el. efficiency\n range_eta_thermal_CC[i] = CC_operation['eta_thermal'] # thermal efficiency\n\n range_q_input_CC_W[i] = range_q_output_CC_W[i] / range_eta_thermal_CC[i] # thermal energy input\n\n # create interpolation functions as a function of heat output\n el_output_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_el_output_from_GT_W,\n kind=\"linear\")\n q_input_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_q_input_CC_W, kind=\"linear\")\n\n # create interpolation functions as a function of thermal energy input\n eta_el_interpol_with_q_input = interpolate.interp1d(range_q_input_CC_W, range_eta_el_CC,\n kind=\"linear\")\n\n q_output_min_W = min(range_q_output_CC_W)\n q_output_max_W = max(range_q_output_CC_W)\n\n return {'el_output_fn_q_output_W': el_output_interpol_with_q_output_W,\n 'q_input_fn_q_output_W': q_input_interpol_with_q_output_W,\n 'q_output_min_W': q_output_min_W, 'q_output_max_W': q_output_max_W,\n 'eta_el_fn_q_input': eta_el_interpol_with_q_input}",
"def gagps(data_src, min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, n_pop=N_POPULATION, pc=PC,\n gamma=GAMMA, mu=MU, sigma=SIGMA, return_gps=False):\n\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n # Problem Information\n # costfxn\n\n # Parameters\n # pc: Proportion of children (if its 1, then nc == npop\n it_count = 0\n eval_count = 0\n counter = 0\n var_min = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n\n nc = int(np.round(pc * n_pop / 2) * 2) # Number of children. np.round is used to get even number of children\n\n # Empty Individual Template\n empty_individual = structure()\n empty_individual.position = None\n empty_individual.cost = None\n\n # Initialize Population\n pop = empty_individual.repeat(n_pop)\n for i in range(n_pop):\n pop[i].position = random.randrange(var_min, var_max)\n pop[i].cost = 1 # costfxn(pop[i].position, attr_keys, d_set)\n # if pop[i].cost < best_sol.cost:\n # best_sol = pop[i].deepcopy()\n\n # Best Solution Ever Found\n best_sol = empty_individual.deepcopy()\n best_sol.position = pop[0].position\n best_sol.cost = costfxn(best_sol.position, attr_keys, d_set)\n\n # Best Cost of Iteration\n best_costs = np.empty(max_iteration)\n best_patterns = list()\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n\n repeated = 0\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # while repeated < 1:\n\n c_pop = [] # Children population\n for _ in range(nc // 2):\n # Select Parents\n q = np.random.permutation(n_pop)\n p1 = pop[q[0]]\n p2 = pop[q[1]]\n\n # a. Perform Crossover\n c1, c2 = crossover(p1, p2, gamma)\n\n # Apply Bound\n apply_bound(c1, var_min, var_max)\n apply_bound(c2, var_min, var_max)\n\n # Evaluate First Offspring\n c1.cost = costfxn(c1.position, attr_keys, d_set)\n if c1.cost < best_sol.cost:\n best_sol = c1.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n # Evaluate Second Offspring\n c2.cost = costfxn(c2.position, attr_keys, d_set)\n if c2.cost < best_sol.cost:\n best_sol = c2.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n # b. Perform Mutation\n c1 = mutate(c1, mu, sigma)\n c2 = mutate(c2, mu, sigma)\n\n # Apply Bound\n apply_bound(c1, var_min, var_max)\n apply_bound(c2, var_min, var_max)\n\n # Evaluate First Offspring\n c1.cost = costfxn(c1.position, attr_keys, d_set)\n if c1.cost < best_sol.cost:\n best_sol = c1.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n # Evaluate Second Offspring\n c2.cost = costfxn(c2.position, attr_keys, d_set)\n if c2.cost < best_sol.cost:\n best_sol = c2.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n # c. 
Add Offsprings to c_pop\n c_pop.append(c1)\n c_pop.append(c2)\n\n # Merge, Sort and Select\n pop += c_pop\n pop = sorted(pop, key=lambda x: x.cost)\n pop = pop[0:n_pop]\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_sol.position))\n \"\"\":type best_gp: GP\"\"\"\n is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n # else:\n # best_sol.cost = 1\n\n try:\n # Show Iteration Information\n # Store Best Cost\n best_costs[it_count] = best_sol.cost\n str_iter += \"{}: {} \\n\".format(it_count, best_sol.cost)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"GA-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out",
"def __calc_CoagS(self):\n\n Dp_small = self.dp_lim[0]*1e-9 # in m\n temp = self.temp_data # Kelvin\n pres = self.pres_data # Pascal\n Dp = self.par_diam*1e-9 # m\n time = self.par_time # days\n N = self.__dNdlog2dN(Dp,self.smoothed_par_data) # cm-3\n findex = np.argwhere(Dp>=Dp_small).flatten()\n big_R = Dp[findex]/2.\n big_N = N[:,findex]\n k_B = 1.38064852e-23 # Boltzmann constant m2 kg s-2 K-1\n r0=Dp_small/2.\n r1=r0\n dens=1000.\n self.CoagS=np.zeros(time.shape)\n for i in range(0,len(time)):\n lamda=(6.73e-8*temp[i]*(1+(110.4/temp[i])))/(296*pres[i]/101325.0*1.373)\n myy=(1.832e-5*(temp[i]**(1.5))*406.4)/(5093*(temp[i]+110.4))\n kn1=lamda/r1\n kn=lamda/big_R\n CC= 1.+(kn*(1.142+(0.558*np.exp((-.999)/kn))))\n CC1= 1. + (kn1*(1.142+(0.558*np.exp((-.999)/kn1))))\n D = (k_B*temp[i]*CC)/(6.*np.pi*myy*big_R)\n D1 = (k_B*temp[i]*CC1)/(6.*np.pi*myy*r1)\n M = 4./3.*np.pi*(big_R**3)*dens\n M1 = 4./3.*np.pi*(r1**3)*dens\n c= np.sqrt((8.*k_B*temp[i])/(np.pi*M))\n c1= np.sqrt((8.*k_B*temp[i])/(np.pi*M1))\n c12= np.sqrt((c**2)+(c1**2))\n r12= big_R+r1\n D12= D+D1\n CCONT = 4.*np.pi*r12*D12\n CFR = np.pi*r12*r12*c12\n L=(8.*D)/(np.pi*c)\n L1=(8.*D1)/(np.pi*c1)\n SIG=(1./(3.*r12*L))*((r12+L)**3-(r12*r12+L*L)**1.5)-r12\n SIG1=(1./(3.*r12*L1))*((r12+L1)**3-(r12*r12+L1*L1)**1.5)-r12\n SIG12= np.sqrt((SIG**2)+(SIG1**2))\n KO=CCONT/((r12/(r12+SIG12))+(CCONT/CFR))\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)\n if (r0==big_R[0]):\n self.CoagS[i] = 0.5*KO*big_N[i,0]*1e6+np.nansum(KO*big_N[i,1:]*1e6)\n else:\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)"
]
| [
"0.5832654",
"0.5646996",
"0.5500593",
"0.54833776",
"0.543777",
"0.54340506",
"0.539481",
"0.5391305",
"0.53071827",
"0.53047156",
"0.52895695",
"0.5265945",
"0.5244411",
"0.52225554",
"0.5215296",
"0.51821274",
"0.5179483",
"0.5171448",
"0.5150737",
"0.5135097",
"0.5132552",
"0.5125592",
"0.51142967",
"0.51085764",
"0.5097029",
"0.5095257",
"0.5084897",
"0.5082653",
"0.5070101",
"0.5052945"
]
| 0.6204177 | 0 |
This method creates a job-process instance and all of its tasks; however, if the job is created as finished, no tasks are created. For example, when the time comes to start a job but a previous instance is still running and the process is configured not to overlap, the new instance is created as finished. | def create(cls, process, *args, **kwargs):
job = cls(process=process, *args, **kwargs)
job.save()
ret_tasks = []
if job.status != 'finished':
tasks = Task.objects.filter(is_active=True, process=process)
ret_tasks = [JobTask.create(job, t) for t in tasks]
return job, ret_tasks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_jobs(self, total_time):\r\n task_distribution = get_param('task_distribution')\r\n num_tasks = get_param('num_tasks')\r\n task_length = get_param('task_length')\r\n avg_arrival_delay = get_param('job_arrival_delay')\r\n job_arrival_distribution = get_param('job_arrival_distribution')\r\n for front_end in self.front_ends:\r\n last_job_arrival = 0\r\n count = 0\r\n while True:\r\n if job_arrival_distribution == \"constant\":\r\n new_last = last_job_arrival + avg_arrival_delay\r\n else:\r\n # If the job arrivals are a Poisson process, the time\r\n # between jobs follows an exponential distribution. \r\n new_last = last_job_arrival + \\\r\n random.expovariate(1.0/avg_arrival_delay)\r\n\r\n # See if we've passed the end of the experiment\r\n if new_last > total_time:\r\n break\r\n else: \r\n last_job_arrival = new_last\r\n \r\n if task_distribution == \"bimodal\":\r\n if random.random() > (1.0 / 6):\r\n # 5/6 of the jobs have 10 tasks.\r\n num_tasks = 10\r\n else:\r\n num_tasks = 200\r\n relative_demands = get_param(\"relative_demands\")\r\n if relative_demands == []:\r\n user_id = random.randrange(self.num_users)\r\n else:\r\n r = random.random()\r\n user_id = -1\r\n for current_user in range(self.num_users):\r\n if r < get_param(\"relative_demands\")[current_user]:\r\n user_id = current_user\r\n break\r\n assert user_id != -1\r\n job = Job(user_id, last_job_arrival, num_tasks, task_length,\r\n self.stats_manager, \r\n front_end.id_str + \":\" + str(count), self.servers)\r\n job_arrival_event = JobArrival(job, front_end)\r\n self.event_queue.put((last_job_arrival, job_arrival_event))\r\n self.total_jobs += 1\r\n count = count + 1",
"def createNewTasks(_id):\n job = mongo.db.jobs.find_one({'_id': _id})\n tasks = job.get('data').get('tasks')\n for task in tasks:\n data = {\n 'name': task.get('name'),\n 'datetime': now(),\n 'status': 'ready',\n 'owner': job.get('owner'),\n 'priority': job.get('priority'),\n 'is_active': True,\n 'slave': None,\n 'last_activity': now(),\n 'started_on': None,\n 'finished_on': None,\n 'paused_on': None,\n 'logs': [],\n 'ctid': None,\n 'target_info': {},\n 'cancelled_on': None,\n 'progress': 0,\n 'job': job.get('_id'),\n 'proccess':\n {\n 'command': getRenderCommand(job.get('category')),\n 'cwd': task.get('cwd'),\n 'filepath': task.get('filepath'),\n 'target': task.get('target'),\n }\n }\n newTask = mongo.db.tasks.insert(data)\n ctid = addTaskToQueue(newTask)\n #updateTaskInfo(str(task['_id']['$oid']), status='ready', ctid=str(ctid))\n job['status'] = 'ready'\n mongo.db.jobs.update({'_id': _id}, job)\n\n return",
"def create_task():",
"def __init__(self,\n input_queue: JoinableQueue,\n output_queue: JoinableQueue,\n error_queue: JoinableQueue,\n slack_queue: 'SlackBot.SlackQueue',\n logging_queue: JoinableQueue,\n process_job: Callable[[Type['Task.Task']], Type['Task.Task']],\n name: str =\"PipelineManager\",\n num_processes: int = 1,\n timeout_duration: int = 1) -> None:\n\n self.name = name\n #An attempt to idiot-proof the PipelineManager by instantiating a JoinableQueue() if one didn't exist already.\n self.input_queue = input_queue if input_queue else JoinableQueue()\n self.output_queue = output_queue if output_queue else JoinableQueue()\n self.error_queue = error_queue if error_queue else JoinableQueue()\n self.slack_queue = slack_queue\n self.logging_queue = logging_queue\n self.num_processes = num_processes\n self.process_job = process_job\n self.timeout_duration = timeout_duration\n #A list of active processes comprised of Process objects\n self.process_list: List[Process] = []\n #An internal restart flag (used when all processes managed die)\n self.restart_required = False\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(logging.DEBUG)",
"def createJobs():\n jobs_list = []\n for job in raw_jobs:\n cur_job = Job(int(job[0]), int(job[1]), int(job[2]))\n print(\"Created job: index:\", cur_job.number, \"Length:\", cur_job.length, \"Type\", cur_job.type, file=debug_file)\n jobs_list.append(cur_job)\n print(\"-----------------FINISHED CREATING JOB OBJECTS----------------------\\n\\n\", file=debug_file)\n return jobs_list",
"def queue(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Queuing job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.queue',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() == tasks.TASK_FAILED:\n init_state = 'FAILED'\n else:\n self.winstance.send_event('.. job queued')\n init_state = 'PENDING'\n self.set_status(init_state)\n return result.task",
"def task_instance_post_save_handler(instance, created, **_):\n # Only start the job if the instance was just created\n if created:\n # Use the specified queue else the default queue\n kwargs = {\n 'uuid': instance.uuid,\n 'container_image': instance.task_type.container_image,\n 'container_type': instance.task_type.container_type,\n 'script_path': instance.task_type.script_path,\n 'logs_path': instance.task_type.logs_path,\n 'args_dict': instance.arguments,}\n\n run_task.apply_async(\n kwargs=kwargs,\n queue=instance.task_queue.name,\n task_id=str(instance.uuid),)",
"def createJob(self, joboptions, previousId=None):\n root = self.manifest.getRootResource()\n assert self.manifest.tosca\n job = Job(self, root, joboptions, previousId)\n\n if (\n self.manifest.localEnv\n and not joboptions.parentJob\n and not joboptions.startTime\n ):\n logPath = self.manifest.getJobLogPath(job.getStartTime(), \".log\")\n if not os.path.isdir(os.path.dirname(logPath)):\n os.makedirs(os.path.dirname(logPath))\n initLogging(logfile=logPath)\n path = self.manifest.path\n if joboptions.planOnly:\n logger.info(\"creating %s plan for %s\", joboptions.workflow, path)\n else:\n logger.info(\"starting %s job for %s\", joboptions.workflow, path)\n\n WorkflowPlan = Plan.getPlanClassForWorkflow(joboptions.workflow)\n if not WorkflowPlan:\n raise UnfurlError(\"unknown workflow: %s\" % joboptions.workflow)\n job.plan = WorkflowPlan(root, self.manifest.tosca, joboptions)\n return job",
"def create(self):\n\n # Validate Inputs\n create_dict = {\n \"model_id\": self.model.id,\n }\n\n try:\n # Create Task\n self.spinner.start()\n task_obj = self.dal.task.create(Task(create_dict))\n finally:\n self.spinner.stop()\n return task_obj",
"def start_new_processes(self):\n # initialize cache to mutualize calls to Variable.get in DAGs\n # needs to be done before this process is forked to create the DAG parsing processes.\n SecretCache.init()\n\n while self._parallelism - len(self._processors) > 0 and self._file_path_queue:\n file_path = self._file_path_queue.popleft()\n # Stop creating duplicate processor i.e. processor with the same filepath\n if file_path in self._processors:\n continue\n\n callback_to_execute_for_file = self._callback_to_execute[file_path]\n processor = self._create_process(\n file_path,\n self._pickle_dags,\n self._dag_ids,\n self.get_dag_directory(),\n callback_to_execute_for_file,\n )\n\n del self._callback_to_execute[file_path]\n Stats.incr(\"dag_processing.processes\", tags={\"file_path\": file_path, \"action\": \"start\"})\n\n processor.start()\n self.log.debug(\"Started a process (PID: %s) to generate tasks for %s\", processor.pid, file_path)\n self._processors[file_path] = processor\n self.waitables[processor.waitable_handle] = processor\n\n Stats.gauge(\"dag_processing.file_path_queue_size\", len(self._file_path_queue))",
"def _create_process_instance(self, process_id, name, module, cls, config, proc_attr):\n # SERVICE INSTANCE.\n process_instance = for_name(module, cls)\n if not isinstance(process_instance, BaseService):\n raise ContainerConfigError(\"Instantiated service not a BaseService %r\" % process_instance)\n\n # Prepare service instance\n process_instance.errcause = \"\"\n process_instance.id = process_id\n process_instance.container = self.container\n process_instance.CFG = config\n process_instance._proc_name = name\n process_instance._proc_start_time = time.time()\n for att, att_val in proc_attr.iteritems():\n setattr(process_instance, att, att_val)\n\n #Unless the process has been started as part of another Org, default to the container Org or the ION Org\n if config.has_key('org_governance_name'):\n process_instance.org_governance_name = config['org_governance_name']\n else:\n process_instance.org_governance_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))\n\n\n # Add stateful process operations\n if hasattr(process_instance, \"_flush_state\"):\n def _flush_state():\n with process_instance._state_lock:\n state_obj = process_instance.container.state_repository.put_state(process_instance.id, process_instance._proc_state,\n state_obj=process_instance._proc_state_obj)\n state_obj.state = None # Make sure memory footprint is low for larger states\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n\n def _load_state():\n if not hasattr(process_instance, \"_proc_state\"):\n process_instance._proc_state = {}\n try:\n with process_instance._state_lock:\n new_state, state_obj = process_instance.container.state_repository.get_state(process_instance.id)\n process_instance._proc_state.clear()\n process_instance._proc_state.update(new_state)\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n except NotFound as nf:\n log.debug(\"No persisted state available for process %s\", process_instance.id)\n except Exception as ex:\n log.warn(\"Process %s load state failed: %s\", process_instance.id, str(ex))\n process_instance._flush_state = _flush_state\n process_instance._load_state = _load_state\n process_instance._state_lock = RLock()\n process_instance._proc_state = {}\n process_instance._proc_state_obj = None\n process_instance._proc_state_changed = False\n\n # PROCESS RESTART: Need to check whether this process had persisted state.\n # Note: This could happen anytime during a system run, not just on RESTART boot\n log.debug(\"Loading persisted state for process %s\", process_id)\n process_instance._load_state()\n\n # start service dependencies (RPC clients)\n self._start_process_dependencies(process_instance)\n\n return process_instance",
"def send_job(self):\n graph = self.processgraphEdit.toPlainText()\n # info(self.iface, graph)\n response = self.connection.job_create(json.loads(graph))\n if response.status_code == 201:\n info(self.iface, \"Successfully created new job, Response: {}\".format(response.status_code))\n else:\n warning(self.iface, \"Not able to created new job, Response: {}\".format(str(response.json())))",
"def testJobKilling(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n locationAction = self.daoFactory(classname=\"Locations.New\")\n locationAction.execute(\"site1\", pnn=\"T2_CH_CERN\")\n\n testWorkflow = Workflow(spec=self.specUrl, owner=\"Steve\",\n name=\"wf001\", task=self.taskName)\n testWorkflow.create()\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n for i in range(4):\n newFile = File(lfn=\"File%s\" % i, locations=set([\"T2_CH_CERN\"]))\n newFile.create()\n testFileset.addFile(newFile)\n\n testFileset.commit()\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow,\n split_algo=\"FileBased\")\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n jobGroup = jobFactory(files_per_job=1)[0]\n\n assert len(jobGroup.jobs) == 4, \\\n \"Error: Splitting should have created four jobs.\"\n\n testJobA = jobGroup.jobs[0]\n testJobA[\"user\"] = \"sfoulkes\"\n testJobA[\"group\"] = \"DMWM\"\n testJobA[\"taskType\"] = \"Processing\"\n testJobB = jobGroup.jobs[1]\n testJobB[\"user\"] = \"sfoulkes\"\n testJobB[\"group\"] = \"DMWM\"\n testJobB[\"taskType\"] = \"Processing\"\n testJobC = jobGroup.jobs[2]\n testJobC[\"user\"] = \"sfoulkes\"\n testJobC[\"group\"] = \"DMWM\"\n testJobC[\"taskType\"] = \"Processing\"\n testJobD = jobGroup.jobs[3]\n testJobD[\"user\"] = \"sfoulkes\"\n testJobD[\"group\"] = \"DMWM\"\n testJobD[\"taskType\"] = \"Processing\"\n\n change.persist([testJobA], \"created\", \"new\")\n change.persist([testJobB], \"jobfailed\", \"executing\")\n change.persist([testJobC, testJobD], \"executing\", \"created\")\n\n change.persist([testJobA], \"killed\", \"created\")\n change.persist([testJobB], \"killed\", \"jobfailed\")\n change.persist([testJobC, testJobD], \"killed\", \"executing\")\n\n for job in [testJobA, testJobB, testJobC, testJobD]:\n job.load()\n self.assertEqual(job['retry_count'], 99999)\n self.assertEqual(job['state'], 'killed')\n\n return",
"def test_background_process(self):\n first = \"\"\"file://B <- file://A\n sleep 1\n echo A produces B > B\n \"\"\"\n\n pp = ProjectParser()\n pp.set_project(first)\n workflow = pp.parse_extend_and_check_project()\n process = workflow._processes[0]\n\n wr = WorkflowRuner(3)\n wr.init_workers()\n try:\n wr.start_process_in_background(process)\n assert wr.active_workers()\n timeout = time() + 1.5\n while time() < timeout and not wr._completed_processes:\n sleep(0.1)\n assert time() < timeout, \"Process should have stoped now\"\n finally:\n wr.terminate_workers_and_clean_subprocesses()",
"def create_job(self):\n job = Job()\n process = Process()\n process.process_graph = {\"load_collection1\": {\"process_id\": \"load_collection\", \"arguments\": {}}}\n\n job.process = process\n\n self.dlg = JobAdaptDialog(iface=self.iface, job=job, backend=self.backend, main_dia=self)\n self.dlg.manualButton.setIcon(QIcon(os.path.join(os.path.dirname(__file__),\n 'images/info_icon.png')))\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.dlg.show()",
"def task_generate_job_batch():\n return {\n # force doit to always mark the task\n # as not up-to-date (unless target removed)\n 'uptodate': [False],\n 'file_dep': ['generate_job_batch.py'],\n 'task_dep': ['create_folders'],\n #'targets': ['.running_jobs/list_of_jobs.txt'],\n 'actions': ['python generate_job_batch.py'],\n }",
"def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. (in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... 
not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n 
#print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = 
i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? [y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? 
[y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return",
"def create(self):\n return self.start()",
"def create_pending_tasks(self):\n for task in self.settings['setup_tasks']:\n self.setup_tasks.append(task)\n self.setup_results[task] = Result(task)\n\n for task in self.settings['tasks']:\n self.tasks.append(task)\n self.results[task] = Result(task)",
"def create_tasks(self):\n self.create_passport_task()\n\n self.create_visa_task()\n\n self.create_vaccines_task()\n self.create_malaria_task()\n\n self.create_weather_task()\n self.create_flight_needs_task()\n self.create_banking_task()\n\n self.create_insurance_task()\n\n self.create_systematic_tasks() # 3 tasks\n\n if self.trip.return_date_time is None or\\\n self.trip.return_date_time - self.trip.arrival_date_time > timedelta(days=14):\n\n self.create_long_travel_task()\n\n for task in self.tasks:\n task.auto = True\n\n return self.tasks",
"def create_job(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job',\n [], self._service_ver, context)",
"def __create_jobs_bin__(self):\n # | - __create_jobs_bin__\n folder_dir = os.path.join(self.root_dir, self.working_dir, \"jobs_bin\")\n # folder_dir = self.root_dir + \"/jobs_bin\"\n\n if not os.path.exists(folder_dir):\n # print(\"KDJFDI__\")\n # print(folder_dir)\n os.makedirs(folder_dir)\n # __|",
"def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job",
"def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response",
"async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job",
"def create_task_instances(sender, instance, **kwargs):\n task_type = instance\n existing_dates = set([task.date for task in task_type.tasks.all()])\n required_dates = set(task_type.date_range())\n missing_dates = required_dates - existing_dates\n superfluous_dates = existing_dates - required_dates\n Task.objects.filter(task_type=task_type, date__in=superfluous_dates).delete()\n for missing_date in missing_dates:\n task = Task(task_type=task_type, date=missing_date, num_people=task_type.num_people, score=task_type.score)\n task.save()\n\n Task.objects.filter(task_type=task_type).update(num_people=task_type.num_people, score=task_type.score)",
"def test_task_creation(self):\n Task.objects.filter(status=Task.Status.AWAITING_PROCESSING).delete()\n\n project = self.projects['test_human_and_machine']\n self.assertEqual(Task.objects.filter(project=project).count(),\n 0)\n create_subsequent_tasks(project)\n\n # Human Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 1)\n\n human_step = self.workflow_steps['test_workflow_2']['step4']\n task = Task.objects.get(step=human_step, project=project)\n data = {'submit_key1': 'submit_val1'}\n assign_task(self.workers[0].id, task.id)\n\n # user 0 submits a task\n response = self._submit_assignment(self.clients[0], task.id, data=data)\n self.assertEqual(response.status_code, 200)\n\n # Machine Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 2)\n machine_step = self.workflow_steps['test_workflow_2']['simple_machine']\n machine_task_assignment = (\n TaskAssignment.objects\n .filter(task__step=machine_step,\n task__project=project)[0])\n\n self.assertEqual(machine_task_assignment.status,\n TaskAssignment.Status.SUBMITTED)\n\n self.assertEqual(machine_task_assignment.in_progress_task_data,\n {'json': 'simple'})\n\n self.assertEqual(machine_task_assignment.task.status,\n Task.Status.COMPLETE)",
"def create_tasks(job_id, n_init, n_experiments, max_k, covars, columns, s3_file_key, scale):\n task_status = 'pending'\n created_time = float_to_str(time.time())\n\n # Add tasks to DynamoDB\n task_id = 0\n tasks = []\n for _ in range(n_experiments):\n for k in range(1, max_k + 1):\n for covar in covars:\n covar_type, covar_tied = covar.lower().split('-')\n covar_tied = covar_tied == 'tied'\n task = dict(job_id = job_id,\n task_id = task_id,\n k = k,\n covar_type = covar_type,\n covar_tied = covar_tied,\n n_init = n_init,\n s3_file_key = s3_file_key,\n columns = columns,\n scale = scale,\n task_status = task_status,\n created_time = created_time)\n tasks += [task]\n task_id += 1\n dynamo_no_context_add_tasks(tasks)\n print(\"job created: \" + job_id)",
"def new_process() -> Process:\n return multiprocessing.Process()",
"def create(self, validated_data):\n # STEP 1 - Get validated POST data.\n task_item = validated_data.get('task_item', None)\n number_of_visits = validated_data.get('number_of_visits', 0)\n\n # Go through the number of visits and create a new WorkOrder per visit.\n for visit in range(0, number_of_visits):\n self.create_work_order_from_ongoing_job(task_item.ongoing_job)\n\n # Update the `OngoingWorkOrder` to have the user whom edited it plus\n # remove our `TaskItem` instance from it.\n task_item.ongoing_job.latest_pending_task = None\n task_item.ongoing_job.last_modified_by =self.context['user']\n task_item.ongoing_job.last_modified_from =self.context['from']\n task_item.ongoing_job.last_modified_from_is_public =self.context['from_is_public']\n task_item.ongoing_job.save()\n\n # Update the task to be completed.\n task_item.is_closed = True\n task_item.last_modified_by = self.context['user']\n task_item.last_modified_from = self.context['from']\n task_item.last_modified_from_is_public = self.context['from_is_public']\n task_item.save()\n\n return validated_data"
]
| [
"0.6757296",
"0.6248927",
"0.609855",
"0.5757851",
"0.5690495",
"0.5688566",
"0.5634783",
"0.56327575",
"0.562663",
"0.5616947",
"0.559584",
"0.5571618",
"0.5568131",
"0.55623615",
"0.55595547",
"0.55562174",
"0.5551597",
"0.5521749",
"0.5493259",
"0.5476332",
"0.5460808",
"0.5450007",
"0.54242235",
"0.5420215",
"0.5413991",
"0.54002815",
"0.53779733",
"0.53762573",
"0.5366711",
"0.53622514"
]
| 0.7418253 | 0 |
Perform random distortions on an image. | def distort_image(image):
# Randomly flip horizontally.
with tf.name_scope("flip_horizontal", values=[image]):
image = tf.image.random_flip_left_right(image)
# Randomly distort the colors based on thread id.
with tf.name_scope("distort_color", values=[image]):
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distort_images(self, images, seed):\n if self.mode == \"train\":\n images = image_processing.distort_image(images, seed)\n\n # Rescale to [-1,1] instead of [0, 1]\n images = tf.subtract(images, 0.5)\n images = tf.multiply(images, 2.0)\n return images",
"def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")",
"def _distort_resize(image, image_size):\n distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n distorted_image = tf.image.random_contrast(\n distorted_image, lower=0.2, upper=1.8)\n distorted_image.set_shape([image_size, image_size, 3])\n return distorted_image",
"def random_distortion(image, bboxes=None, brightness=None, contrast=None,\n hue=None, saturation=None, seed=None):\n # Following Andrew Howard (2013). \"Some improvements on deep convolutional\n # neural network based image classification.\"\n if brightness is not None:\n if 'max_delta' not in brightness:\n brightness.max_delta = 0.3\n image = tf.image.random_brightness(\n image, max_delta=brightness.max_delta, seed=seed\n )\n # Changing contrast, even with parameters close to 1, can lead to\n # excessively distorted images. Use with care.\n if contrast is not None:\n if 'lower' not in contrast:\n contrast.lower = 0.8\n if 'upper' not in contrast:\n contrast.upper = 1.2\n image = tf.image.random_contrast(\n image, lower=contrast.lower, upper=contrast.upper,\n seed=seed\n )\n if hue is not None:\n if 'max_delta' not in hue:\n hue.max_delta = 0.2\n image = tf.image.random_hue(\n image, max_delta=hue.max_delta, seed=seed\n )\n if saturation is not None:\n if 'lower' not in saturation:\n saturation.lower = 0.8\n if 'upper' not in saturation:\n saturation.upper = 1.2\n image = tf.image.random_saturation(\n image, lower=saturation.lower, upper=saturation.upper,\n seed=seed\n )\n if bboxes is None:\n return_dict = {'image': image}\n else:\n return_dict = {\n 'image': image,\n 'bboxes': bboxes,\n }\n return return_dict",
"def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)",
"def distorted_input(image, label):\n # Random crop image\n cropped_image = tf.image.resize_image_with_crop_or_pad(image, 324, 324)\n cropped_image = tf.random_crop(cropped_image, [FLAGS.image_size, FLAGS.image_size, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(cropped_image)\n\n # TODO: Make the order of following operations random.\n # Because these operations are not commutative, consider randomizing\n # the order their operation.\n distorted_image = tf.image.random_brightness(distorted_image,\n max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image,\n lower=0.2, upper=1.8)\n\n norm_image = tf.image.per_image_standardization(distorted_image)\n\n return norm_image, label",
"def test_with_predefined_dist(self, seed):\n dim = Dimension(\"yolo\", dists.norm, 0.9)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert dists.norm.rvs(0.9) == samples[0]",
"def random_color_distort(src, brightness_delta=32, contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5, hue_delta=18):\n\n def brightness(src, delta, p=0.5):\n \"\"\"Brightness distortion.\"\"\"\n if np.random.uniform(0, 1) > p:\n delta = np.random.uniform(-delta, delta)\n src += delta\n return src\n return src\n\n def contrast(src, low, high, p=0.5):\n \"\"\"Contrast distortion\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = np.random.uniform(low, high)\n src *= alpha\n return src\n return src\n\n def saturation(src, low, high, p=0.5):\n \"\"\"Saturation distortion.\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = np.random.uniform(low, high)\n gray = src * np.array([[[0.299, 0.587, 0.114]]])\n gray = np.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n return src\n\n def hue(src, delta, p=0.5):\n \"\"\"Hue distortion\"\"\"\n if np.random.uniform(0, 1) > p:\n alpha = random.uniform(-delta, delta)\n u = np.cos(alpha * np.pi)\n w = np.sin(alpha * np.pi)\n bt = np.array([[1.0, 0.0, 0.0],\n [0.0, u, -w],\n [0.0, w, u]])\n tyiq = np.array([[0.299, 0.587, 0.114],\n [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n ityiq = np.array([[1.0, 0.956, 0.621],\n [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n t = np.dot(np.dot(ityiq, bt), tyiq).T\n src = np.dot(src, np.array(t))\n return src\n return src\n\n src = src.astype('float32')\n\n # brightness\n src = brightness(src, brightness_delta)\n\n # color jitter\n if np.random.randint(0, 2):\n src = contrast(src, contrast_low, contrast_high)\n src = saturation(src, saturation_low, saturation_high)\n src = hue(src, hue_delta)\n else:\n src = saturation(src, saturation_low, saturation_high)\n src = hue(src, hue_delta)\n src = contrast(src, contrast_low, contrast_high)\n return src",
"def distort_color(image, thread_id=0, stddev=0.1, scope=None, grayscale=False):\n with tf.op_scope([image], scope, 'distort_color'):\n color_ordering = thread_id % 2\n\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n if not grayscale:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n\n elif color_ordering == 1:\n # gil - original values\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n #image = tf.image.random_brightness(image, max_delta=16. / 255.)\n #image = tf.image.random_contrast(image, lower=0.75, upper=1.25)\n\n if not grayscale:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n\n image += tf.random_normal(\n tf.shape(image),\n stddev=stddev,\n dtype=tf.float64,\n seed=42,\n name='add_gaussian_noise')\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n return image",
"def __call__(self, image):\n if random.random() < 0.5:\n image = np.flip(image, 1).copy()\n return image",
"def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert img.dtype == np.float32, \\\n 'PhotoMetricDistortion needs the input image of dtype ' \\\n 'np.float32, please set \"to_float32=True\" in ' \\\n '\"LoadImageFromFile\" pipeline'\n # random brightness\n if random.randint(2):\n delta = random.uniform(-self.brightness_delta,\n self.brightness_delta)\n img += delta\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n mode = random.randint(2)\n if mode == 1:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # convert color from BGR to HSV\n img = general_ocr.bgr2hsv(img)\n\n # random saturation\n if random.randint(2):\n img[..., 1] *= random.uniform(self.saturation_lower,\n self.saturation_upper)\n\n # random hue\n if random.randint(2):\n img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 0][img[..., 0] < 0] += 360\n\n # convert color from HSV to BGR\n img = general_ocr.hsv2bgr(img)\n\n # random contrast\n if mode == 0:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # randomly swap channels\n if random.randint(2):\n img = img[..., random.permutation(3)]\n\n results['img'] = img\n return results",
"def test_random_noise_img(alg, repNum=1):\n\timport math\n\n\tperformance = alg.predict_perf(repNum=repNum)\n\tprint performance\n\talg.plot()\n\n\talg.shape = (math.sqrt(alg.N),math.sqrt(alg.N))\n\tsave_img(alg.x, alg, 'random_original.png', False)\n\tsave_img(alg.x_pred, alg, 'random_predicted.png', False)\n\n\treturn performance",
"def undistort_img(img, mtx, dist, debug=False):\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n if (debug):\n window_name = \"Undistorted Image\"\n cv2.imshow('Undistorted Image', undist)\n cv2.moveWindow(\"Undistorted Image\", 10, 50);\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return undist",
"def distort_img(input_img, d_limit=4):\n if d_limit == 0:\n return input_img\n rows, cols, ch = input_img.shape\n pts2 = np.float32([[0, 0], [rows - 1, 0], [0, cols - 1], [rows - 1, cols - 1]])\n pts1 = np.float32(pts2 + np.random.uniform(-d_limit, d_limit, pts2.shape))\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(input_img, M, (cols, rows), borderMode=1)\n return dst",
"def randomize_pixels(image):\n shape_ = image.size()\n image_flat = image.view(-1, image.size(-1))\n shuffled_image = shuffle(image_flat)\n return shuffled_image.view(shape_)",
"def random_all(image, angle):\n\n # Create a copy of the image to prevent changing the original\n img = np.copy(image)\n\n # Apply augmentations that can impact steering angle first\n y_steer_corr_px = 0.001\n x_steer_corr_px = 0.01 # Initial value taken from Vivek Yadav's blog:\n max_tx = 30\n max_ty = 20\n # https://chatbotslife.com/using-augmentation-to-mimic-human-driving-\n # 496b569760a9\n img, angle = random_horizontal_flip(img, angle)\n img, angle = random_translation(img, angle, max_tx=max_tx, max_ty=max_ty,\n x_steer_corr_px=x_steer_corr_px,\n y_steer_corr_px=y_steer_corr_px)\n\n # Apply remaining augmentations\n img = random_gaussian(random_shadows(random_brightness(img)))\n\n return img, angle",
"def pgd_attack_random(model, images, labels, eps=1, alpha=1, iters=40, randomize=True):\r\n model.eval()\r\n labels = labels.to(device)\r\n if randomize:\r\n delta = torch.rand_like(images, requires_grad=True).to(device)\r\n delta.data = delta.data * 2 * eps - eps\r\n delta.data = (delta.data + images ).clamp(-0.5,0.5)-(images)\r\n else:\r\n delta = torch.zeros_like(images, requires_grad=True).to(device)\r\n \r\n for t in range(iters):\r\n loss = torch.nn.CrossEntropyLoss()(model(images + delta ), labels)\r\n loss.backward()\r\n \r\n delta.data = (delta + alpha*delta.grad.detach().sign()).clamp(-eps,eps)\r\n delta.data = (delta.data + images ).clamp(-0.5,0.5)-(images)\r\n delta.grad.zero_()\r\n \r\n return delta+images",
"def undistort_image(mtx_, dist_, img_):\n dst = cv2.undistort(img_, mtx_, dist_, None, mtx_)\n return dst",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def undistort(img, mtx, dist):\n return cv2.undistort(img, mtx, dist, None, mtx)",
"def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n offset = random_negative(self.offset, self.random_negative_prob)\n self._translate_img(results, offset, self.direction)\n return results",
"def undistort(self, image):\n dst = cv2.undistort(image, self.mtx, self.dist_coeff, None)\n\n if self.args.is_test:\n self.image_logger.save_image(dst, 'undistorted')\n images = [[{'title': 'Original', 'data': image},\n {'title': 'Undistorted', 'data': dst}]]\n self.image_logger.plot_results(images)\n return dst",
"def distort_color(image, color_ordering=0, fast_mode=True, scope=None):\n with tf.name_scope(scope, 'distort_color', [image]):\n lower = random.uniform(0.5,1.)\n bright = random.uniform(0.,200.)\n hue = random.uniform(0.,0.5)\n if fast_mode:\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n else:\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n else:\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n #image = tf.image.random_hue(image, max_delta=hue)\n image = tf.image.random_contrast(image, lower=lower, upper=1.)\n elif color_ordering == 1:\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n image = tf.image.random_contrast(image, lower=lower, upper=1.)\n #image = tf.image.random_hue(image, max_delta=hue)\n elif color_ordering == 2:\n image = tf.image.random_contrast(image, lower=lower, upper=1.)\n #image = tf.image.random_hue(image, max_delta=hue)\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n elif color_ordering == 3:\n #image = tf.image.random_hue(image, max_delta=hue)\n image = tf.image.random_saturation(image, lower=lower, upper=1.)\n image = tf.image.random_contrast(image, lower=lower, upper=1.)\n image = tf.image.random_brightness(image, max_delta=bright / 255.)\n else:\n raise ValueError('color_ordering must be in [0, 3]')\n # The random_* ops do not necessarily clamp.\n return image",
"def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n h, w = results['image'].shape[:2]\n center = self.center\n if center is None:\n center = ((w - 1) * 0.5, (h - 1) * 0.5)\n angle = random_negative(self.angle, self.random_negative_prob)\n self._rotate_img(results, angle, center, self.scale)\n return results",
"def measure_pixel_distortion(net, n_samples=10):\n dataset_dir, dataset_pth = make_dataset_pths(net)\n print(\"dataset path: \", dataset_pth)\n X0, Probes, Change_prob, Perceptual_dist = load_or_make_dataset(\n net, dataset_pth, dataset_dir, net.dataset_size)\n X = np.repeat(X0, n_samples, axis=0)\n Y = net.predict(X)\n D = np.sum((Y - X) ** 2 / len(X))\n print(D)\n return D",
"def elastic_distort(image, alpha, sigma):\r\n random_state = numpy.random.RandomState(None)\r\n shape = image.shape\r\n\r\n dx = gaussian_filter(\r\n (random_state.rand(*shape) * 2 - 1),\r\n sigma, mode=\"constant\"\r\n ) * alpha\r\n dy = gaussian_filter(\r\n (random_state.rand(*shape) * 2 - 1),\r\n sigma, mode=\"constant\"\r\n ) * alpha\r\n\r\n x, y = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1]))\r\n indices = numpy.reshape(y+dy, (-1, 1)), numpy.reshape(x+dx, (-1, 1))\r\n return map_coordinates(image, indices, order=1).reshape(shape)",
"def random_hflip_img(img):\n if np.random.rand() > 0.5:\n return np.fliplr(img)\n return img",
"def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))",
"def random_shadow(image):\n rand_width_scal_1 = np.random.rand()\n x1, y1 = IMAGE_WIDTH * rand_width_scal_1, 0\n rand_width_scal_2 = np.random.rand()\n x2, y2 = IMAGE_WIDTH * rand_width_scal_2, IMAGE_HEIGHT\n xn, yn = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]\n mask = np.zeros_like(image[:, :, 1])\n mask[(yn - y1) * (x2 - x1) - (y2 - y1) * (xn - x1) > 0] = 1\n\n cond = mask == np.random.randint(2)\n s_ratio = np.random.uniform(low=0.2, high=0.5)\n\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio\n return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)",
"def random_rotation(self, img, p = 0.5):\n if self.decision(p):\n theta = random.randrange(-15, 15)\n phi = random.randrange(-15, 15)\n gamma = random.randrange(-15, 15)\n it = ImagePerspectiveTransformer(img, shape=(img.shape[0] + abs(gamma), img.shape[1]))\n roi = it.rotate_along_axis(theta=theta, phi=phi, gamma=gamma)\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img"
]
| [
"0.6896461",
"0.67348176",
"0.669944",
"0.6393275",
"0.6318509",
"0.6064876",
"0.6007593",
"0.6006129",
"0.5864679",
"0.5803689",
"0.57570237",
"0.5750709",
"0.57435656",
"0.5728318",
"0.5716929",
"0.56942165",
"0.5679312",
"0.56645614",
"0.56456125",
"0.563164",
"0.56153893",
"0.560164",
"0.558923",
"0.5552598",
"0.55322194",
"0.54964364",
"0.5489524",
"0.54631615",
"0.54537004",
"0.54494685"
]
| 0.7043445 | 0 |
Initializes a nonfunctioning pipe. | def __init__(self):
self._read_pipe_name = ''
self._write_pipe_name = ''
self._thread: Optional[Thread] = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self._pipe = []\n self._group = None\n stages = ['on', 'off', 'color', 'transition', 'flash', 'callback',\n 'repeat', 'brightness', 'wait', 'temperature', 'white',\n 'white_up', 'white_down', 'red_up', 'red_down',\n 'green_up', 'green_down', 'blue_up', 'blue_down',\n 'night_light', 'link', 'unlink']\n for name in stages:\n self._add_stage(name)",
"def __init__(self, pd, line_delimitor = r\"\\r\\n\"):\n\t\tself._pipe_desc = pd\n\t\tif 0 == (pservlet.pipe_get_flags(pd) & pservlet.PIPE_OUTPUT):\n\t\t\tself._state = pservlet.pipe_pop_state(pd)\n\t\t\tself._input = True\n\t\telse:\n\t\t\tself._state = None\n\t\t\tself._input = False\n\t\tif self._input and self._state == None:\n\t\t\tself._state = _PipeState()\n\t\tself._nl_pattern = re.compile(line_delimitor)",
"def __init__(self, *args, **kwargs):\n super(RpkiListener, self).__init__(*args, **kwargs)\n RpkiBase.__init__(self)\n self.p_err, self.c_err = multiprocessing.Pipe(duplex=False)\n self.c_data, self.p_data = multiprocessing.Pipe(duplex=False)",
"def fission_pipe():\n yield base.BasePipe(1)\n base.reset()",
"def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5",
"def __init__(self, state: PipetteState) -> None:\n self._state = state",
"def initialize(self, config: BaseDataPipeConfig) -> None:\n super().initialize(config)\n # Configuration\n self.stop_on_close = config.stop_on_close.value\n self.pipe: str = config.pipe.value\n self.pipe_mode: SocketMode = config.pipe_mode.value\n self.pipe_address: ZMQAddress = config.pipe_address.value\n self.pipe_format: MIME = config.pipe_format.value\n self.batch_size: int = config.batch_size.value\n self.ready_schedule_interval: int = config.ready_schedule_interval.value\n # Set up FBDP protocol\n if self.pipe_mode == SocketMode.BIND:\n # server\n self.protocol = FBDPServer()\n self.protocol.on_exception = self.handle_exception\n self.protocol.on_accept_client = self.handle_accept_client\n self.protocol.on_schedule_ready = self.handle_schedule_ready\n # We have an endpoint to bind\n self.endpoints[PIPE_CHN] = [self.pipe_address]\n else:\n # client\n self.protocol = FBDPClient()\n # common parts\n self.protocol.log_context = self.logging_id\n self.protocol.batch_size = self.batch_size\n self.protocol.on_pipe_closed = self.handle_pipe_closed\n self.protocol.on_produce_data = self.handle_produce_data\n self.protocol.on_accept_data = self.handle_accept_data\n # Create pipe channel\n self.mngr.create_channel(DealerChannel, PIPE_CHN, self.protocol, wait_for=Direction.IN)",
"def _on_pipeline_init(self) -> None:\n pass",
"def __init__(self) -> None:\n self._state = PipetteState(\n pipettes_by_id={},\n aspirated_volume_by_id={},\n attached_tip_by_id={},\n current_well=None,\n current_deck_point=CurrentDeckPoint(mount=None, deck_point=None),\n movement_speed_by_id={},\n static_config_by_id={},\n flow_rates_by_id={},\n )",
"def _init_pipeline(self, cfg: ConfigType) -> Callable:",
"def __init__(self, pipe, rate=10):\n super(AsyncController, self).__init__()\n self._pipe = pipe\n self._pipe_thread = threading.Thread(target=self._pipe_monitor)\n self._updater = _Updater(self._pipe.send, rate)\n self._is_closed = False\n\n self._pipe_thread.start()",
"def __init__(self, buffer_size=1000):\r\n\r\n super(Pipe, self).__init__()\r\n self.buffer_size = buffer_size\r\n\r\n # Should it be deque or array?\r\n self.staging_buffer = []\r\n self._ready_buffer = None\r\n\r\n self._done_sending = False\r\n self._done_receiving = False\r\n self._closed = False\r\n\r\n # Taken from Python Queue implementation:\r\n\r\n # mutex must beheld whenever the queue is mutating. All methods\r\n # that acquire mutex must release it before returning. mutex\r\n # is shared between the three conditions, so acquiring and\r\n # releasing the conditions also acquires and releases mutex.\r\n self.mutex = threading.Lock()\r\n # Notify not_empty whenever an item is added to the queue; a\r\n # thread waiting to get is notified then.\r\n self.not_empty = threading.Condition(self.mutex)\r\n # Notify not_full whenever an item is removed from the queue;\r\n # a thread waiting to put is notified then.\r\n self.not_full = threading.Condition(self.mutex)",
"def __init__(self, pipe_name=None, pipe_bundle=None, file_root='noe', results_dir=None, save_state=True):\n\n # Execution lock.\n status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')\n\n # Set up the analysis status object.\n status.init_auto_analysis(pipe_bundle, type='noe')\n status.current_analysis = pipe_bundle\n\n # Store the args.\n self.save_state = save_state\n self.pipe_name = pipe_name\n self.pipe_bundle = pipe_bundle\n self.file_root = file_root\n self.results_dir = results_dir\n if self.results_dir:\n self.grace_dir = results_dir + sep + 'grace'\n else:\n self.grace_dir = 'grace'\n\n # Data checks.\n self.check_vars()\n\n # Set the data pipe to the current data pipe.\n if self.pipe_name != cdp_name():\n switch(self.pipe_name)\n\n # Load the interpreter.\n self.interpreter = Interpreter(show_script=False, raise_relax_error=True)\n self.interpreter.populate_self()\n self.interpreter.on(verbose=False)\n\n # Execute.\n self.run()\n\n # Finish and unlock execution.\n status.auto_analysis[self.pipe_bundle].fin = True\n status.current_analysis = None\n status.exec_lock.release()",
"def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5",
"def __init__(self, input_stream=None, output_stream=None):\n super(Component, self).__init__()\n\n if input_stream is None:\n input_stream = sys.stdin\n\n if output_stream is None:\n # Pyleus serializer outputs byte sequence; however, Python 3.6 sys.stdout expects unicode stream inputs.\n # In order to write binary data to stdout, we use the underlying binary buffer object.\n # Ref: https://docs.python.org/3/library/sys.html#sys.stderr\n try:\n output_stream = sys.stdout.buffer\n except AttributeError:\n output_stream = sys.stdout\n\n self._input_stream = input_stream\n self._output_stream = output_stream\n\n self._pending_commands = deque()\n self._pending_taskids = deque()\n\n self._serializer = None",
"def piped(self):\n\t\tpass",
"def __init__ (self, pipe, histogram_buffer) :\n\t\tBasicDevice.__init__(self, pipe)\n\t\t# saving the buffer where the spectrum will be saved\n\t\tself.buffer = histogram_buffer",
"def setUp(self):\n\n # Add a data pipe to the data store.\n ds.add(pipe_name='orig', pipe_type='mf')\n\n # Add a single object to the 'orig' data pipe.\n ds['orig'].x = 1\n\n # Add a single object to the single spin system of the 'orig' data pipe.\n ds['orig'].mol[0].res[0].spin[0].num = 1\n\n # Add an empty data pipe (for the 'eliminate_unused_pipes' test).\n ds.add(pipe_name='empty', pipe_type='mf')\n\n # Set the current pipe to the 'orig' data pipe.\n pipes.switch('orig')",
"def __init__(self, pipeline, config=None):\n self.config = config\n self.pipeline = pipeline",
"def __init__(self, level):\n threading.Thread.__init__(self)\n self.daemon = False\n self.level = level\n self.fdRead, self.fdWrite = os.pipe()\n self.pipeReader = os.fdopen(self.fdRead)\n self.start()",
"def __init__(self, initial_state=True):\n self.ffmpeg = None\n self.initial_state = initial_state",
"def __init__(self):\n self._in = None\n self._out = None\n self._last_in_count = 0\n self._last_out_count = 0\n self._in_finished = False\n self._out_finished = False",
"def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server",
"def __init__(self, fd=None):",
"def __init__(self, err: bool = False, ctx: Optional[click.Context] = None):\n self.click_ctx = ctx or click.get_current_context(silent=True)\n self.is_err = err\n threading.Thread.__init__(self)\n self.daemon = False\n self.fdRead, self.fdWrite = os.pipe()\n self.pipeReader = os.fdopen(self.fdRead)\n self.start()",
"def __init__(self, args):\n self.args = args\n self.sender, receiver = mp.Pipe()\n self.plotter = RealPlotter()\n self.plot_process = mp.Process(\n target=self.plotter, args=(receiver,), daemon=True)\n self.plot_process.start()",
"def __init__(self, stdin=None, stdout=None):\n self._stdin = stdin or sys.stdin\n self._stdout = stdout or sys.stdout\n self._builtins = self._make_builtins()",
"def __init__(self, backend, open_nonblock=True):\n\n (self._path_in, self._path_out) = get_vsys_fifo_names(backend)\n self._open_nonblock = open_nonblock\n self._fd_in = None\n self._fd_out = None\n\n # Check that file exists.\n if (not vsys_fifo_exists(self._path_in) or\n not vsys_fifo_exists(self._path_out)):\n raise VsysCreateException('vsys FIFOs not found: %s, %s' %\n (self._path_in, self._path_out))",
"def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output",
"def __init__(self, pipeline_configuration):\n\n assert isinstance(pipeline_configuration, type(rs.config()))\n self._context = rs.context()\n self._available_devices = enumerate_connected_devices(self._context)\n self._enabled_devices = {}\n self._config = pipeline_configuration\n self._frame_counter = 0\n self._profile_pipe = \"\""
]
| [
"0.6365075",
"0.6329885",
"0.6310528",
"0.6218899",
"0.6135913",
"0.61334133",
"0.6073727",
"0.60552454",
"0.5996444",
"0.5959419",
"0.5923765",
"0.5914634",
"0.5893418",
"0.5892468",
"0.5878433",
"0.58646625",
"0.5845629",
"0.5842184",
"0.58149505",
"0.57957757",
"0.57928616",
"0.5783105",
"0.5772551",
"0.5767138",
"0.57389176",
"0.5721546",
"0.57194614",
"0.5712877",
"0.571025",
"0.57080144"
]
| 0.66151756 | 0 |
A static method used to create a pipe between two processes. On POSIX systems, it creates a named pipe using `os.mkfifo`. On Windows platforms, it starts a background thread that transfers data from the writer to the reader process it is connected to. | def create_ipc_pipe(temp_dir: str, suffix: str = '') -> 'Pipe':
unique_name = str(uuid.uuid4()) + suffix
pipe = Pipe()
if sys.platform == 'win32':
import win32pipe # type: ignore
pipe_name = '-nt-shaka-' + unique_name
# The read pipe is connected to a writer process.
pipe._read_pipe_name = r'\\.\pipe\W' + pipe_name
# The write pipe is connected to a reader process.
pipe._write_pipe_name = r'\\.\pipe\R' + pipe_name
buf_size = 64 * 1024
read_side = win32pipe.CreateNamedPipe(
pipe._read_pipe_name,
win32pipe.PIPE_ACCESS_INBOUND,
win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE,
1,
buf_size,
buf_size,
0,
None)
write_side = win32pipe.CreateNamedPipe(
pipe._write_pipe_name,
win32pipe.PIPE_ACCESS_OUTBOUND,
win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE,
1,
buf_size,
buf_size,
0,
None)
pipe._thread = Thread(
target=Pipe._win_thread_fn,
args=(read_side, write_side, buf_size),
daemon=True)
# Start the thread.
pipe._thread.start()
elif hasattr(os, 'mkfifo'):
pipe_name = os.path.join(temp_dir, unique_name)
pipe._read_pipe_name = pipe_name
pipe._write_pipe_name = pipe_name
readable_by_owner_only = 0o600 # Unix permission bits
os.mkfifo(pipe_name, mode=readable_by_owner_only)
else:
raise RuntimeError('Platform not supported.')
return pipe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_fifo(self):\n if os.path.exists(self.fifo_path):\n os.remove(self.fifo_path)\n os.mkfifo(self.fifo_path)",
"def _pipe(self):\n if self._evdev:\n return None\n\n if not self.__pipe:\n target_function = self._get_target_function()\n if not target_function:\n return None\n\n self.__pipe, child_conn = Pipe(duplex=False)\n self._listener = Process(target=target_function,\n args=(child_conn,))\n self._listener.daemon = True\n self._listener.start()\n return self.__pipe",
"def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w",
"def __init__(self):\n \n self._read_pipe_name = ''\n self._write_pipe_name = ''\n self._thread: Optional[Thread] = None",
"def create_file_pipe(path: str, mode: str) -> 'Pipe':\n\n pipe = Pipe()\n # A process will write on the read pipe(file).\n if mode == 'w':\n pipe._read_pipe_name = path\n # A process will read from the write pipe(file).\n elif mode == 'r':\n pipe._write_pipe_name = path\n else:\n raise RuntimeError(\"'{}' is not a valid mode for a Pipe.\".format(mode))\n return pipe",
"def _win_thread_fn(read_side, write_side, buf_size):\n\n import win32pipe, win32file, pywintypes # type: ignore\n try:\n # Connect to both ends of the pipe before starting the transfer.\n # This funciton is blocking. If no process is connected yet, it will wait\n # indefinitely.\n win32pipe.ConnectNamedPipe(read_side)\n win32pipe.ConnectNamedPipe(write_side)\n while True:\n # Writer -> read_side -> write_side -> Reader\n _, data = win32file.ReadFile(read_side, buf_size)\n win32file.WriteFile(write_side, data)\n except Exception as ex:\n # Remove the pipes from the system.\n win32file.CloseHandle(read_side)\n win32file.CloseHandle(write_side)\n # If the error was due to one of the processes shutting down, just exit normally.\n if isinstance(ex, pywintypes.error) and ex.args[0] in [109, 232]:\n return 0\n # Otherwise, raise that error.\n raise ex",
"def socket_pipe():\n\n # Create read0end acceptor.\n read_acceptor = socket.socket()\n read_acceptor.bind(('localhost', 0))\n read_acceptor.listen(10)\n read_acceptor.setblocking(False)\n\n # Create writer and connect it\n writer = socket.socket()\n writer.setblocking(True)\n writer.connect(read_acceptor.getsockname())\n\n # Wait for connection from the right socket\n for _ in xrange(10):\n reader, writer_address = read_acceptor.accept()\n reader.setblocking(True)\n if writer_address != writer.getsockname():\n sys.stderr.write(__name__ + \".socket_pipe: Waring: port \"\n \"scanning detected.\\n\")\n reader.close()\n continue\n break\n else:\n raise RuntimeError(\"socket_pipe: did not receive writer connection.\")\n\n read_acceptor.close()\n\n # Verify, that the connected socket is really the right one.\n test_message = str(random.random())\n writer.sendall(test_message)\n while test_message:\n test_chunk = reader.recv(len(test_message))\n if not test_chunk or not test_message.startswith(test_chunk):\n raise RuntimeError(\"socket_pipe: invalid test data received.\")\n test_message = test_message[len(test_chunk):]\n\n return reader, writer",
"def testPipeReader(self):\n\n\t\ttest_string = 2 * \"blah blah blah\\n\"\n\n\t\tmaster_fd, slave_fd = self._create_pipe()\n\t\tmaster_file = os.fdopen(master_fd, 'rb')\n\n\t\ttask_scheduler = TaskScheduler(max_jobs=2)\n\t\tscheduler = task_scheduler.sched_iface\n\n\t\tclass Producer(SpawnProcess):\n\t\t\tdef _spawn(self, args, **kwargs):\n\t\t\t\trval = SpawnProcess._spawn(self, args, **kwargs)\n\t\t\t\tos.close(kwargs['fd_pipes'][1])\n\t\t\t\treturn rval\n\n\t\tproducer = Producer(\n\t\t\targs=[\"bash\", \"-c\", \"echo -n '%s'\" % test_string],\n\t\t\tfd_pipes={1:slave_fd}, scheduler=scheduler)\n\n\t\tconsumer = PipeReader(\n\t\t\tinput_files={\"producer\" : master_file},\n\t\t\tscheduler=scheduler)\n\n\t\ttask_scheduler.add(producer)\n\t\ttask_scheduler.add(consumer)\n\n\t\ttask_scheduler.run()\n\n\t\tif sys.hexversion >= 0x3000000:\n\t\t\ttest_string = test_string.encode()\n\n\t\tself._assertEqual(test_string, consumer.getvalue())",
"def open_persistent_pipe(self):\n if self.proc is not None:\n return\n self.proc = subprocess.Popen([self.herbstclient_path, '--binary-pipe'],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n env=self.env,\n encoding=None, # open stdout/stdin in binary mode\n )",
"def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid",
"def open(self):\n # NOTE: caller MUST open for writing BEFORE opening for reading.\n self._fd_out = self._open_fifo(self._path_in, os.O_WRONLY)\n self._fd_in = self._open_fifo(self._path_out, os.O_RDONLY)",
"def net_proc(pipe):\n asyncio.run(net_server(pipe))",
"def zpipe(ctx):\n a = ctx.socket(zmq.PAIR)\n b = ctx.socket(zmq.PAIR)\n a.linger = b.linger = 0\n a.hwm = b.hwm = 1\n iface = f\"inproc://{binascii.hexlify(os.urandom(8))}\"\n a.bind(iface)\n b.connect(iface)\n return a, b",
"def threading_copy(self, data):\n r_fd, w_fd = os.pipe()\n rstream = os.fdopen(r_fd, \"rb\")\n wstream = os.fdopen(w_fd, \"wb\")\n copy_thread = threading.Thread(target=self.copystream, args=(rstream,))\n copy_thread.start()\n self.writestream(data, wstream)\n wstream.close()\n copy_thread.join()",
"def connect_pipes(input, output, service=VoidService, config={}):\n return connect_stream(PipeStream(input, output), service=service, config=config)",
"def live_network_input_to_pipe(iface=None, p=None):\n\n global g_pipein\n\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n g_pipein = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. Writing binary TDMs into pipe.\".format(p))\n\n if iface is None:\n print(\"Listening on default interface.\")\n try:\n sniff(prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")\n else:\n print(\"Listening on interface: {0}\".format(iface))\n try:\n sniff(iface=iface, prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")",
"def make_pipes(self, stderr=True):\r\n\r\n pipes = {'child_stdin':None,\r\n 'stdin':None,\r\n 'stdout':None,\r\n 'child_stdout':None,\r\n 'stderr':None,\r\n 'child_stderr':None}\r\n try:\r\n stdin, child_stdin = os.pipe()\r\n pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin\r\n stdout, child_stdout = os.pipe()\r\n pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout\r\n if stderr:\r\n stderr, child_stderr = os.pipe()\r\n pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr\r\n for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']):\r\n if fd is not None:\r\n fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | os.O_NDELAY)\r\n return pipes\r\n except OSError:\r\n for fd in pipes.values():\r\n if fd is not None:\r\n self.close_fd(fd)",
"def makeFifo(filename):\n\ttry:\n\t\tos.mkfifo(filename)\n\t\tprint filename\n\texcept OSError, e:\n\t\tpass\n\t\n\tfifo = open(filename, 'w')\n\treturn fifo",
"def test_mp_pipe_replacement(self):\n parent, child = create_psuedo_anonymous_duct_pair()\n\n def mp_child_target():\n parent.close()\n time.sleep(3)\n child.send(\"hello world\")\n child.close()\n\n p = multiprocessing.Process(target=mp_child_target)\n p.daemon = True\n p.start()\n child.close()\n\n p.join(10)\n assert_that(parent.recv()).is_equal_to(\"hello world\")\n parent.close()",
"def __init__(self, level):\n threading.Thread.__init__(self)\n self.daemon = False\n self.level = level\n self.fdRead, self.fdWrite = os.pipe()\n self.pipeReader = os.fdopen(self.fdRead)\n self.start()",
"def _open(self):\n if self._is_ready_to_open():\n try:\n # self.primary corresponds to the \"master\" file descriptor from pty.openpty().\n # self.secondary corresponds to the \"slave\" file descriptor from pty.openpty().\n self.primary, self.secondary = pty.openpty()\n self._process = subprocess.Popen( # pylint: disable=subprocess-popen-preexec-fn\n self._args,\n stdin=self.secondary,\n stdout=self.secondary,\n stderr=self.secondary,\n cwd=self._cwd,\n close_fds=self._properties[transport_properties.CLOSE_FDS],\n preexec_fn=os.setsid # This creates a new \"process group\"\n )\n # Set the stdin and stdout to self.primary\n self._process.stdout = self.primary\n self._process.stdin = self.primary\n os.close(self.secondary)\n self.secondary = None\n except Exception:\n self.close()\n raise",
"def fission_pipe():\n yield base.BasePipe(1)\n base.reset()",
"def _pipe_redirected(to=os.devnull, pipe=sys.stdout):\n pipe_fd = _fileno(pipe)\n # copy pipe_fd before it is overwritten\n with os.fdopen(os.dup(pipe_fd), 'wb') as copied: \n pipe.flush() # flush library buffers that dup2 knows nothing about\n try:\n os.dup2(_fileno(to), pipe_fd) # $ exec >&to\n except ValueError: # filename\n with open(to, 'wb') as to_file:\n os.dup2(to_file.fileno(), pipe_fd) # $ exec > to\n try:\n yield pipe # allow code to be run with the redirected pipe\n finally:\n # restore pipe to its previous value\n # dup2 makes pipe_fd inheritable unconditionally\n pipe.flush()\n os.dup2(copied.fileno(), pipe_fd) # $ exec >&copied",
"def fork_with_pipe(executable=None, args=None):\n executable, args = _executable_and_args(executable, args)\n pipe_r, pipe_w = os.pipe()\n pid = os.fork()\n if pid == 0:\n os.execvp(executable, args)\n raise RuntimeError('Failed to call os.execvp')\n os.close(pipe_w)\n return pid, pipe_r",
"def run(outs, ins_filter='/dev/ttyUSB.*', newport=lambda conn: None, write_queue=None):\r\n data_queue = multiprocessing.Queue()\r\n\r\n multiprocessing.Process(\r\n target=writer,\r\n args=(data_queue, write_queue, outs)\r\n ).start()\r\n\r\n readers = {}\r\n\r\n while True:\r\n\r\n for (path, _, _) in serial.tools.list_ports.grep(ins_filter):\r\n\r\n if path not in readers.keys() or not readers[path].is_alive():\r\n\r\n readers[path] = multiprocessing.Process(\r\n target=reader, args=(data_queue, path, newport)\r\n )\r\n readers[path].start()",
"def __init__(self, process_name, target_function, tasks):\n self.pipe_start, self.pipe_end = multiprocessing.Pipe()\n printnflush (\"Process started: %s\"%process_name)\n self.process = multiprocessing.Process(group=None,\n target=target_function,\n name=process_name,\n args = (process_name, tasks, self.pipe_end))\n self.busy = False",
"def __init__(self):\n # Open stata as pipe; make a queue for non-blocking. Start the thread.\n self.proc = sp.Popen(['stata-mp'], stdin=sp.PIPE, stdout=sp.PIPE, bufsize=1)\n\n self.qu = Queue()\n\n self.thread = Thread(target = self.enqueue_output, args = (self.proc.stdout,\n self.qu))\n self.thread.daemon = True\n self.thread.start()\n\n # Read the initial stdout content.\n self.genout()",
"def _start_child(self):\n parent_pipe, child_pipe = mp.Pipe()\n self._poll.register(parent_pipe.fileno(), select.POLLIN | select.POLLPRI)\n\n pid = os.fork()\n if not pid:\n ch = Worker(child_pipe, self.server_socket)\n parent_pipe.close()\n ch.run()\n else:\n self._children[parent_pipe.fileno()] = ManagerChild(pid, parent_pipe)\n child_pipe.close()",
"def spawn(f):\n def fun(pipe, x):\n pipe.send(f(x))\n pipe.close()\n return fun",
"def get_pipe(self):\n import d6tflow.pipes\n return d6tflow.pipes.get_pipe(self.get_pipename())"
]
| [
"0.6540652",
"0.6175657",
"0.61306775",
"0.5987756",
"0.580536",
"0.5800549",
"0.57563645",
"0.5750457",
"0.5711531",
"0.5672833",
"0.5666262",
"0.5652206",
"0.56253594",
"0.5574714",
"0.55548304",
"0.55499554",
"0.55309844",
"0.55285764",
"0.5519371",
"0.551878",
"0.54414266",
"0.5441144",
"0.5371659",
"0.5344933",
"0.53003174",
"0.5293924",
"0.5281167",
"0.5276894",
"0.5262257",
"0.52395755"
]
| 0.7244897 | 0 |
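
The POSIX branch of the record above relies on the blocking semantics of FIFOs: opening one end of a FIFO blocks until the other end is opened. Below is a minimal runnable sketch of that behaviour, assuming a POSIX system; the path, thread helper, and message are illustrative and not part of the dataset record.

```python
import os
import tempfile
import threading

# Illustrative sketch of POSIX FIFO semantics (assumes os.mkfifo is available).
fifo_path = os.path.join(tempfile.mkdtemp(), 'demo-fifo')
os.mkfifo(fifo_path, mode=0o600)

def writer():
    # open() on a FIFO blocks until a reader opens the other end.
    with open(fifo_path, 'wb') as f:
        f.write(b'hello through the fifo')

t = threading.Thread(target=writer, daemon=True)
t.start()

# The reader side also blocks until the writer connects, then reads to EOF.
with open(fifo_path, 'rb') as f:
    print(f.read())  # b'hello through the fifo'

t.join()
os.remove(fifo_path)
```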
Returns a Pipe object whose read or write end is a path to a file. | def create_file_pipe(path: str, mode: str) -> 'Pipe':
pipe = Pipe()
# A process will write on the read pipe(file).
if mode == 'w':
pipe._read_pipe_name = path
# A process will read from the write pipe(file).
elif mode == 'r':
pipe._write_pipe_name = path
else:
raise RuntimeError("'{}' is not a valid mode for a Pipe.".format(mode))
return pipe | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w",
"def pipe(required=True, mode='r'):\n def validate(ctx, param, value):\n if value is not None:\n return click.open_file(value, mode=mode, lazy=True), value\n\n get_stream = click.get_binary_stream if 'b' in mode else click.get_text_stream\n\n if 'r' not in mode:\n return get_stream('stdout'), None\n\n stream = get_stream('stdin')\n\n if not stream.isatty():\n return stream, None\n\n if required:\n raise click.MissingParameter(ctx=ctx, param=param)\n\n return None, None\n return validate",
"def get_pipe(self):\n import d6tflow.pipes\n return d6tflow.pipes.get_pipe(self.get_pipename())",
"def _pipe(self):\n if self._evdev:\n return None\n\n if not self.__pipe:\n target_function = self._get_target_function()\n if not target_function:\n return None\n\n self.__pipe, child_conn = Pipe(duplex=False)\n self._listener = Process(target=target_function,\n args=(child_conn,))\n self._listener.daemon = True\n self._listener.start()\n return self.__pipe",
"def read_pipe(pipe_out):\n out = b''\n while more_data(pipe_out):\n out += os.read(pipe_out, 1024)\n\n return out.decode('utf-8')",
"def _pipe_redirected(to=os.devnull, pipe=sys.stdout):\n pipe_fd = _fileno(pipe)\n # copy pipe_fd before it is overwritten\n with os.fdopen(os.dup(pipe_fd), 'wb') as copied: \n pipe.flush() # flush library buffers that dup2 knows nothing about\n try:\n os.dup2(_fileno(to), pipe_fd) # $ exec >&to\n except ValueError: # filename\n with open(to, 'wb') as to_file:\n os.dup2(to_file.fileno(), pipe_fd) # $ exec > to\n try:\n yield pipe # allow code to be run with the redirected pipe\n finally:\n # restore pipe to its previous value\n # dup2 makes pipe_fd inheritable unconditionally\n pipe.flush()\n os.dup2(copied.fileno(), pipe_fd) # $ exec >&copied",
"def pipe_path(name: str, extension=\".txt\") -> str:\n return \"\\\\\".join(sys.argv[0].split(\"\\\\\")[:-3]) + f\"\\\\pipeline\\\\{name}{extension}\"",
"def create_ipc_pipe(temp_dir: str, suffix: str = '') -> 'Pipe':\n\n unique_name = str(uuid.uuid4()) + suffix\n pipe = Pipe()\n\n if sys.platform == 'win32':\n import win32pipe # type: ignore\n pipe_name = '-nt-shaka-' + unique_name\n # The read pipe is connected to a writer process.\n pipe._read_pipe_name = r'\\\\.\\pipe\\W' + pipe_name\n # The write pipe is connected to a reader process.\n pipe._write_pipe_name = r'\\\\.\\pipe\\R' + pipe_name\n buf_size = 64 * 1024\n\n read_side = win32pipe.CreateNamedPipe(\n pipe._read_pipe_name,\n win32pipe.PIPE_ACCESS_INBOUND,\n win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE,\n 1,\n buf_size,\n buf_size,\n 0,\n None)\n\n write_side = win32pipe.CreateNamedPipe(\n pipe._write_pipe_name,\n win32pipe.PIPE_ACCESS_OUTBOUND,\n win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE,\n 1,\n buf_size,\n buf_size,\n 0,\n None)\n\n pipe._thread = Thread(\n target=Pipe._win_thread_fn,\n args=(read_side, write_side, buf_size),\n daemon=True)\n # Start the thread.\n pipe._thread.start()\n elif hasattr(os, 'mkfifo'):\n pipe_name = os.path.join(temp_dir, unique_name)\n pipe._read_pipe_name = pipe_name\n pipe._write_pipe_name = pipe_name\n readable_by_owner_only = 0o600 # Unix permission bits\n os.mkfifo(pipe_name, mode=readable_by_owner_only)\n else:\n raise RuntimeError('Platform not supported.')\n return pipe",
"def open_unix_connection(path=None, *,\n loop=None, limit=_DEFAULT_LIMIT, **kwds):\n if loop is None:\n loop = events.get_event_loop()\n reader = StreamReader(limit=limit, loop=loop)\n protocol = StreamReaderProtocol(reader, loop=loop)\n transport, _ = yield from loop.create_unix_connection(\n lambda: protocol, path, **kwds)\n writer = StreamWriter(transport, protocol, reader, loop)\n return reader, writer",
"def output_to_pipe(pipe_in):\n os.dup2(pipe_in, 1) # stdout\n # os.dup2(pipe_in, 2) # stderr",
"def pipe(*args, **kwargs):\n return parser(*args, **kwargs)",
"def _open_fifo(self, path, flags):\n collectd.info('Opening: %s' % path)\n if self._open_nonblock:\n # NOTE: Open non-blocking, to detect when there is no reader. Or, so\n # reads can timeout using select or poll.\n flags |= os.O_NONBLOCK\n\n try:\n return os.open(path, flags)\n except OSError as err:\n # If opening for write, the error is likely errno.ENXIO. ENXIO occurs\n # when no reader has the other end open. e.g. when vsys is not running in\n # root context.\n raise VsysOpenException('Opening vsys fifo (%s) failed: %s' %\n (path, err))",
"def fileobj(path_or_file, mode='r'):\n if isinstance(path_or_file, basestring):\n try:\n return open(path_or_file, mode)\n except:\n return closing(StringIO())\n else:\n return closing(path_or_file)",
"def async_pipe(self, **kwargs):\n return AsyncPipe(source=self.async_fetch(), **kwargs)",
"def open(path: str, mode: str = 'r', buffer_size: int = 8192) -> io.IOBase:\n return _fs().open(path, mode, buffer_size)",
"def GetParanoidResourceAsFile(path: str,\n mode: str = 'r') -> IO[Union[bytes, str]]:\n path = os.path.join(_ROOT_DIR, path)\n if os.path.isdir(path):\n raise IOError('Resource \"{}\" is not a file'.format(path))\n if not os.path.isfile(path):\n raise IOError(\n 'Resource \"{}\" not found; is it a data dependency?'.format(path))\n return open(path, mode)",
"def get_stream(fname):\n if fname == '-':\n return sys.stdin\n\n if not os.path.isfile(fname):\n print(f\"file not found: {fname}\")\n sys.exit(1)\n\n return open(fname)",
"def open(self):\n # NOTE: caller MUST open for writing BEFORE opening for reading.\n self._fd_out = self._open_fifo(self._path_in, os.O_WRONLY)\n self._fd_in = self._open_fifo(self._path_out, os.O_RDONLY)",
"def open(self):\n return File(open(self.get_path()), \"rb\")",
"def bind(self, func: Callable[[Any], IO]) -> IO:\n\n filename, g = self._value\n return ReadFile(filename, lambda s: g(s).bind(func))",
"def connect_pipes(input, output, service=VoidService, config={}):\n return connect_stream(PipeStream(input, output), service=service, config=config)",
"def echo_pipe(name: str, pipe: IO[AnyStr], dest_file: TextIO):\r\n assert pipe\r\n with pipe:\r\n for line in iter(pipe.readline, b''):\r\n if not line:\r\n break\r\n logging.debug(name, len(line), line, end=\"\", file=dest_file)\r\n print(line, end=\"\", file=dest_file)\r\n\r\n logging.info(f\"{name} closed\")",
"def live_network_input_to_pipe(iface=None, p=None):\n\n global g_pipein\n\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n g_pipein = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. Writing binary TDMs into pipe.\".format(p))\n\n if iface is None:\n print(\"Listening on default interface.\")\n try:\n sniff(prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")\n else:\n print(\"Listening on interface: {0}\".format(iface))\n try:\n sniff(iface=iface, prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")",
"def streaming_from_file(filepath):\r\n pipe = rs.pipeline()\r\n cfg = rs.config()\r\n cfg.enable_device_from_file(filepath, repeat_playback=False)\r\n return pipe, cfg",
"def open_persistent_pipe(self):\n if self.proc is not None:\n return\n self.proc = subprocess.Popen([self.herbstclient_path, '--binary-pipe'],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n env=self.env,\n encoding=None, # open stdout/stdin in binary mode\n )",
"def fission_pipe():\n yield base.BasePipe(1)\n base.reset()",
"def to_filehandle(fname, flag='r', return_opened=False, encoding=None):\n if is_string_like(fname):\n fh = Path(fname).open(mode=flag)\n opened = True\n elif isinstance(fname, Path):\n fh = fname.open(mode=flag)\n elif hasattr(fname, 'seek'):\n fh = fname\n opened = False\n else:\n raise ValueError('fname must be a pathlib Path, string or file handle')\n if return_opened:\n return fh, opened\n return fh",
"def pipe(cmdline, input = None):\n args = _shlex.split(cmdline)\n\n if input is not None:\n command = _subprocess.Popen(args, stdin = _subprocess.PIPE, stdout = _subprocess.PIPE, stderr = None)\n else:\n command = _subprocess.Popen(args, stdin = None, stdout = _subprocess.PIPE, stderr = None)\n\n (out, err) = command.communicate(input)\n return out",
"def pipe(cmd):\n fp = os.popen(cmd)\n res = fp.read()\n stat = fp.close()\n assert stat is None\n return res, stat",
"def read(self, n = None):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tsaved = self._state.read(n)\n\t\tif n == None:\n\t\t\treturn saved + pservlet.pipe_read(self._pipe_desc)\n\t\telif n == len(saved): \n\t\t\treturn saved\n\t\telse:\n\t\t\treturn saved + pservlet.pipe_read(self._pipe_desc, n - len(saved))"
]
| [
"0.6639405",
"0.63086575",
"0.623345",
"0.61123365",
"0.6065255",
"0.5675125",
"0.5619844",
"0.55862534",
"0.53370893",
"0.52485263",
"0.5228577",
"0.5204829",
"0.52042913",
"0.51842123",
"0.51655436",
"0.5163331",
"0.5127727",
"0.5091169",
"0.5064754",
"0.5046593",
"0.5034485",
"0.5022802",
"0.5016275",
"0.50132877",
"0.49828473",
"0.4972664",
"0.49559382",
"0.49519795",
"0.49339837",
"0.4918393"
]
| 0.76168203 | 0 |
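
A short, self-contained usage sketch of the mode convention described above; the stand-in class and the paths are assumptions for illustration only, since the record does not show how callers consume the returned object.

```python
# Minimal stand-in for the Pipe class in the record, to make the sketch runnable.
class Pipe:
    def __init__(self):
        self._read_pipe_name = ''
        self._write_pipe_name = ''

    @staticmethod
    def create_file_pipe(path: str, mode: str) -> 'Pipe':
        pipe = Pipe()
        if mode == 'w':    # a process will write into this path
            pipe._read_pipe_name = path
        elif mode == 'r':  # a process will read from this path
            pipe._write_pipe_name = path
        else:
            raise RuntimeError("'{}' is not a valid mode for a Pipe.".format(mode))
        return pipe

# 'w': the path becomes the pipe's read end (some writer process fills it).
out_pipe = Pipe.create_file_pipe('/tmp/output.bin', 'w')
assert out_pipe._read_pipe_name == '/tmp/output.bin'

# 'r': the path becomes the pipe's write end (some reader process consumes it).
in_pipe = Pipe.create_file_pipe('/tmp/input.bin', 'r')
assert in_pipe._write_pipe_name == '/tmp/input.bin'
```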
Reset the value of a theano shared variable to all zeros. t_S: shared theano variable. | def reset_shared_var(t_S):
t_S.set_value(np.zeros_like(t_S.get_value()).astype('float32')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_adadelta_variables(t_A=self.t_A):\n A0 = np.zeros_like(t_A.get_value()).astype(theano.config.floatX)\n t_ada_Eg2.set_value(A0)\n t_ada_dA2.set_value(A0)\n t_A.set_value(A0)",
"def build_shared_zeros(shape, name):\n return theano.shared(\n \tvalue=np.zeros(shape, dtype=theano.config.floatX), \n \tname=name, \n \tborrow=True\n )",
"def reset(self):\n self.t = 0\n # two outputs: the thrusters, u_r and u_l and stop neuron\n self.action = [0.0, 0.0, 0.0]\n # x, vx, y, vy, theta, omega\n # self.state = [2.0, 0.0, 2.0, 0.0, 0.0, 0.0]\n self.state = self.start_cnd\n x, vx, y, vy, theta, omega = self.state\n# print x, self.state\n self.init_distance = self.getDistance()\n \n self.solver = ode(self.dX)\n self.solver.set_integrator('dopri5') \n self.solver.set_initial_value(self.state, self.t)",
"def init_m_aux(self):\n self.t_T.set_value(np.array([1.]).astype(theano.config.floatX))\n self.t_fista_X.set_value(self.t_A.get_value())",
"def reset_layer(self):\n if self.W is None:\n if self.sparse_initialize:\n W_values = self.sparse_initialize_weights()\n else:\n if self.activation == theano.tensor.tanh:\n born = np.sqrt(6. / (self.n_in + self.n_out))\n else:\n born = 4 * np.sqrt(6. / (self.n_in + self.n_out))\n W_values = np.asarray(self.rng.uniform(\n low=-born,\n high=born,\n size=(self.n_in, self.n_out)),\n dtype=theano.config.floatX)\n\n self.W = theano.shared(value=W_values, name='W', borrow=True)\n\n if self.b is None:\n b_values = np.zeros(int(self.n_out/self.num_pieces),\n dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, name='b', borrow=True)\n\n if self.sparser is None:\n s_values = np.ones(\n int(self.n_out/self.num_pieces), dtype=theano.config.floatX)\n self.sparser = theano.shared(value=s_values, name='sparser',\n borrow=True)\n # The layer parameters\n self.params = [self.W, self.b]",
"def reset_global(self):\n self.T = 0\n self.ep = 0\n self.t = 0\n self.G = 0.0\n self._ep_starttime = time.time()",
"def reset_state(self):\n self.s = np.copy(self.s_i)",
"def reset_state(self):\n self.intersection_per_class.assign(\n tf.zeros_like(self.intersection_per_class)\n )\n self.union_per_class.assign(tf.zeros_like(self.union_per_class))",
"def reset(self, var=0.0):\n\n for stype in self.s:\n self.s[stype][:] = 1./self.ns\n if var != 0:\n rnd = utils.rand_mat(self.ns, 1, 2)\n self.s[stype] += 1./self.ns * var * rnd\n self.s[stype] = self.s[stype] / self.s[stype].sum()",
"def test_no_shared_as_input(self):\r\n w_init = numpy.random.rand(2, 2)\r\n w = shared(w_init.copy(), 'w')\r\n try:\r\n pfunc([w], theano.tensor.sum(w * w))\r\n assert False\r\n except TypeError, e:\r\n msg = 'Cannot use a shared variable (w) as explicit input'\r\n if str(e).find(msg) < 0:\r\n raise",
"def reset(self):\n \n s = self\n s.step_counter = 0\n \n # TODO: initialize first layer activations here, and not everywhere else\n # self.model.initialize_local_vars()\n # self.model.initialize_global_vars()\n\n ops = []\n\n for var in self.model.trainable_vars:\n if self.needs_correction(var):\n A_svd = s[var].A.svd\n B2_svd = s[var].B2.svd \n ops.extend(A_svd.init_ops)\n ops.extend(B2_svd.init_ops)\n ops.append(s[var].A.cov.initializer)\n ops.append(s[var].B2.cov.initializer)\n\n # in new TensorFlow this breaks, probably because of\n # https://github.com/tensorflow/tensorflow/commit/07adc2ea910de715d31e16a019fcbcccb575e931\n # sometimes get \"need to feed\" placeholder error\n # sometimes do not get this error, but spend two minutes inside\n # _build_initializer_expr\n s.run(ops)",
"def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])",
"def reset(self):\r\n self.state = copy.copy(self.mu)",
"def __init__(self, input=None, n_visible=784, n_hidden=500, \\\n W=None, hbias=None, vbias=None, \n seed = None, theano_rng=None,\n batch_size=0, t_batch_size=1, \n n_beta=10, beta_lbound=0., tau=None):\n assert (n_beta > 1 and t_batch_size > 0) or (n_beta==1 and t_batch_size==0)\n if t_batch_size > 0: assert batch_size%t_batch_size==0\n\n self.n_visible = n_visible\n self.n_hidden = n_hidden\n self.t_batch_size = t_batch_size # size of tempered minibatch\n self.batch_size = batch_size # size of T=1 minibatch\n \n # deal with random number generation\n if seed is None:\n rng = numpy.random.RandomState(123)\n else:\n rng = numpy.random.RandomState(seed)\n if theano_rng is None:\n theano_rng = RandomStreams(rng.randint(2**30))\n self.rng = rng\n self.theano_rng = theano_rng\n\n if W is None : \n # W is initialized with `initial_W` which is uniformely sampled\n # from -4*sqrt(6./(n_visible+n_hidden)) and 4*sqrt(6./(n_hidden+n_visible))\n # the output of uniform if converted using asarray to dtype \n # theano.config.floatX so that the code is runable on GPU\n initial_W = 0.01 * self.rng.randn(n_visible, n_hidden)\n # theano shared variables for weights and biases\n W = sharedX(initial_W, 'W')\n self.W = W\n\n if hbias is None :\n # create shared variable for hidden units bias\n hbias = sharedX(numpy.zeros(n_hidden), 'hbias')\n self.hbias = hbias\n\n if vbias is None :\n # create shared variable for visible units bias\n vbias = sharedX(numpy.zeros(n_visible), 'vbias')\n self.vbias = vbias\n\n # initialize input layer for standalone RBM or layer0 of DBN\n if input is None:\n input = T.matrix('input')\n self.input = input \n\n #########################################################################\n # Fields indexed by batch_size + mixstat: buffer, E\n # Fields indexed by mixstat: beta, labels, rtime\n # Fields indexed by temp index: mixstat, fup_target, nup, ndown, swapstat\n #########################################################################\n\n ### initialize tempering stuff ###\n n_chain = t_batch_size * n_beta\n self.n_chain = theano.shared(n_chain, name='n_chain') # number of active chains in buffer array\n self.n_beta = theano.shared(n_beta, name='n_beta') # number of temperatures in system\n self.n_chain_total = batch_size + self.n_chain\n\n # configure buffers for negative particles\n _buffer = self.rng.randint(0,2,size=(batch_size + 2*n_chain, n_visible))\n self._buffer = sharedX(_buffer, name='buffer')\n self.buffer = self._buffer[:self.n_chain_total]\n # buffer used to store mean-field activation\n self.mf_buffer = sharedX(numpy.zeros_like(_buffer), name='mf_buffer')\n\n # vectors containing energy of current negative particles (at T=1)\n self._E = sharedX(numpy.zeros(batch_size + 2*n_chain), name='E')\n self.E = self._E[:self.n_chain_total]\n\n # Space out inverse temperature parameters linearly in [1,beta_lbound] range .\n beta = numpy.zeros(2*n_chain)\n for bi in range(t_batch_size):\n base_idx = n_beta*bi\n beta[base_idx:base_idx+n_beta] = numpy.linspace(1, beta_lbound, n_beta)\n self._beta = sharedX(beta, name='beta')\n self.beta = self._beta[:self.n_chain]\n\n # Used to multiply the rows of \"W x + b\"\n self.beta_matrix = T.vertical_stack(\n T.alloc(1.0, batch_size, 1),\n self.beta.dimshuffle([0,'x']))\n\n # initialize data structure to map nhid/nvis rows to a given temperature\n # mixstat stores pointers to self.nvis array\n mixstat = numpy.zeros((t_batch_size, 2*n_beta), dtype='int32')\n mixstat[:, :n_beta] = numpy.arange(n_chain).reshape(t_batch_size, n_beta)\n 
self._mixstat = theano.shared(mixstat, name='mixstat')\n self.mixstat = self._mixstat[:, :self.n_beta]\n\n ### Initialize particle properties ###\n\n # labels: 1 means going up in temperature, 0 going down in temperature\n labels = LBL_NONE * numpy.ones(2*n_chain, dtype='int32')\n labels[mixstat[:,0]] = LBL_UP\n self.labels = theano.shared(labels, name='labels') \n\n # return time\n rtime = numpy.zeros(2*n_chain, dtype='int32')\n self.rtime = theano.shared(rtime, name='rtime') \n self.avg_rtime = sharedX(rtime_deo(0.4,n_beta), name='avg_rtime')\n\n ### Initialize temperature properties ###\n\n # configure fup target for each chain (this shouldn't change very often)\n _fup_target = numpy.zeros(2*n_beta)\n _fup_target[:n_beta] = numpy.linspace(1,0,n_beta)\n self._fup_target = sharedX(_fup_target, name='fup_target')\n self.fup_target = self._fup_target[:self.n_beta]\n\n # configure histogram of up moving particles\n _nup = numpy.zeros(2*n_beta)\n _nup[:n_beta] = numpy.linspace(1,0,n_beta)\n self._nup = sharedX(_nup, name='nup')\n self.nup = self._nup[:self.n_beta]\n \n # configure histogram of down moving particles\n _ndown = numpy.zeros(2*n_beta)\n _ndown[:n_beta] = numpy.linspace(0,1,n_beta)\n self._ndown = sharedX(_ndown, name='ndown')\n self.ndown = self._ndown[:self.n_beta]\n\n # use return time as the time constant for all moving averages\n if not tau:\n self.tau = 1./self.avg_rtime\n else:\n self.tau = T.as_tensor(tau)\n self.get_tau = theano.function([], self.tau)\n\n # create PT Op\n self._swapstat = sharedX(numpy.zeros(2*n_beta), name='swapstat')\n self.swapstat = self._swapstat[:self.n_beta]\n\n self.pt_swaps = PT_Swaps(rng=self.rng)\n self.pt_swap_t1_sample = PT_SwapT1Sample(rng=self.rng, batch_size=self.batch_size)",
"def reset_image_estimate(self):\n # reset_shared_var(self.t_A)\n self.t_A.set_value(self.t_QUAD_REG_MEAN.get_value())\n reset_shared_var(self.t_Ap)",
"def reset_params(self):\n for pp in self.params:\n if 'optimizer_param' in pp.tags:\n pp.set_value(np.zeros(pp.get_value(borrow=True).shape, dtype=theano.config.floatX))",
"def initialise_theano_rng(self):\n\n\t\tself.theano_rng = RandomStreams(self.rng.randint(2**30))",
"def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0",
"def reset() -> None:\n global t0\n\n t0 = time.perf_counter()",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)"
]
| [
"0.6679739",
"0.6287664",
"0.61643195",
"0.6138375",
"0.6127964",
"0.5851154",
"0.58234274",
"0.5742112",
"0.5700925",
"0.56882864",
"0.56664526",
"0.5655095",
"0.55909204",
"0.55726826",
"0.5546559",
"0.55088156",
"0.5503559",
"0.5497779",
"0.5488067",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165",
"0.54708165"
]
| 0.811806 | 0 |
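
A minimal runnable sketch of the reset pattern above, assuming Theano is installed; the accumulator around it is illustrative and shows why `set_value` (rather than rebinding the variable) keeps previously compiled functions working.

```python
import numpy as np
import theano

# A shared accumulator and a compiled function that increments it in place.
t_S = theano.shared(np.ones((2, 2), dtype='float32'), name='S')
step = theano.function([], t_S, updates=[(t_S, t_S + np.float32(1))])

step()  # t_S now holds all 2s

# Resetting via set_value mutates the same shared object, so `step` above
# keeps operating on it after the reset.
t_S.set_value(np.zeros_like(t_S.get_value()).astype('float32'))
print(t_S.get_value())  # all zeros, same shape and dtype as before
```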
Take dot product of image with an array of gaussians. t_S: image variable, shape (i2, i1); t_Var: variances of receptive fields; t_XS: X coordinate for image pixels for dimension i1; t_YS: Y coordinate for image pixels for dimension i2; t_XE: X coordinate for receptive fields j; t_YE: Y coordinate for receptive fields j; t_XR: X coordinate for retina in form batch, timestep (b, t); t_YR: Y coordinate for retina, same form. Returns t_Ips, the image-RF inner products, and t_PixRFCoupling, the pixel-RF coupling used for the gradient. | def inner_products(t_S, t_Var, t_XS, t_YS, t_XE, t_YE, t_XR, t_YR):
# Note in this computation, we do the indices in this form:
# b, i, j, t
# batch, pixel, neuron, time step
# indices: b, i1, j, t
t_dX = (t_XS.dimshuffle('x', 0, 'x', 'x') -
t_XE.dimshuffle('x', 'x', 0, 'x') -
t_XR.dimshuffle(0, 'x', 'x', 1))
t_dX.name = 'dX'
# indices: b, i2, j, t
t_dY = (t_YS.dimshuffle('x', 0, 'x', 'x') -
t_YE.dimshuffle('x', 'x', 0, 'x') -
t_YR.dimshuffle(0, 'x', 'x', 1))
t_dY.name = 'dY'
# Use outer product trick to dot product image with point filters
t_PixRFCouplingX = T.exp(-0.5 * t_dX ** 2 /
t_Var.dimshuffle('x', 0, 'x', 'x'))
t_PixRFCouplingY = T.exp(-0.5 * t_dY ** 2 /
t_Var.dimshuffle('x', 0, 'x', 'x'))
t_PixRFCouplingX.name = 'PixRFCouplingX'
t_PixRFCouplingY.name = 'PixRFCouplingY'
# Matrix of inner products between the images and the retinal RFs
# indices: b, j, t
# Sum_i2 T(i2, i1) * T(b, i2, j, t) = T(b, i1, j, t)
t_IpsY = T.sum(t_S.dimshuffle('x', 0, 1, 'x', 'x') *
t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3),
axis=1)
# Sum_i1 T(b, i1, j, t) * T(b, i2, j, t) = T(b, j, t)
t_Ips = T.sum(t_IpsY * t_PixRFCouplingX, axis=1)
t_Ips.name = 'Ips'
# For the gradient, we also prepare d Ips / dS
# This is in the form b, i2, i1, j, t
t_PixRFCoupling = (t_PixRFCouplingX.dimshuffle(0, 'x', 1, 2, 3) *
t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3))
return t_Ips, t_PixRFCoupling | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prediction_aggregation(self, xt_s,mu_s,var_s, method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)",
"def dot_image(image, B):\n \n imshape = image.shape\n if not image.flags['C_CONTIGUOUS']:\n raise TypeError, 'Error: cannot deal with non-C-contiguous image'\n output = gemm(1.0, image.reshape((np.prod(imshape[:-1]), imshape[-1])), B)\n return output.reshape(imshape[:-1] + (B.shape[1],))",
"def make_gt(img, labels, outputRes=None, sigma=10):\n\n if outputRes is not None:\n h, w = outputRes\n else:\n h, w = img.shape\n # print (h, w, len(labels))\n #gt = np.zeros((h, w, len(labels)), np.float32)\n gt = np.zeros((h, w, 1), np.float32)\n\n for land in range(0, labels.shape[0]):\n gt[:,:,0] = gt[:,:,0] + (make_gaussian((h, w), sigma, (labels[land, 0], labels[land, 1])))\n return gt",
"def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. (23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)",
"def sgd(self):\n for i, j, r in self.samples:\n # Computer prediction and error\n if (self.type=='bias'):\n prediction = self.get_rating_bias(i, j)\n elif(self.type=='nonbias') :\n prediction = self.get_rating(i, j)\n # print(i, j, r,prediction)\n e = (r - prediction)\n\n # Update biases\n self.b_u[i] =self.b_u[i]+ self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] = self.b_i[j] + self.alpha * (e - self.beta * self.b_i[j])\n\n # Create copy of row of P since we need to update it but use older values for update on Q\n P_i = self.P[i, :][:]\n\n # Update user and item latent feature matrices\n # print(self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :]))\n # print(self.P[i, :])\n self.P[i, :] =self.P[i, :] + self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :])\n # print(self.P[i, :],\"&&&&&&\")\n self.Q[j, :] = self.Q[j, :] + self.alpha * (e * P_i - self.beta * self.Q[j, :])\n # print(self.Q[j, :])",
"def tifg_step(self, dataset='coco', input_stream='img', lambda_coeff=1):\n assert lambda_coeff >= 0\n if lambda_coeff == 0:\n return\n params = self.params\n name = 'model' if params.encoder_only else 'encoder'\n model = getattr(self, name)\n model.train()\n\n (x1, len1), (x_img, x_img_mask, img_loc, img_id) = self.get_batch('tifg', dataset, input_stream)\n\n img_len = x_img_mask.sum(dim=1)\n x_img = x_img.transpose(0, 1)\n img_loc = img_loc.transpose(0, 1)\n\n x_img = x_img[:36]\n img_len -= 64\n img_loc = img_loc[:36]\n\n x_img, x_img_mask, img_len, img_loc, x1, len1 = to_cuda(x_img, x_img_mask, img_len, img_loc, x1, len1)\n\n bs = len1.size(0)\n if bs == 1: # can happen (although very rarely), which makes the negative loss fail\n self.n_sentences += params.batch_size\n return\n\n img_enc, img_mask = model('ImageEmbed', x=x_img, lengths=img_len, causal=False, image_loc=img_loc,\n refine_image=params.refine_image, image_dist=None)\n\n enc1 = model('crossfwd', stream_='text', x=x1, lengths=len1, langs=None, causal=False)\n enc1 = enc1.transpose(0, 1)\n\n # enc_mask = x1.ne(params.mask_index)\n # enc_mask = enc_mask.transpose(0, 1)\n\n dec2 = model('crossfwd', stream_='img',\n x=x_img, lengths=img_len, langs=None, causal=True, image_loc=img_loc,\n src_enc=enc1, src_len=len1, positions=None, enc_mask=None)\n\n dec2 = dec2.transpose(0, 1) # text to image\n\n loss_G = F.mse_loss(dec2.squeeze(1), img_enc.float())\n\n self.stats['TIFG-%s' % dataset].append(loss_G.item())\n loss = lambda_coeff * loss_G\n\n # optimize\n self.optimize(loss)\n\n # number of processed sentences / words\n self.n_sentences += params.batch_size\n self.stats['processed_s'] += bs\n self.stats['processed_w'] += bs * 100",
"def _dot(self, s1, s2, tf_embs):\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n return tf.matmul(mat1, tf.transpose(mat2))",
"def create_gasf_gadf_mtf_compound_images(observations, image_size=128):\n if len(observations) == 0:\n raise ValueError(\"Observations cannot be empty.\")\n\n gasf_transformer = GASF(image_size)\n gadf_transformer = GADF(image_size)\n mtf_transformer = MTF(image_size)\n\n gasf = gasf_transformer.fit_transform(observations)\n gadf = gadf_transformer.fit_transform(observations)\n mtf = mtf_transformer.fit_transform(observations)\n\n return np.stack((gasf, gadf, mtf), axis=3)",
"def TB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB):\n\n y=SCB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB)\n T=np.sum(y,axis=0)\n Y=np.vstack((T,y[2]))\n return(Y)",
"def prediction2d(self, X, t):\n self.A = self.createA(t)\n X = self.A.dot(X)\n return X",
"def __init__(\n self,\n XS,\n YS,\n XE,\n YE,\n IE,\n Var,\n d,\n l0,\n l1,\n DT,\n G,\n GAMMA,\n LAMBDA,\n TAU,\n QUAD_REG,\n QUAD_REG_MEAN,\n pos_only=True,\n SMIN=0.,\n SMAX=1.\n ):\n self.l_i = XS.shape[0]\n self.n_l = d.shape[0]\n self.n_n = XE.shape[0]\n\n # Define Theano Variables Common to Generation and Inference\n self.t_XS = theano.shared(XS, 'XS')\n self.t_YS = theano.shared(YS, 'YS')\n self.t_XE = theano.shared(XE, 'XE')\n self.t_YE = theano.shared(YE, 'YE')\n self.t_IE = theano.shared(IE, 'IE')\n self.t_Var = theano.shared(Var, 'Var')\n\n self.t_XR = T.matrix('XR')\n self.t_YR = T.matrix('YR')\n\n # Parameters\n self.t_L0 = theano.shared(np.float32(l0), 'L0')\n self.t_L1 = theano.shared(np.float32(l1), 'L1')\n self.t_DT = theano.shared(np.float32(DT), 'DT')\n self.t_G = theano.shared(np.float32(G), 'G')\n self.t_TAU = theano.shared(np.float32(TAU), 'TAU')\n self.t_SMIN = theano.shared(np.float32(SMIN), 'SMIN')\n self.t_SMAX = theano.shared(np.float32(SMAX), 'SMAX')\n\n ##############################\n # Simulated Spike Generation #\n ##############################\n\n self.t_S_gen = T.matrix('S_gen') # Image dims are i2, i1\n self.t_Ips_gen, _ = inner_products(self.t_S_gen, self.t_Var,\n self.t_XS, self.t_YS,\n self.t_XE, self.t_YE,\n self.t_XR, self.t_YR)\n self.t_FP_gen = firing_prob(self.t_Ips_gen, self.t_G, self.t_IE,\n self.t_L0, self.t_L1, self.t_SMIN,\n self.t_SMAX, self.t_DT)\n\n # Computes image-RF inner products and the resulting firing\n # probabilities\n self.RFS = theano.function(\n inputs=[self.t_S_gen, self.t_XR, self.t_YR],\n outputs=[self.t_Ips_gen, self.t_FP_gen])\n\n self.rng = T.shared_randomstreams.RandomStreams(seed=10)\n self.t_R_gen = (self.rng.uniform(size=self.t_FP_gen.shape) <\n self.t_FP_gen).astype('float32')\n\n self.spikes = theano.function(\n inputs=[self.t_S_gen, self.t_XR, self.t_YR],\n outputs=self.t_R_gen)\n\n ##############################\n # Latent Variable Estimation #\n ##############################\n\n self.t_R = T.matrix('R')\n\n # Current value of A\n self.t_A = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'A')\n # Previous value of A\n self.t_Ap = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'Ap')\n\n self.t_D = theano.shared(d, 'D') # Dictionary\n\n self.t_Wbt = T.matrix('Wbt') # Weights (b,t) from particle filter\n\n # Sum of Hessians\n self.t_H = theano.shared(\n np.zeros((self.n_l, self.n_l)).astype('float32'), 'H')\n self.t_B = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'B') # Prior Bias\n\n # Constants\n\n self.t_GAMMA = theano.shared(np.float32(GAMMA), 'GAMMA')\n self.t_LAMBDA = theano.shared(np.float32(LAMBDA), 'LAMBDA')\n self.QUAD_REG = QUAD_REG.astype('float32')\n # self.t_QUAD_REG = theano.shared(np.float32(QUAD_REG), 'QUAD_REG')\n self.t_QUAD_REG_MEAN = theano.shared(\n np.float32(QUAD_REG_MEAN), 'QUAD_REG_MEAN')\n\n # Calculate Firing rate\n self.t_S = T.dot(self.t_A, self.t_D).reshape((self.l_i, self.l_i))\n self.image_est = theano.function(inputs=[], outputs=self.t_S)\n\n self.t_Ips, t_PixRFCoupling = inner_products(\n self.t_S, self.t_Var, self.t_XS, self.t_YS,\n self.t_XE, self.t_YE, self.t_XR, self.t_YR)\n\n self.t_FP = firing_prob(self.t_Ips, self.t_G, self.t_IE,\n self.t_L0, self.t_L1,\n self.t_SMIN, self.t_SMAX, self.t_DT)\n\n # Define Hessian\n # Reshape dictionary for computing derivative: k, i2, i1\n t_Dp = self.t_D.reshape((self.n_l, self.l_i, self.l_i))\n\n # Compute dc/dA = dc/dS * ds/dA\n # b, i2, i1, j, t -> b, _, i2, i1, j, t\n # k, i2, i1 -> _, k, i2, i1, _, 
_\n # b, k, j, t\n\n # t_SpRFCoupling1 = (\n # t_PixRFCoupling.dimshuffle(0, 'x', 1, 2, 3, 4) *\n # t_Dp.dimshuffle('x', 0, 1, 2, 'x', 'x')).sum(axis=(2, 3))\n\n def pix_rf_to_sp_rf(t_PixRFCoupling, t_Dp):\n \"\"\"\n b i2 i1 j t\n k i2 i1\n b k j t\n \"\"\"\n n_n = t_PixRFCoupling.shape[3]\n tmp1 = t_PixRFCoupling.dimshuffle(1, 2, 0, 3, 4).reshape(\n (self.l_i ** 2, -1))\n # i2i1 bjt\n\n tmp2 = t_Dp.reshape((self.n_l, -1)) # k i2i1\n\n tmp3 = T.dot(tmp2, tmp1) # k bjt\n n_b, n_t = self.t_Wbt.shape\n return tmp3.reshape(\n (self.n_l, n_b, n_n, n_t)).dimshuffle(\n 1, 0, 2, 3)\n\n t_SpRFCoupling = pix_rf_to_sp_rf(t_PixRFCoupling, t_Dp)\n\n # self.sp_rf_test= theano.function(\n # inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n # outputs=[t_SpRFCoupling, t_SpRFCoupling1])\n\n # Get RGC Sparse Coeff couplings\n # bkjt,bt-> kj\n t_SpRGCCoupling = (self.t_Wbt.dimshuffle(0, 'x', 'x', 1) *\n t_SpRFCoupling).sum(axis=(0, 3))\n\n self.get_sp_rf_coupling = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=t_SpRGCCoupling)\n\n # b, k, j, t\n t_dlogFPdA = dlogfp_dA(\n t_SpRFCoupling, self.t_G, self.t_IE, self.t_L0, self.t_L1,\n self.t_SMIN, self.t_SMAX)\n\n # b, k, k', j, t -> k, k'\n t_dE_R_dAA1 = (\n self.t_Wbt.dimshuffle(0, 'x', 'x', 'x', 1) *\n t_dlogFPdA.dimshuffle(0, 'x', 1, 2, 3) *\n t_dlogFPdA.dimshuffle(0, 1, 'x', 2, 3) *\n self.t_FP.dimshuffle(0, 'x', 'x', 1, 2)\n ).sum(axis=(0, 3, 4))\n\n def calc_hessian(t_Wbt, t_dlogFPdA, t_FP):\n \"\"\"\n Calculate the hessian given the following\n\n Parameters\n ----------\n t_Wbt : theano.tensor, shape (b, t)\n t_dlogFPdA : theano.tensor, shape (b,k,j,t)\n t_FP : theano.tensor, shape (b, j, t)\n\n Returns\n -------\n t_dE_R_dAA : theano.tensor, shape (k, k')\n \"\"\"\n\n tmp = t_Wbt.dimshuffle(0, 'x', 1) * t_FP # b, j, t\n tmp1 = tmp.dimshuffle(0, 'x', 1, 2) * t_dlogFPdA\n\n return T.dot(\n tmp1.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)),\n t_dlogFPdA.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)).T\n )\n\n t_dE_R_dAA = calc_hessian(self.t_Wbt, t_dlogFPdA, self.t_FP)\n\n self.sp_rf_test = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=[t_dE_R_dAA, t_dE_R_dAA1])\n\n self.t_dlogFPdA = t_dlogFPdA\n self.t_dE_R_dAA = t_dE_R_dAA\n\n # Compute Energy Functions (negative log-likelihood) to minimize\n\n # Spiking cost separated by b, j, t\n self.t_E_R_f = spiking_cost(self.t_R, self.t_FP)\n\n self.t_E_R = T.sum(T.sum(self.t_E_R_f, axis=1) * self.t_Wbt)\n self.t_E_R.name = 'E_R'\n\n self.t_E_bound = self.t_GAMMA * (\n T.sum(T.switch(self.t_S < self.t_SMIN,\n -(self.t_S - self.t_SMIN), 0.)) +\n T.sum(T.switch(self.t_S > self.t_SMAX,\n self.t_S - self.t_SMAX, 0.)))\n self.t_E_bound.name = 'E_bound'\n\n self.t_E_sp = self.t_LAMBDA * T.sum(T.abs_(self.t_A))\n self.t_E_sp.name = 'E_sp'\n\n # self.t_E_quad = 0.5 * T.sum(self.t_QUAD_REG *\n # ((self.t_A - self.t_QUAD_REG_MEAN) ** 2))\n # self.t_E_quad.name = 'E_quad'\n\n # Define bias term\n t_dPrior = T.grad(self.t_E_sp, self.t_A)\n\n self.t_E_prev = (\n (self.t_A - self.t_Ap).dimshuffle('x', 0) *\n self.t_H *\n (self.t_A - self.t_Ap).dimshuffle(0, 'x')\n ).sum() * 0.5\n\n self.t_E_lin_prior = ((self.t_A - self.t_Ap) * self.t_B).sum()\n\n # Split off terms that will go into fista (i.e. 
not icluding E_sp)\n self.t_E_rec = (\n self.t_E_prev + self.t_E_R +\n self.t_E_lin_prior + self.t_E_bound\n # + self.t_E_quad\n )\n self.t_E_rec.name = 'E_rec'\n\n self.t_E = self.t_E_rec + self.t_E_sp\n self.t_E.name = 'E'\n\n # Cost from poisson terms separated by batches for particle filter log\n # probability\n self.t_E_R_pf = T.sum(self.t_E_R_f, axis=(1, 2))\n self.spike_energy = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R],\n outputs=self.t_E_R_pf)\n\n # Generate costs given a path, spikes, and time-batch weights\n energy_outputs = [\n self.t_E,\n self.t_E_prev,\n self.t_E_R,\n self.t_E_bound,\n self.t_E_sp,\n self.t_E_lin_prior,\n # self.t_E_quad,\n ]\n\n self.costs = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R, self.t_Wbt],\n outputs=energy_outputs)\n\n self.image_costs = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R,\n self.t_Wbt, self.t_S],\n outputs=self.t_E_R)\n\n # Define variables for FISTA minimization\n self.t_L = T.scalar('L')\n\n self.grad_updates = fista_updates(\n self.t_A, self.t_E_rec, self.t_LAMBDA,\n self.t_L, pos_only=pos_only)\n\n _, self.t_fista_X, self.t_T = self.grad_updates.keys()\n\n # Initialize t_A, and extra variables\n\n inputs = [self.t_XR, self.t_YR, self.t_R, self.t_Wbt, self.t_L]\n self.run_fista_step = theano.function(\n inputs=inputs, outputs=energy_outputs,\n updates=self.grad_updates)\n\n # Define functions for online learning #\n\n self.hessian_func = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=t_dE_R_dAA)\n\n # After each iteration, replace value of Ap with A\n self.update_Ap = theano.function(\n inputs=[], updates=[(self.t_Ap, self.t_A)])\n\n t_decay = T.exp(- self.t_DT / self.t_TAU *\n self.t_XR.shape[1].astype('float32'))\n\n self.update_HB = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n updates=[\n (self.t_H, t_decay * self.t_H + t_dE_R_dAA),\n (self.t_B, t_dPrior)])\n\n # Code for no motion optimizer\n self.t_E_R_no_mo = T.sum(spiking_cost(self.t_R, self.t_FP))\n self.t_E_R_no_mo.name = 'E_R_no_mo'\n\n t_E_no_mo = self.t_E_R_no_mo + self.t_E_bound\n t_E_no_mo.name = 'E_no_mo'\n\n t_Rho = T.scalar('Rho')\n t_Eps = T.scalar('Eps')\n ada_updates = ada_delta(t_E_no_mo, self.t_A, *(t_Rho, t_Eps))\n t_ada_Eg2, t_ada_dA2, _ = ada_updates.keys()\n\n def reset_adadelta_variables(t_A=self.t_A):\n \"\"\"\n Resets ADA Delta auxillary variables\n \"\"\"\n A0 = np.zeros_like(t_A.get_value()).astype(theano.config.floatX)\n t_ada_Eg2.set_value(A0)\n t_ada_dA2.set_value(A0)\n t_A.set_value(A0)\n\n self.reset_adadelta_variables = reset_adadelta_variables\n\n self.run_image_max_step = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R, t_Rho, t_Eps],\n updates=ada_updates,\n outputs=[t_E_no_mo]\n )",
"def compute_attention(t1, t2):\n dim = t1.shape.as_list()[2]\n init = tf.constant_initializer(1.0 / dim)\n\n t1_logits = ops.last_dim_weighted_sum(t1, \"t1_w\")\n t2_logits = ops.last_dim_weighted_sum(t2, \"t2_w\")\n\n dot_w = tf.get_variable(\n \"dot_w\", shape=dim, initializer=init, dtype=tf.float32)\n # Compute x * dot_weights first, then batch mult with x\n dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)\n dot_logits = tf.matmul(dots, t2, transpose_b=True)\n\n return dot_logits + \\\n tf.expand_dims(t1_logits, 2) + \\\n tf.expand_dims(t2_logits, 1)",
"def f_dot(self, external, state):\n SOC = state[0]\n P = external[0]\n T = external[5]\n\n voc = 3 + np.expm1(SOC) / (np.e-1)\n # dVoc_dSOC = np.exp(SOC) / (np.e-1)\n\n V = IR * voc * (2.0 - np.exp(alpha*(T-T0)/T0))\n I = P/V # noqa: E741\n\n soc_dot = -sigma/24*SOC + eta/Cp*I\n\n return soc_dot",
"def XstarT_dot(self,M):\n if 0:\n #TODO: implement this properly\n pass\n else:\n RV = np.dot(self.Xstar().T,M)\n return RV",
"def evaluate(t, x, y):\n # TODO: fix normalization\n from PIL import Image\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n stim = np.zeros([Nt, Nx, Ny])\n\n for i, filename in enumerate(filenames):\n im = Image.open(filename).convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM)\n t_start = delay + i * (delay + duration)\n t_stop = (i+1) * (duration + delay)\n stim += np.array(im.resize((Ny, Nx))) * (heaviside(t - t_start) - heaviside(t - t_stop))\n\n if stim.max() - stim.min() != 0:\n stim = 2 * ((stim - stim.min()) / (stim.max() - stim.min())) - 1\n return stim",
"def m_step(X, T):\n def get_sigma(X, muk, Tk):\n \"\"\"\n function that calculate the covariance of the k-th component\n :param muk: n-dim vector, the k-th component's mean\n :param Tk: N-dim vector, the k-th component posterior of hidden state z_i, for each x_i\n \"\"\"\n X_centred = X - muk\n X_weighted = X_centred * np.tile(Tk, reps=(X.shape[1],1)).T # repeat Tk in N-direction to match X's shape and weigh it\n return X_weighted.T@X_centred/np.sum(Tk) # weighted and centred are exchangable: we only need to weigh it by T_k once\n\n N, n = X.shape # N: number of data points, n: dimension of the data point\n K = T.shape[0] # num of hidden component\n T_sum = np.sum(T, axis=1) # caculate the common term sum of T_{k, i} over all i, this is a k-dim vector\n\n taus = T_sum / N # average over i for T_{k, i} gives MLE for all tau_k\n\n T_sum_rep = np.tile(T_sum, reps=(n, 1)).T # repeat T_sum n times in column\n mus = T@X/T_sum_rep # T@X gives a Kxn matrix with it's k, i th component be \\sum_{i=1}^NT_{k, i}x_i then each row is divided by T_sum, gives MLE for all mu_k\n\n sigmas = np.array([get_sigma(X, mus[k, :], T[k, :]) for k in range(K)])\n return taus, mus, sigmas",
"def xdot(self, t, x, u, w):\n a= u[0]\n thetadot = u[1]\n theta = x[2]\n v = x[3]\n w = w * self.w_scale\n return np.array([v*np.cos(theta), v*np.sin(theta), thetadot, a]) + np.array([np.cos(theta) * w[0] - np.sin(theta) * w[1], np.sin(theta) * w[0] + np.cos(theta) * w[1], v * w[2], v * w[3]])",
"def process_batch(self, X, y):\n # normalize to [-1.0, 1.0]\n X = X / 127.5 - 1.0\n\n for i in range(X.shape[0]):\n # scaling and bias for contrast and brightness augmentation\n scale = 1.0 + 0.1 * np.random.randn()\n bias = 0.0 + 0.1 * np.random.randn()\n X[i] = np.clip(scale*X[i] + bias, -1.0, 1.0)\n\n # transformations for geometric augmentations\n angle = 6.0 * np.random.randn()\n zoom = 1 + 0.1 * np.random.randn()\n translation = 2.0 * np.random.randn()\n shear = 0.1 * np.random.randn()\n\n trafo = skimage.transform.AffineTransform(\n translation = translation,\n rotation = np.deg2rad(angle),\n scale = (zoom, zoom),\n shear = shear)\n centered_trafo = (self.postshift + (trafo + self.preshift))\n X[i] = skimage.transform.warp(X[i], centered_trafo, mode = \"edge\", order = 1)\n return X, y",
"def evaluate(t, x, y):\n from PIL import Image\n im = Image.open(filename)\n duration = im.info[\"duration\"]*pq.ms if im.info[\"duration\"] is not 0 else 30*pq.ms\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n\n stim = np.zeros([Nt, Ny, Nx])\n t_map = (t.flatten().rescale(\"ms\") / duration).astype(int)\n t_map = t_map[1:] - t_map[:-1]\n for i, ti in enumerate(t_map):\n try:\n im.seek(im.tell()+ti)\n except EOFError:\n break\n frame = im.convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM).resize((Ny, Nx))\n stim[i, :, :] = np.array(frame)\n stim[i, :, :] = 2 * ((stim[i, :, :] - stim[i, :, :].min()) / (stim[i, :, :].max() - stim[i, :, :].min())) - 1\n\n return stim",
"def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:",
"def function2D(self, t):\n if t.ndim == 1:\n nX = int(self.getAttributeValue('nX'))\n nY = int(self.getAttributeValue('nY'))\n pos = t.reshape(nX, nY, 2)\n elif t.ndim == 3:\n pos = t\n X = pos[...,0]\n Y = pos[...,1]\n A = self.getParamValue(0)\n muX = self.getParamValue(1)\n muY = self.getParamValue(2)\n sigX = self.getParamValue(3)\n sigY = self.getParamValue(4)\n sigP = self.getParamValue(5)\n bg = self.getParamValue(6)\n\n sigXY = sigX*sigY*sigP\n Z = A*bivariate_normal(X,Y, sigmax=sigX, sigmay=sigY,\n mux=muX,muy=muY,sigmaxy=sigXY)\n Z += bg\n return Z",
"def __call__(self, T, X):\n dims = (T.shape[0], 1, 1)\n\n a = X[:, 0:1].reshape(dims)\n e = X[:, 1:2].reshape(dims)\n i = X[:, 2:3].reshape(dims)\n w = X[:, 4:5].reshape(dims)\n f = X[:, 5:6].reshape(dims)\n\n sf = np.sin(f)\n cf = np.cos(f)\n st = np.sin(f + w)\n ct = np.cos(f + w)\n si = np.sin(i)\n ci = np.cos(i)\n p = a * (1. - e**2)\n r = p / (1. + e*cf)\n h = (self.mu * p)**.5\n zero = np.zeros(dims)\n\n adot = np.concatenate((e*sf, p/r, zero), axis=2) * 2*a**2/h\n edot = np.concatenate((p*sf, (p+r)*cf + r*e, zero), axis=2) / h\n idot = np.concatenate((zero, zero, r*ct/h), axis=2)\n Wdot = np.concatenate((zero, zero, r*st/h/si), axis=2)\n wdot = np.concatenate((-p*cf/e, (p+r)*sf/e, -r*st*ci/si), axis=2) / h\n fdot = np.concatenate((p*cf, -(p+r)*sf, zero), axis=2) / h / e\n\n return np.concatenate((adot, edot, idot, Wdot, wdot, fdot), axis=1)",
"def sdot(s):\n\n mu = 398600.4405\n r = np.linalg.norm(s[0:3])\n a = -mu/(r**3)*s[0:3]\n\n p_j2 = j2_pert(s)\n p_drag = drag(s)\n\n a = a+p_j2+p_drag\n return np.array([*s[3:6],*a])",
"def stp_transformation(self, prev_image, stp_input, num_masks, reuse= None, suffix = None):\n # Only import spatial transformer if needed.\n from spatial_transformer import transformer\n\n identity_params = tf.convert_to_tensor(\n np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))\n transformed = []\n trafos = []\n for i in range(num_masks):\n params = slim.layers.fully_connected(\n stp_input, 6, scope='stp_params' + str(i) + suffix,\n activation_fn=None,\n reuse= reuse) + identity_params\n outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])\n transformed.append(transformer(prev_image, params, outsize))\n trafos.append(params)\n\n return transformed, trafos",
"def batch_dot_product_sparse(spectra, tdata, nz, use_gpu):\n\n if (use_gpu):\n #Use GPU to do dot products in batch\n return _batch_dot_product_sparse_gpu(spectra, tdata)\n\n #Need to find shape of output array of batch dot product\n nrows = 0\n nbasis = None\n for key in tdata:\n nrows += tdata[key].shape[1]\n if (nbasis is None):\n nbasis = tdata[key].shape[2]\n\n #Create empty array rather than stacking a list - faster\n Tbs = np.empty((nz, nrows, nbasis))\n #Loop over all templates\n for i in range(nz):\n irow = 0\n for s in spectra:\n key = s.wavehash\n curr_tb = s.Rcsr.dot(tdata[key][i,:,:])\n #Copy this dot product result into the Tbs array\n Tbs[i, irow:irow+curr_tb.shape[0],:] = curr_tb\n irow += curr_tb.shape[0]\n return Tbs",
"def sgd_optimization(dataset, learning_rate, n_epochs, batch_size):\n datasets = load_data(dataset)\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n #number of minibatches\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #build the model\n print \"... building the model\"\n\n index = T.lscalar()\n x = T.matrix('x') #data for the rasterized images\n y = T.ivector('y') # labels (int)\n\n # logistic regression Class\n classifierLR = LogisticRegression(input=x, n_in=28*28, n_out=10)\n cost = classifierLR.negative_log_likelihood(y)\n\n # test model (no updates)\n test_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #validate model (no updates)\n validate_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #compute the gradient of cost wrt W, b\n g_W = T.grad(cost=cost, wrt=classifierLR.W)\n g_b = T.grad(cost=cost, wrt=classifierLR.b)\n\n #updating expression\n updates = [(classifierLR.W, classifierLR.W - learning_rate * g_W),\n (classifierLR.b, classifierLR.b - learning_rate * g_b)]\n\n # Train model (theano function); updates\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n\n }\n )\n\n # Training model (early stopping with validation examples)\n print \"... 
training the model\"\n patience = 5000\n patience_inc = 2 # wait this much\n improved_threshold = 0.995 # relative improvement (significant)\n validation_frequency = min(n_train_batches, patience / 2)\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch += 1\n for minibatch_index in xrange(n_train_batches):\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute loss on validation set\n validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n \"Epoch: %i, minibatch: %i/%i, validation_error: %f %%\" %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n if this_validation_loss < best_validation_loss:\n #improve patience if good improvement\n if this_validation_loss < best_validation_loss * improved_threshold:\n patience = max(patience, iter * patience_inc)\n\n best_validation_loss = this_validation_loss\n\n #testing on test_set\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n \"Epoch : %i, minibatch %i/%i,\"\n \" test error of best model %f %%\"\n ) % (\n epoch,\n minibatch_index,\n n_train_batches,\n test_score * 100.\n )\n )\n\n #save the best model\n print \"New best model found; saving ...\"\n with open('best_model.pkl', \"w\") as f:\n cPickle.dump(classifierLR, f)\n\n if patience <= iter:\n done_looping = True\n break\n\n\n end_time = timeit.default_timer()\n print(\n (\n \"Optimization Complete: best validation score : %f %%,\"\n \" test performance : %f %%\"\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print \"The code run for %d epochs, with %f epochs/sec\" %(epoch, 1. * epoch / (end_time - start_time))\n print >> sys.stderr, (\"The code for file \" + os.path.split(__file__)[1] + \" ran for %.1fs\" % ((end_time - start_time)))",
"def forward(self, inputs, outputs):\n\n inimg = inputs[0]\n inimg_reshaped = inimg.reshape((inimg.shape[0] * inimg.shape[1], inimg.shape[2]))\n result = np.dot(self.TC, inimg_reshaped.T).T.reshape(inimg.shape)\n np.copyto(outputs[0], result)",
"def _x_dot(self, x, t, u):\r\n # freeze system if state is irrecoverable\r\n if self.is_irrecoverable(ignore_force_check=True):\r\n return np.concatenate([x[3:], -100 * x[3:]])\r\n\r\n omega_dot = self._compute_omega_dot(x, u)\r\n return np.concatenate([x[3:], omega_dot])",
"def sdot_asgd(y, nu, C, x_sample, W = None):\n # if W == None: W = np.zeros(y.shape[0]) else: assert(W.shape[0] == y.shape[0])\n W = np.zeros(y.shape[0]) # (500, 0)\n W_tmp = np.copy(W)\n #source_density = dp.get_density_by_name(name_source) # Density of source distribution\n h_save = np.empty_like(0)\n # Print iteration status\n niter = np.shape(x_sample)[0]\n for t in range(niter):\n if (t+1) % 10000 == 0:\n print(\"Iteration: {}\".format(t+1))\n \n # Sample from source distribution\n #x = source_density.sample_from(1).numpy()\n x = x_sample[t]\n\n # Gradient Step\n r = np.sum(np.square(x-y) , axis=1) - W_tmp # |x-y|^2 - W_tmp (900, )\n indx_min = np.argmin(r)\n grad = np.copy(nu)\n grad[indx_min] = grad[indx_min] - 1 # (900, )\n\n # Evaluate empirical Reward\n r2 = np.sum(np.square(x-y) , axis=1) - W # |x-y|^2 - W_tmp (900, )\n h = np.min(r2) + np.dot(W,nu) \n h_save = np.hstack((h_save,h))\n\n # Gradient Ascent \n W_tmp = W_tmp + C/np.sqrt(t+1) *grad # t+1 because it starts from 0\n W = t/(t+1) *W + 1/(t+1)*W_tmp # t+1 because it starts from 0\n # W = W / np.max(np.abs(W)) \n\n return W, h_save",
"def mte_observed(X, b1_b0):\n mte_x = np.dot(X, b1_b0)\n\n return mte_x"
]
| [
"0.53138065",
"0.5156941",
"0.5128571",
"0.511613",
"0.50998497",
"0.5030418",
"0.5006108",
"0.49372065",
"0.49192542",
"0.4917367",
"0.4906383",
"0.4893074",
"0.48705524",
"0.48512393",
"0.4842089",
"0.48295724",
"0.4824507",
"0.4812384",
"0.4804426",
"0.4794742",
"0.47918177",
"0.47773862",
"0.4775496",
"0.47731015",
"0.47647977",
"0.4750934",
"0.47448996",
"0.47356525",
"0.4724494",
"0.4722127"
]
| 0.6263825 | 0 |
t_dIpsdA: d Ips / dA, indexed as b, k, j, t; t_G: gain constant; t_IE: RGC identity, j; t_L0, t_L1: min, max firing rates. Returns the d log FP / dA indexed as b, k, j, t | def dlogfp_dA(t_dIpsdA, t_G, t_IE, t_L0, t_L1, t_SMIN, t_SMAX):
t_IEr = t_IE.dimshuffle('x', 'x', 0, 'x')
t_dGen_dA = (1 - 2 * t_IEr) * t_G * t_dIpsdA / (t_SMAX - t_SMIN)
t_dlogFPdA = T.log(t_L1 / t_L0) * t_dGen_dA
return t_dlogFPdA | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rateFcn(a0,a1,a2,a3,a4,a5,a6,T):\n return np.exp(a0+a1/T+a2/T**(1/3)+a3*T**(1/3)+a4*T+a5*T**(5/3)+a6*np.log(T))",
"def get_rec_rate_H_caseA_Dr11(T):\n T4 = T*1e-4\n return 4.13e-13*T4**(-0.7131 - 0.0115*np.log(T4))",
"def ETPA(omegap, E, edip, Te, g_idx=[0], e_idx=[], f_idx=[]):\n N = len(E)\n tdm = edip\n # gamma = np.zeros(nstates)\n # for j in range(1, N):\n # gamma[j] = sum(tdm[:j, j]**2) * 0.0005\n # gamma[1:] = 0.0001\n\n # print('lifetimes of polariton states = {} eV'.format(gamma * au2ev))\n\n omega1 = omegap * 0.5\n omega2 = omegap - omega1\n # flist = [3, 4] # final states list\n i = g_idx[0]\n\n A = np.zeros(N, dtype=complex)\n\n signal = 0.0\n\n for f in f_idx:\n for m in e_idx:\n A[f] += tdm[f, m] * tdm[m, i] * \\\n ((exp(1j * (omega1 - (en[m] - en[i]) + 1j * gamma[m]) * T) - 1.) / (omega1 - (en[m] - en[i]) + 1j * gamma[m]) \\\n + (exp(1j * (omega2 - (en[m] - en[i])) * T) - 1.)/(omega2 - (en[m] - en[i]) + 1j * gamma[m]))\n\n signal += np.abs(A[f])**2 * lorentzian(omegap - en[f] + en[i], gamma[f])\n\n return signal",
"def hinderedRotor_d_heatCapacity_d_freq(T, freq, barr):\n x = constants.h * constants.c * 100. * freq / constants.kB / T\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * exp_x / one_minus_exp_x / one_minus_exp_x * (2 + x + 2 * x * exp_x / one_minus_exp_x) * x / freq",
"def Kg(T, D):\n# return 2.10*np.ones(np.shape(T)) #at 0 degC\n# return Kg0*np.exp(Kg1*T)\n KiT=Kg0*np.exp(Kg1*T)\n return (2.*KiT*D)/(3.-D)",
"def kA_func(self):\n\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_bp_p(i1)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2 and not self.inl[0].T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not self.outl[1].T.val_set:\n T_o2 = T_i1 - 0.5\n\n if T_o1 <= T_i2 and not self.outl[0].T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not self.inl[1].T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def get_rec_rate_H_caseB_Dr11(T):\n T4 = T*1e-4\n return 2.54e-13*T4**(-0.8163 - 0.0208*np.log(T4))",
"def rate(self, t, yt):\n # TODO add with parameters\n T = yt[-1]\n y = yt[:-1]\n # self.__log.debug('Em %s', Em)\n dIdt = (self.parameters.A0 * np.exp(-self._Em / Rgas / T))\n # self.__log.debug('dkdt %s', dkdt)\n coeff1 = self.Wm * self.mt / sqrtpi\n coeff2 = np.exp(-pow(\n (self._Em - self.parameters.E0) / self.parameters.sigma, 2) / 2)\n coeff3 = np.exp(-y[1:]) * dIdt\n # self.__log.debug('coeff: %s %s %s', coeff1, coeff2, coeff3)\n # dydt = (self.parameters['y0'] - y[0]) * \\\n # np.sum(coeff1 + coeff2 + coeff3)\n dydt = self.parameters.y0 * np.sum(coeff1 * coeff2 * coeff3)\n # self.__log.debug('dydt %s', dydt)\n return np.append(dydt, dIdt)",
"def aiot(d):\n res = A * (1-exp(-d/D)) / magic\n _adjust(res)\n return ( log(res) if use_log\n else res )",
"def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) \n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state",
"def _ice_dgdt(temp,pres):\n # Reduced variables\n tn = temp/_TTP\n pn = pres/_PTPE\n _PI0 = _PATM/_PTPE\n g_t = 0.\n \n # Power series and entropy components\n g_t += -_GCOEFFS[4]\n \n # Residual terms including complex numbers\n sr = [_GCOEFFS[1], complex(0.0,0.0)]\n for (k,rk) in enumerate(_GCOEFFS[2]):\n sr[1] += rk * (pn-_PI0)**k\n for (tk,s) in zip(_GCOEFFS[3],sr):\n term = -numpy.log(tk-tn) + numpy.log(tk+tn) - 2*tn/tk\n g_t += (s*term).real\n return g_t",
"def g_ag_func(lai, ts, rn, coef1=1.80, coef2=0.084):\n a = np.copy(lai).astype(np.float64)\n a *= -0.521\n np.exp(a, out=a)\n a *= 0.18\n a += 0.05\n a *= rn\n b = ts - 273.16\n b *= coef1\n b /= rn\n b += coef2\n b *= rn\n return np.where(lai >= 0.5, a, b).astype(np.float32)",
"def cracking_rate_Ai2020(T_dim):\n k_cr = 3.9e-20\n Eac_cr = 0 # to be implemented\n arrhenius = np.exp(Eac_cr / pybamm.constants.R * (1 / T_dim - 1 / 298.15))\n return k_cr * arrhenius",
"def logistic_equation(conc,drugless_rate,ic50,hc=-0.6824968):\n \n # conc = conc/10**6\n f = drugless_rate/(1+np.exp((ic50-np.log10(conc))/hc))\n \n return f",
"def kA_func(self):\n i1 = self.inl[0].to_flow()\n i2 = self.inl[1].to_flow()\n o1 = self.outl[0].to_flow()\n o2 = self.outl[1].to_flow()\n\n T_i1 = T_mix_ph(i1, T0=self.inl[0].T.val_SI)\n T_i2 = T_mix_ph(i2, T0=self.inl[1].T.val_SI)\n T_o1 = T_mix_ph(o1, T0=self.outl[0].T.val_SI)\n T_o2 = T_mix_ph(o2, T0=self.outl[1].T.val_SI)\n\n if T_i1 <= T_o2:\n T_i1 = T_o2 + 0.01\n if T_i1 <= T_o2:\n T_o2 = T_i1 - 0.01\n if T_i1 <= T_o2:\n T_o1 = T_i2 + 0.02\n if T_o1 <= T_i2:\n T_i2 = T_o1 - 0.02\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1[0] * (o1[2] - i1[2]) + self.kA.val * td_log",
"def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)",
"def graphite_cracking_rate_Ai2020(T_dim):\n k_cr = 3.9e-20\n Eac_cr = 0 # to be implemented\n arrhenius = np.exp(Eac_cr / pybamm.constants.R * (1 / T_dim - 1 / 298.15))\n return k_cr * arrhenius",
"def mGI(t):\r\n times = np.array([0, 3, 6, 9, 12, 15, 18, 21, 24])\r\n mGIs = np.array([0.0535789, 0.277942, 0.813305, 1., 0.373043, 0.00648925, 0.00439222, 0.0122333, 0.0535789])\r\n\r\n t_ = t % 24\r\n # print(\"GI:\", np.interp(t_, times, mGIs))\r\n return np.interp(t_, times, mGIs)",
"def kA_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n return i[0] * (o[2] - i[2]) + self.kA.val * td_log",
"def investigate4DRepeatability():\n parentdir = '/home/rallured/Dropbox/Interferometer/SolarBFlat/Repeatability/'\n avgs = [1,2,4,8,16,32]\n\n #Temporal with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTiltTemporal*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptemptilt = d[-1]-d[-2]\n figtemptilt = d[-1]\n\n #Dynamic with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTilt_*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = [met.readFlatScript(fi.split('.')[0])[0] for fi in fn]\n #Make progressive averaging plot\n plt.figure('DynamicTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdyntilt = d[-1]-d[-2]\n figdyntilt = d[-1]\n \n #Temporal with fringes nulled\n fn = glob.glob(parentdir+'Nulled/17*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalNulledFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptempnull = d[-1]-d[-2]\n figtempnull = d[-1]\n \n #Dynamic with fringes nulled\n d = pyfits.getdata('/home/rallured/Dropbox/Interferometer/'\n 'SolarBFlat/Repeatability/'\n 'Nulled/170103_Processed.fits')\n rep = np.array([d[i,0]-d[i,1] for i in range(32)])\n #Make progressive averaging plot\n plt.figure('DynamicNulledFigure')\n for i in [0,1,3,7,15,31]:\n f,p = fourier.meanPSD(d[i,0],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(i+1))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdynnull = d[-1][0]-d[-1][1]\n figdynnull = d[-1][0]\n\n #Make comparative repeatability plots with 32 averages\n plt.figure('CompareRepeatability')\n f,p = fourier.meanPSD(repdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(repdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(reptemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(reptempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Repeatability - 32 
Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make comparative figure plots with 32 averages\n plt.figure('CompareFigure')\n f,p = fourier.meanPSD(figdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(figdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(figtemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(figtempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Figure - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make parroting repeatability plots\n fig = plt.figure('Parroting')\n fig.add_subplot(2,2,1)\n plt.imshow(repdyntilt)\n plt.title('Dynamic Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,2)\n plt.imshow(reptemptilt)\n plt.title('Temporal Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,3)\n res = legendre2d(repdyntilt,xo=3,yo=3)[0]\n plt.imshow(repdyntilt-res)\n plt.title('Dynamic Repeatability Filtered')\n plt.colorbar()\n fig.add_subplot(2,2,4)\n res = legendre2d(reptemptilt,xo=3,yo=3)[0]\n plt.imshow(reptemptilt-res)\n plt.title('Temporal Repeatability Filtered')\n plt.colorbar()",
"def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb",
"def compute_gains(self, t, add_noise=True):\n #get the basis funtion at a time step\n basis, Dbasis = self.get_basis(t)\n\n if t < self._time_steps-1:\n basis_t_dt, _ = self.get_basis(t+1)\n else:\n basis_t_dt = np.zeros_like(basis)\n\n\n #part 1 equation 46\n B_pseudo = np.linalg.pinv(self._B)\n\n #equation 12 for t\n Sigma_t = np.dot(np.dot(basis, self._sigma_W), basis.T)\n\n #equation 12 for t+dt\n Sigma_t_dt = np.dot(np.dot(basis_t_dt, self._sigma_W), basis_t_dt.T)\n\n #Cross correlation between t, t+dt, Equation 49\n Ct = np.dot(np.dot(basis, self._sigma_W), basis_t_dt.T)\n\n #System noise Equation 51\n Sigma_s = (1./self._dt)* ( Sigma_t_dt - np.dot( np.dot( Ct.T, np.linalg.inv(Sigma_t) ), Ct) )\n\n #control noise Equation 52\n Sigma_u = np.dot(np.dot(B_pseudo, Sigma_s), B_pseudo.T)\n\n #part 2 equation 46\n tmp1 = np.dot(np.dot(Dbasis, self._sigma_W), basis.T)\n\n #part 3 equation 46\n tmp2 = np.dot(self._A, Sigma_t) + 0.5*Sigma_s\n\n #compute feedback gain; complete equation 46\n K = np.dot( np.dot(B_pseudo, (tmp1-tmp2) ), np.linalg.inv(Sigma_t))\n\n #part 1 equation 48\n tmp3 = np.dot(Dbasis, self._mean_W)\n\n #part 2 equation 48\n tmp4 = np.dot( (self._A + np.dot(self._B, K)), np.dot(basis, self._mean_W) )\n\n #compute feedforward gain; complete equation 48\n k = np.dot(B_pseudo, (tmp3-tmp4))\n\n return K, k, Sigma_u",
"def get_fractional_degradation(bt):\n\n\n\n NomIch = 0.125 # Nominal charge current\n NomId = 0.25 # Nominal discharge current\n NomSoC = 0.5 # Nominal state of charge_mode\n NomDoD = 1.0 # Nominal depth of discharge\n B = 5 #Battery capacity\n qt = 5 * 0.5 # Amount of energy in the battery at the start\n # Determin charge of discharge\n if bt > 0:\n Id = bt/(B*1) # time interval differnece is 1\n Ich = NomIch\n else:\n Ich = bt/(B*1)\n Id = NomId\n\n #Calculate average State of Charge\n SoC = 100 * (qt - 0.5*bt)/B\n\n #Calculate Depth of Discharge\n DoD = 100 * bt /B\n\n # Functions\n nCL1 = (e * np.exp (f * Id) + g * np.exp(h * Id))/ (e * np.exp (f * NomId) + g * np.exp(h * NomId))\n nCL2 = (m * np.exp (n * Ich) + o * np.exp(p * Ich))/ (m* np.exp (n* NomIch) + o * np.exp(p * NomIch))\n nCL3 = get_CL4(DoD, SoC)/get_CL4(NomDoD, NomSoC)\n nCL = nCL1 * nCL2 * nCL3\n Fractional_D = (0.5/3650)/ nCL\n return Fractional_D",
"def LogLikeEducation(DFData,SATTuition,grad_horizon,sectors,beta,\n ability,flows_penalized,unskilled_var,grade_params_by_quality,normReps,simReps,\n LaborGradeRange,final_size,dropout_payouts, STEM_payouts_by_quality,\n nonSTEM_payouts_by_quality,gamma_p, unskilled_meanvar, norm_quantiles,\n skilled_wage_coeffs,unskilled_wage_coeffs, skilled_wage_covar,LaborGradeInt,\n choose,year_four_intercept,year_four_flow_penalized, ed_switching_costs,\n univ_type_shifters,grad_payoff,return_array=False):\n\n year_four_exp = 0\n year_four_quadratic = 0\n year_four_year_1 = 0\n\n\n flowUnskilled=flows_penalized[sectors]\n flowSTEM=flows_penalized[sectors+1]\n flownonSTEM=flows_penalized[sectors+2]\n flow_educ=np.array([flowSTEM,flownonSTEM,flowUnskilled],dtype=np.float64)\n ed_Emax=np.zeros((len(SATTuition),6,81,2),dtype=np.float64)\n STEM1=np.zeros(len(SATTuition),dtype=np.float64)\n nonSTEM1=np.zeros(len(SATTuition),dtype=np.float64)\n\n flows_by_univ_type = [np.zeros(3,dtype=np.float64) for x in range(4)]\n flows_by_univ_type[0][0]=flow_educ[0]\n flows_by_univ_type[0][1]=flow_educ[1]\n flows_by_univ_type[0][2]=flow_educ[2]\n for x in range(1,4):\n flows_by_univ_type[x][0]=flow_educ[0]+univ_type_shifters[2*x-2]\n flows_by_univ_type[x][1]=flow_educ[1]+univ_type_shifters[2*x-1]\n flows_by_univ_type[x][2]=flows_penalized[sectors]\n\n for idx,x in enumerate(SATTuition):\n # differentiate by quality\n if x[5] == 1:\n STEM_payouts = STEM_payouts_by_quality[1]\n nonSTEM_payouts = nonSTEM_payouts_by_quality[1]\n grade_params = grade_params_by_quality[1]\n else:\n STEM_payouts = STEM_payouts_by_quality[0]\n nonSTEM_payouts = nonSTEM_payouts_by_quality[0]\n grade_params = grade_params_by_quality[0]\n\n tuition = x[6]\n flow_educ_univ_type=flows_by_univ_type[x[7]]\n\n Ed=EmaxEducationJIT(dropout_payouts,STEM_payouts,nonSTEM_payouts,\n grade_params,gamma_p,beta,flow_educ_univ_type,\n np.array(([tuition,tuition,tuition,tuition]),dtype=np.float64),\n np.array((x[2],x[3],x[4]),dtype=np.float64),\n np.array((ability[0],ability[1]),dtype=np.float64),\n unskilled_meanvar, norm_quantiles, year_four_intercept,\n year_four_flow_penalized, ed_switching_costs, grad_payoff)\n Ed.solve()\n ed_Emax[idx]=Ed.EmaxEducationValues\n STEM1[idx]=Ed.STEM_cond_val_first\n nonSTEM1[idx]=Ed.nonSTEM_cond_val_first\n del Ed\n\n # this part is hard coded for the number of sectors\n skilled_experience=create_skilled_experience(np.array(DFData.skilled1),\n np.array(DFData.skilled2),np.array(DFData.skilled3),\n np.array(DFData.hp))\n\n\n wage_shock=calculate_wage_shock(np.array(DFData.outcome),\n np.array(DFData.col_type),np.array(DFData.numeric_choice),\n np.array(DFData.numeric_state),skilled_wage_coeffs,\n unskilled_wage_coeffs,skilled_experience,\n np.array(DFData.unskilled),np.array(DFData.dSTEM),np.array(DFData.tGPA),\n np.array(DFData.quality),np.array(DFData.tdropout),year_four_intercept,\n year_four_exp,year_four_quadratic,year_four_year_1)\n\n\n (meanterm,covar,skilled_shocks,hp_wage_shocks)=(\n MVNposterior(skilled_wage_covar,4))\n skilled_shocks_list=[x for x in skilled_shocks]\n skilled_wage_shocks=tuple(skilled_shocks_list)\n\n unskilledWageShocks=(np.transpose(scipy.stats.norm.ppf(\n (np.array(range(simReps))+1)/(simReps+1))*(unskilled_var[0][0])**0.5))\n firstUnskilledDraws=np.exp(unskilled_wage_coeffs[0]+unskilledWageShocks)\n\n year_four_first_draws=np.exp(year_four_intercept+unskilledWageShocks)\n\n num_grades=20\n grade_quantiles=scipy.stats.norm.ppf(\n np.array(range(1,num_grades))/num_grades)\n\n 
out=calculate_likelihood_education(grad_horizon,sectors,\n np.array(DFData.time),\n np.array(DFData.numeric_choice),np.array(DFData.numeric_state),\n np.array(DFData.cumulativeGPA),np.array(DFData.tdropout),ability,\n np.array(DFData.SAT_M),\n np.array(DFData.SAT_V),np.array(DFData.hs_GPA),np.array(DFData.tuition),\n STEM_payouts_by_quality, nonSTEM_payouts_by_quality, grade_quantiles,\n dropout_payouts, wage_shock,\n skilled_wage_shocks,hp_wage_shocks,skilled_experience,\n np.array(DFData.unskilled),np.array(DFData.outcome),\n np.array(DFData.dSTEM),np.array(DFData.tGPA),meanterm,\n ed_Emax,np.array(DFData.ed_emax_mapping),\n flowUnskilled,flow_educ,skilled_wage_covar,\n gamma_p,beta,skilled_wage_coeffs,unskilled_wage_coeffs,\n unskilled_var,choose,unskilledWageShocks,grade_params_by_quality,STEM1,\n nonSTEM1,firstUnskilledDraws,LaborGradeInt,np.array(DFData.quality),\n year_four_first_draws,year_four_flow_penalized,ed_switching_costs,\n np.array(DFData.lastchoice),univ_type_shifters,\n np.array(DFData.univ_type_num),grad_payoff)\n\n del ed_Emax\n if return_array:\n return out\n return np.sum(out)",
"def test_single_ended_trans_att_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"ambient\": [slice(0.52 * cable_len, 0.58 * cable_len)],\n \"cold\": [\n slice(0.125 * cable_len, 0.25 * cable_len),\n slice(0.65 * cable_len, 0.70 * cable_len),\n ],\n \"warm\": [slice(0.25 * cable_len, 0.375 * cable_len)],\n }\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # test `trans_att` related functions\n # Clear out old results\n ds_test.set_trans_att([])\n\n assert ds_test.trans_att.size == 0, \"clear out trans_att config\"\n\n del_keys = []\n for k, v in ds_test.data_vars.items():\n if \"trans_att\" in v.dims:\n del_keys.append(k)\n\n assert len(del_keys) == 0, \"clear out trans_att config\"\n\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n 
ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing alpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )",
"def kA_char_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n f = 1\n if not np.isnan(self.inl[0].m.design):\n if self.kA_char.param == 'm':\n f = self.kA_char.func.evaluate(i[0] / self.inl[0].m.design)\n\n fkA = 2 / (1 + 1 / f)\n\n return i[0] * (o[2] - i[2]) + self.kA.design * fkA * td_log",
"def test_single_ended_wls_fix_gamma_fix_dalpha_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n fix_dalpha=(dalpha_p - dalpha_m, 0.0),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=18)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)\n\n pass",
"def test_single_ended_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections, st_var=1.0, ast_var=1.0, method=\"wls\", solver=\"sparse\"\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)\n\n pass",
"def infilRateGA(Ks, presHead, thetaSat, thetaInit, F, tp):\n numerator = Ks*np.absolute(presHead)*(thetaSat - thetaInit)\n fraction = numerator/F\n f = Ks + fraction\n\n return f",
"def ALPflux(self, EMeV, t_sec, g11):\n na_dedt = self._alp(EMeV=EMeV, ts = t_sec, g10 = g11 * 0.1) # alp spectrum per energy and time\n return na_dedt * 1.e52"
]
| [
"0.6032632",
"0.5838996",
"0.57856476",
"0.5695584",
"0.56228185",
"0.5600415",
"0.55986273",
"0.55946696",
"0.55773216",
"0.5558041",
"0.55422586",
"0.55100906",
"0.5491797",
"0.5468888",
"0.5450849",
"0.5436682",
"0.5423116",
"0.53676325",
"0.5357754",
"0.5339139",
"0.533334",
"0.5328433",
"0.531503",
"0.5290875",
"0.5289468",
"0.5288406",
"0.5270619",
"0.5256118",
"0.52520454",
"0.5250572"
]
| 0.7176311 | 0 |
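Illustrative annotation (not part of the dataset row above): the positive document for this row computes d log FP / dA by broadcasting the RGC identity over the batch, coefficient, and time axes, scaling by the gain and the range (SMAX - SMIN), and multiplying by the log firing-rate ratio. A minimal NumPy transcription of those three Theano lines, assuming plain float arrays; treating IE as a 0/1 per-cell identity vector is an assumption, not stated in the row:

```python
import numpy as np

def dlogfp_dA_numpy(dIpsdA, G, IE, L0, L1, SMIN, SMAX):
    """NumPy sketch of the Theano document above.

    dIpsdA : array, shape (b, k, j, t) -- d Ips / dA
    IE     : array, shape (j,)         -- RGC identity per cell (assumed 0/1)
    Returns d log FP / dA with shape (b, k, j, t).
    """
    IEr = IE[np.newaxis, np.newaxis, :, np.newaxis]        # broadcast j over b, k, t
    dGen_dA = (1 - 2 * IEr) * G * dIpsdA / (SMAX - SMIN)   # generator-signal gradient
    return np.log(L1 / L0) * dGen_dA                       # scale by log firing-rate ratio
```

Up to float32 casting, this should reproduce the Theano graph's output for the same inputs, since it mirrors the three lines of the document one-for-one.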
Resets ADA Delta auxiliary variables | def reset_adadelta_variables(t_A=self.t_A):
A0 = np.zeros_like(t_A.get_value()).astype(theano.config.floatX)
t_ada_Eg2.set_value(A0)
t_ada_dA2.set_value(A0)
t_A.set_value(A0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []",
"def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []",
"def reset(self):\n self.c_count = 0\n self.a_count = -1\n self.epsilon = self.init_epsilon",
"def reset(self):\r\n self.A = np.zeros(self.A.shape)",
"def reset(self):\n self.x_prev = np.zeros_like(self.mu)",
"def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()",
"def reset_state(self):\n self.y = np.copy(self.start)\n self.dy = np.zeros(self.n_dmps)\n self.ddy = self.ay * (self.by * (self.goal - self.y) - self.dy) + self.force[0]\n self.timestep = 0",
"def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]",
"def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)",
"def reset_state(self):\n self.y = self.y0.copy()\n self.dy = jnp.zeros(self.n_dmps)\n self.ddy = jnp.zeros(self.n_dmps)\n self.cs.reset_state()",
"def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_",
"def reset(self):\n self._previous_v = 0\n self._previous_m = 0\n self._previous_shape = 0",
"def reset(self):\n self.acc_loss = 0\n self.norm_term = 0",
"def reset(self):\n self.control_counter = 0\n self.last_position_error = np.zeros(3)\n self.integral_position_error = np.zeros(3)\n self.last_attitude_error = np.zeros(3)\n self.integral_attitude_error = np.zeros(3)",
"def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)",
"def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_",
"def reset(self):\n self._timestep = np.array([0])",
"def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)",
"def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None",
"def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0",
"def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)",
"def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])",
"def _reset(self, env_id: np.ndarray) -> None:",
"def reset(self):\n self.epsilon = self.epsilon_start",
"def reset_error(self):\n self.dLdu = np.zeros(self.u.shape)\n self.dLdw = np.zeros(self.w.shape)\n self.dLdv = np.zeros(self.v.shape)\n self.dLdb = np.zeros(self.b.shape)",
"def reset(self) -> None:\n self._dist['current'] = np.copy(self._dist['initial'])",
"def reset(self) -> None:\n self.true_positives = 0\n self.actual_positives = 0",
"def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()",
"def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0",
"def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []"
]
| [
"0.6393441",
"0.6191717",
"0.6155624",
"0.61232173",
"0.6086047",
"0.6076546",
"0.5989268",
"0.5978808",
"0.5949876",
"0.5936625",
"0.5931919",
"0.5930811",
"0.5918655",
"0.5910692",
"0.590065",
"0.58905596",
"0.5883913",
"0.58700746",
"0.5862289",
"0.58366185",
"0.57838637",
"0.5780442",
"0.57739896",
"0.5770067",
"0.5745167",
"0.57443446",
"0.57419986",
"0.57342887",
"0.5734229",
"0.57278126"
]
| 0.7476554 | 0 |
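Illustrative annotation (not part of the dataset row above): the positive document zeroes the AdaDelta accumulators (the running averages of squared gradients and squared updates) together with the coefficient vector itself, which restarts the no-motion optimizer from scratch. A self-contained sketch of the state such a reset clears, assuming the standard AdaDelta recurrences; the class name and the rho/eps defaults are hypothetical and not taken from the dataset:

```python
import numpy as np

class AdaDeltaState:
    """Minimal stand-in for the shared variables reset in the document above (assumed API)."""

    def __init__(self, shape, rho=0.95, eps=1e-6):
        self.rho, self.eps = rho, eps
        self.Eg2 = np.zeros(shape)   # running average of squared gradients
        self.EdA2 = np.zeros(shape)  # running average of squared updates
        self.A = np.zeros(shape)     # the parameter being optimized

    def step(self, grad):
        self.Eg2 = self.rho * self.Eg2 + (1 - self.rho) * grad ** 2
        dA = -np.sqrt(self.EdA2 + self.eps) / np.sqrt(self.Eg2 + self.eps) * grad
        self.EdA2 = self.rho * self.EdA2 + (1 - self.rho) * dA ** 2
        self.A += dA
        return dA

    def reset(self):
        # Mirrors reset_adadelta_variables: zero the accumulators and the parameter
        self.Eg2[:] = 0.0
        self.EdA2[:] = 0.0
        self.A[:] = 0.0
```

Zeroing Eg2 and EdA2, as reset() does here, forgets all accumulated step-size information, so the next update behaves like the first step of a fresh run.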
Reset the values of the hessian term and the bias term to zero to reset the | def reset_hessian_and_bias(self):
# reset_shared_var(self.t_H)
t = self.QUAD_REG
if len(t.shape) == 1:
self.t_H.set_value(np.diag(self.QUAD_REG))
elif len(t.shape) == 2:
self.t_H.set_value(self.QUAD_REG)
else:
raise ValueError('Invalid quad_reg shape')
reset_shared_var(self.t_B) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_parameters(self):\n if self.W is not None:\n tanh_gain = weight_init.calculate_gain(\"tanh\")\n weight_init.xavier_normal_(self.W, tanh_gain)\n # self.W.data.uniform_(-0.001, 0.001)",
"def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()",
"def reset(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)",
"def reset_grad(self):\r\n self.unet.zero_grad()",
"def reset_grad(self):\n self.unet.zero_grad()",
"def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0",
"def grad_zero(self):\r\n self.dw = torch.zeros_like(self.w)\r\n self.db = torch.zeros_like(self.b)",
"def _reset_parameters(self):\n if self.cfg.initial_forget_bias is not None:\n self.hindcast_lstm.bias_hh_l0.data[self.cfg.hidden_size:2 * self.cfg.hidden_size] = self.cfg.initial_forget_bias",
"def reset(self):\n self.x_prev = np.zeros_like(self.mu)",
"def reset_parameters(self) -> None:\n std = math.sqrt(3 / self.in_features)\n self.weight.data.uniform_(-std, std)\n self.bias.data.uniform_(-std, std)",
"def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1",
"def reset_grad(self):\n self.optimizer.zero_grad()",
"def reset(self):\n self.acc_loss = 0\n self.norm_term = 0",
"def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)",
"def reset(self):\n self.F = 0\n self.M = 0\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)",
"def reset_state(self):\n self.y = self.y0.copy()\n self.dy = jnp.zeros(self.n_dmps)\n self.ddy = jnp.zeros(self.n_dmps)\n self.cs.reset_state()",
"def zero_grad(self):\n for p, dp in self.params:\n dp.zero_()",
"def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()\n self.dr_optimizer.zero_grad()",
"def reset_grad(self):\r\n self.g_optimizer.zero_grad()",
"def reset(self):\r\n self.state = copy.copy(self.mu)",
"def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()",
"def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()",
"def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()",
"def reset_parameters(self):\n mu_range = 1 / math.sqrt(self.in_features)\n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(\n self.std_init / math.sqrt(self.in_features)\n )\n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(\n self.std_init / math.sqrt(self.out_features)\n )",
"def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0",
"def reset_parameters(self, bias):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n nn.init.xavier_uniform_(self.w_key.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.w_value.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.w_query.weight, gain=1 / math.sqrt(2))\n if bias:\n nn.init.constant_(self.w_key.bias, 0.0)\n nn.init.constant_(self.w_value.bias, 0.0)\n nn.init.constant_(self.w_query.bias, 0.0)\n nn.init.xavier_uniform_(self.w_out.weight)\n if bias:\n nn.init.constant_(self.w_out.bias, 0.0)",
"def reset_parameters(self, bias):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n nn.init.xavier_uniform_(self.w_key.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.w_value.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.w_query.weight, gain=1 / math.sqrt(2))\n if bias:\n nn.init.constant_(self.w_key.bias, 0.0)\n nn.init.constant_(self.w_value.bias, 0.0)\n nn.init.constant_(self.w_query.bias, 0.0)\n nn.init.xavier_uniform_(self.w_out.weight)\n if bias:\n nn.init.constant_(self.w_out.bias, 0.0)",
"def reset_grad(self):\n self.g_optimizer.zero_grad()",
"def reset(self):\n self._weights.clear()",
"def zero_grad(self):\r\n for param in self.params:\r\n param.grad = None"
]
| [
"0.70881516",
"0.708221",
"0.6806984",
"0.67656636",
"0.6675885",
"0.6660311",
"0.66351706",
"0.66191715",
"0.6577939",
"0.6569499",
"0.6541274",
"0.6515547",
"0.65126866",
"0.6502201",
"0.64899135",
"0.6471892",
"0.6464242",
"0.64477897",
"0.6444937",
"0.6430383",
"0.64219666",
"0.64219666",
"0.64219666",
"0.641942",
"0.6419006",
"0.63737905",
"0.63737905",
"0.63700414",
"0.63634115",
"0.6355018"
]
| 0.7808178 | 0 |
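The reset_hessian_and_bias document above calls reset_shared_var without defining it. A minimal sketch of such a helper, assuming the shared variables expose a Theano-style get_value/set_value interface (set_value appears in the snippet; get_value and the body below are assumptions):

import numpy as np

def reset_shared_var(t_var):
    # Zero the shared variable in place, preserving its shape and dtype.
    # Works for any object exposing get_value()/set_value(), e.g. theano.shared.
    t_var.set_value(np.zeros_like(t_var.get_value()))

With a helper like this, reset_hessian_and_bias restores the Hessian to np.diag(self.QUAD_REG) (or the full matrix) and zeroes the bias term t_B.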
Verifies CAS 1.0 authentication ticket. Returns username on success and None on failure. | def _verify_cas1(ticket, service):
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _CAS_login(self):\n import urllib\n self.ticket = current.request.vars.ticket\n if not current.request.vars.ticket:\n redirect(\"%s?service=%s\" % (self.cas_login_url,\n self.cas_my_url))\n else:\n url = \"%s?service=%s&ticket=%s\" % (self.cas_check_url,\n self.cas_my_url,\n self.ticket)\n data = urllib.urlopen(url).read()\n if data.startswith('yes') or data.startswith('no'):\n data = data.split('\\n')\n if data[0] == 'yes':\n if ':' in data[1]: # for Compatibility with Custom CAS\n items = data[1].split(':')\n a = items[0]\n b = len(items) > 1 and items[1] or a\n c = len(items) > 2 and items[2] or b\n else:\n a = b = c = data[1]\n return dict(user=a, email=b, username=c)\n return None\n import xml.dom.minidom as dom\n import xml.parsers.expat as expat\n try:\n dxml = dom.parseString(data)\n envelop = dxml.getElementsByTagName(\n \"cas:authenticationSuccess\")\n if len(envelop) > 0:\n res = dict()\n for x in envelop[0].childNodes:\n if x.nodeName.startswith('cas:') and len(x.childNodes):\n key = x.nodeName[4:].encode('utf8')\n value = x.childNodes[0].nodeValue.encode('utf8')\n if not key in res:\n res[key] = value\n else:\n if not isinstance(res[key], list):\n res[key] = [res[key]]\n res[key].append(value)\n return res\n except expat.ExpatError:\n pass\n return None # fallback",
"def _verify_cas2(ticket, service):\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n params = {'ticket': ticket, 'service': service}\n url = (urljoin(settings.CAS_SERVER_URL, 'serviceValidate') + '?' +\n urlencode(params))\n page = urlopen(url)\n try:\n response = page.read()\n '''Remove \\n\\t character from response xml'''\n response = re.sub(r'(?m)[\\t\\n]+', \"\", response)\n tree = ElementTree.fromstring(response)\n if tree[0].tag.endswith('authenticationSuccess'):\n member_of = []\n access_token = None\n user_name = None\n first_name = None\n last_name = None\n department = None\n for xmlTag in tree[0]:\n if xmlTag.tag.endswith('user'):\n user_name = xmlTag.text\n elif xmlTag.tag.endswith('firstName'):\n first_name = xmlTag.text\n elif xmlTag.tag.endswith('lastName'):\n last_name = xmlTag.text\n\n user_args = {\n \"user_name\":user_name,\n \"first_name\": first_name,\n \"last_name\": last_name\n }\n \n return user_args\n else:\n return None\n except Exception, e:\n logger.error(e)\n finally:\n page.close()",
"def casLogin(request):\n service = cas.getServiceUrl(request)\n username = unauthenticated_userid(request)\n if username is None:\n ticket = request.GET.get('ticket')\n if ticket is None:\n return cas.sendToService(request)\n username = cas.verifyCas20(request,ticket,service)\n if username is None:\n return 'no user'\n\n settings = request.registry.settings\n if 'pyramid_cas.callback.get_user' in settings:\n callable = settings['pyramid_cas.callback.get_user']\n module = callable.split('.')[0] + '.' + callable.split('.')[1]\n caller = sys.modules[module]\n method = getattr(caller,callable.split('.')[2])\n user = method(username,request)\n else:\n user = username\n headers = remember(request,user,max_age = '86400')\n return HTTPFound(location=request.route_url('home'),headers=headers)\n else:\n return HTTPFound(location='/not-allowed')",
"def verify_ticket(self, ticket, **kwargs):\n\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n page = self.fetch_saml_validation(ticket)\n\n try:\n user = None\n attributes = {}\n response = page.content\n tree = ElementTree.fromstring(response)\n # Find the authentication status\n success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')\n if success is not None and success.attrib['Value'].endswith('Success'):\n # User is validated\n name_identifier = tree.find('.//' + SAML_1_0_ASSERTION_NS + 'NameIdentifier')\n if name_identifier is not None:\n user = name_identifier.text\n attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')\n for at in attrs:\n if self.username_attribute in list(at.attrib.values()):\n user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text\n attributes['uid'] = user\n\n values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')\n if len(values) > 1:\n values_array = []\n for v in values:\n values_array.append(v.text)\n attributes[at.attrib['AttributeName']] = values_array\n else:\n attributes[at.attrib['AttributeName']] = values[0].text\n return user, attributes, None\n finally:\n page.close()",
"def authenticate(self, ticket, service, request):\n user = _verify(ticket, service)\n logger.debug(\"Verified User %s\" % user)\n if user is None:\n return None\n\n return models.SSOUser(**user)",
"def verify_ticket(self, ticket):\n response = self.get_verification_response(ticket)\n return self.verify_response(response)",
"def verify_ticket(self, ticket):\n raise NotImplementedError()",
"def verify_ticket(self, ticket):\n params = [('ticket', ticket), ('service', self.service_url)]\n url = (urllib_parse.urljoin(self.server_url, 'validate') + '?' +\n urllib_parse.urlencode(params))\n page = self.session.get(\n url,\n stream=True,\n verify=self.verify_ssl_certificate\n )\n try:\n page_iterator = page.iter_lines(chunk_size=8192)\n verified = next(page_iterator).strip()\n if verified == 'yes':\n return next(page_iterator).strip(), None, None\n else:\n return None, None, None\n finally:\n page.close()",
"def authenticate(self, ticket, service):\n User = get_user_model()\n\n if self.protocol == 1:\n valid = CAS1Validation(ticket, service)\n elif self.protocol == 2:\n valid = CAS2Validation(ticket, service)\n else:\n valid = None\n logger.info('Authenticating against CAS %s: service = %s ; ticket = %s; identifiers %s\\n%s', self.protocol, service, ticket, valid.identifiers, valid)\n if not valid or not valid.identifiers:\n return None\n # Select any users that match valid identifiers. Specify an ordering for consistent results.\n users = list(User.objects.filter(username__in=valid.identifiers).order_by('id'))\n logger.info('Authentication turned up %s users: %s', len(users), users)\n if users:\n user = None\n primary = valid.username\n for potential in users:\n # Try and pick a user that matches the primary identifier.\n if potential.username == primary:\n user = potential\n break\n if user is None:\n # Otherwise, pick the first in the result set.\n user = users[0]\n logger.info('Picking primary user: %s', user)\n\n else:\n logger.info('Creating new user for %s', valid.username)\n user = User(username=valid.username)\n user.set_unusable_password()\n if self.set_email and 'email' in valid.attributes:\n user.email = valid.attributes['email']\n user.save()\n\n if len(users) > 1:\n others = [u for u in users if u.username != user.username]\n logger.info('Sending merge signal for other users: %s', others)\n try:\n result = signals.on_cas_merge_users.send(sender=self, primary=user,\n others=others)\n except Exception:\n logger.exception('Merge signal failed!')\n else:\n logger.info('Sent merge signal. Result: %s', result)\n\n if users:\n changed = False\n if (self.set_email\n and 'email' in valid.attributes\n and valid.attributes['email'] != user.email\n ):\n user.email = valid.attributes['email']\n changed = True\n\n if (self.set_username\n and user.username != primary\n ):\n user.username = primary\n changed = True\n\n if changed:\n user.save()\n\n logger.info('Authenticated user: %s' % user)\n\n signals.on_cas_authentication.send(sender=self, user=user, attributes=valid.attributes)\n return user",
"def test_success(self):\n \n result = self.authenticator.authenticate(\n username=u'thruflo', \n password=u'secret'\n )\n self.assertTrue(result.username == self.user.username)",
"def authenticate(self):\n try:\n auth_header = self.basic_token\n username, password = decode(auth_header)\n\n user_principal = None\n allowlisted_users = Environment().get_allowlisted_users()\n if allowlisted_users is not None:\n password_from_allowlist = allowlisted_users.get(username)\n if password_from_allowlist is None or password_from_allowlist != password:\n logger.log_error(\"Invalid user credentials provided\")\n raise AuthenticationError(\"Invalid user credential\")\n else:\n raise AuthenticationError(\"No whitelisted users found to authenticate against\")\n\n if Environment().is_kerberos_enabled():\n user_principal = self.get_user_principal(username)\n key_tab_path = Environment().get_hdfs_keytab_file_path()\n logger.log_info(\"Minting a kerberos ticket for principal {} using keytab {}\".format(user_principal, key_tab_path))\n if key_tab_path is None or user_principal is None:\n raise AuthenticationError(\"Keytab file or kerberos principal missing\")\n returncode = KerberosUtil.renew_kinit(key_tab_path, user_principal)\n logger.log_info('kinit return code:' + str(returncode))\n\n return username, user_principal\n except Exception as e:\n logger.log_exception(\"Failed while authenticating user\", exc_info=True)\n raise AuthenticationError(str(e))",
"def patch_cas_response(\n self,\n valid_ticket,\n username=None, attributes={}):\n if hasattr(self, '_patch_cas_client'):\n self.patch_cas_response_stop()\n\n class MockCASClient(object):\n _username = username\n\n def __new__(self_client, *args, **kwargs):\n version = kwargs.pop('version')\n if version in (1, '1'):\n client_class = cas.CASClientV1\n elif version in (2, '2'):\n client_class = cas.CASClientV2\n elif version in (3, '3'):\n client_class = cas.CASClientV3\n elif version == 'CAS_2_SAML_1_0':\n client_class = cas.CASClientWithSAMLV1\n else:\n raise ValueError('Unsupported CAS_VERSION %r' % version)\n\n client_class._username = self_client._username\n\n def verify_ticket(self, ticket):\n if valid_ticket == '__all__' or ticket == valid_ticket:\n username = self._username or 'username'\n return username, attributes, None\n return None, {}, None\n\n patcher = patch.object(\n client_class, 'verify_ticket',\n new=verify_ticket,\n )\n patcher.start()\n\n return client_class(*args, **kwargs)\n\n self._patch_cas_client = patch(\n 'allauth_cas.views.cas.CASClient',\n MockCASClient,\n )\n self._patch_cas_client.start()",
"def validate_oculus_ticket():\n\n ob = request.get_json()\n try:\n OculusProviderAuthSchema().load(ob)\n except ma.ValidationError as e:\n abort_unauthorized(\"Oculus token property %s is invalid\" % e.field_name)\n\n provider_details = ob['provider_details']\n # Get Oculus authentication config\n oculus_config = get_provider_config('oculus')\n\n if not oculus_config:\n abort(http_client.SERVICE_UNAVAILABLE, description=\"Oculus authentication not configured for current tenant\")\n\n # Call validation and authenticate if ticket is good\n identity_id = run_ticket_validation(\n user_id=provider_details['user_id'],\n access_token=oculus_config['access_token'],\n nonce=provider_details['nonce']\n )\n\n return identity_id",
"def login(request, redirect_field_name=REDIRECT_FIELD_NAME):\n ticket = request.GET.get(settings.CAS_TICKET_LABEL, None)\n redirect_to = request.GET.get(redirect_field_name,\n request.session.pop('login_redirect_to', ''))\n service = request.GET.get(settings.CAS_SERVICE_LABEL,\n request.session.pop('cas_service', settings.CAS_SERVICE))\n \n\n if ticket is None:\n # Need to obtain a service validation ticket from the CAS provider.\n request.session['cas_service'] = service\n request.session['login_redirect_to'] = redirect_to\n params = settings.CAS_EXTRA_LOGIN_PARAMS\n params.update({settings.CAS_SERVICE_LABEL: service})\n # override params with anything posted to the view\n for key,value in params.items():\n if key in request.GET:\n params[key] = request.GET.get(key, value)\n url = cas_login + '?'\n if 'req_press' in request.GET and 'uid' in request.GET:\n url += urlencode({\n 'req_press': 1,\n 'uid': request.GET['uid']\n })\n url += '&'\n raw_params = ['%s=%s' % (key, value) for key, value in params.items()]\n url += '&'.join(raw_params)\n return HttpResponseRedirect(url)\n\n user = authenticate(service=service, ticket=ticket)\n if user is not None:\n netloc = urlparse.urlparse(redirect_to)[1]\n\n # Use default setting if redirect_to is empty\n if not redirect_to:\n redirect_to = settings.LOGIN_REDIRECT_URL\n\n # Heavier security check -- don't allow redirection to a different\n # host.\n elif netloc and netloc != request.get_host():\n redirect_to = settings.LOGIN_REDIRECT_URL\n \n # Okay, security checks complete. Log the user in.\n auth_login(request, user)\n name = user.first_name or user.username\n\n if '?' in redirect_to:\n redirect_to += '&loginComplete=true'\n else:\n redirect_to += '?loginComplete=true'\n\n if messages is not None:\n messages.success(request, \"Login succeeded. Welcome, %s.\" % name)\n return HttpResponseRedirect(redirect_to)\n else:\n return HttpResponseForbidden(\"Error authenticating with CAS\")",
"def _login_challenge(self):\n headers, items = self._get('/login', {\n 'dbus': 'AUTH DBUS_COOKIE_SHA1 %s' % self.username\n })\n\n if headers.get('request_result') != 'success':\n raise ApiException(\"Failed receiving challenge\")\n\n return items[0].get('dbus').split(' ')[-1]",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def authenticate(self, username: str, password: str) -> Optional[str]:",
"def check_auth(username, password):\n return get_ct_object(username, password) is not None",
"def check_auth(username, password):\n return basic_login(username, password)",
"def _authenticate(self, request, params):\n username = params[\"username\"]\n # Quick check if we've already validated these params.\n if request.environ.get(_ENVKEY_VALID_RESPONSE):\n return True\n # Obtain the verifier information somehow.\n (_, _, verifier) = self._get_verifier(username)\n if verifier is None:\n return False\n # Validate the HMAC digest response.\n privkey = self._get_privkey(params[\"nonce\"])\n if not check_response(request, params,\n privkey=privkey, verifier=verifier):\n return False\n # Cache the successful authentication.\n request.environ[_ENVKEY_VALID_RESPONSE] = True\n return True",
"def remote_login(self, username):\n c = challenge()\n return c, _PortalAuthVerifier(self.portal, self.broker, username, c)",
"def test_authenticatorChallengeResponse(self):\n username = b'testuser'\n secret = b'secret'\n chal = b'challenge'\n cAuth = imap4.PLAINAuthenticator(username)\n response = cAuth.challengeResponse(secret, chal)\n self.assertEqual(response, b'\\0' + username + b'\\0' + secret)",
"def check_user_credentials(token, auth_url='https://accounts.okeanos.grnet.gr'\n '/identity/v2.0'):\n logging.info(' Test the credentials')\n try:\n auth = AstakosClient(auth_url, token)\n auth.authenticate()\n logging.info(' Authentication verified')\n return AUTHENTICATED\n except ClientError:\n logging.error('Authentication failed with url %s and token %s' % (\n auth_url, token))\n return NOT_AUTHENTICATED",
"def authenticate(self, rfid):\n print(\"Auth id: [{}]\".format(rfid))\n\n values = {'id' : rfid}\n data = urllib.parse.urlencode(values)\n data = data.encode('utf-8')\n\n t1 = perf_counter()\n\n req = urllib.request.Request(self.auth_url, data)\n try:\n resp = urllib.request.urlopen(req, timeout=self.request_timeout)\n except URLError as err:\n print(\"URLError: auth_url:[{}]\".format(self.auth_url))\n print(\"URLError: {}\".format(err))\n print(\"Falling back to local cache\")\n cached = self.auth_from_cache(rfid)\n return cached\n except timeout as err:\n cached = self.auth_from_cache(rfid)\n return cached\n\n text = resp.read()\n\n t2 = perf_counter()\n print(\"Auth got [{}] in {} seconds\".format(text, t2-t1))\n\n if text == b'Granted':\n return True",
"def authenticate(self, username, password):\n return None",
"def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata = {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))",
"def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def client_cas_login(\n self,\n client, provider_id='theid',\n username=None, attributes={}):\n client.get(reverse('{id}_login'.format(id=provider_id)))\n self.patch_cas_response(\n valid_ticket='__all__',\n username=username, attributes=attributes,\n )\n callback_url = reverse('{id}_callback'.format(id=provider_id))\n r = client.get(callback_url, {'ticket': 'fake-ticket'})\n self.patch_cas_response_stop()\n return r",
"def authenticate(self, realm=None, username=None, password=None, login_params=None):\n if not username:\n raise ValueError(\"You will need to provide a username to login.\")\n\n if not password:\n raise ValueError(\"You will need to provide a password to login.\")\n\n post_data = '{}'\n self.realm = realm\n cookiename = self.cookiename\n login_headers = self.headers\n login_headers['X-OpenAM-Username'] = username\n login_headers['X-OpenAM-Password'] = password\n uri = self._uri_realm_creator(realm=realm, uri=\"authenticate\", arguments=login_params)\n\n data = self._post(uri=uri, data=post_data, headers=login_headers)\n if data.status_code == 200:\n json_data = data.json()\n self.headers[cookiename] = json_data['tokenId']\n return json_data\n else:\n return False"
]
| [
"0.6621475",
"0.64085776",
"0.6396165",
"0.6132385",
"0.6075758",
"0.60504854",
"0.59542656",
"0.5880836",
"0.57991385",
"0.5658998",
"0.5637163",
"0.55938107",
"0.5570902",
"0.55611295",
"0.5410291",
"0.5364074",
"0.5364074",
"0.5313934",
"0.5269508",
"0.52593505",
"0.5255258",
"0.5237285",
"0.52268606",
"0.51953024",
"0.51919496",
"0.5187796",
"0.5187735",
"0.5162593",
"0.51591444",
"0.5152562"
]
| 0.6950565 | 0 |
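For reference, the CAS 1.0 validate endpoint answers with a two-line plain-text body, which is exactly what _verify_cas1 reads line by line. A small sketch of the same parsing applied to an in-memory response body (the function name and sample strings are illustrative, not part of the original):

def parse_cas1_response(body):
    # CAS 1.0 returns "yes\n<username>\n" on success and "no\n\n" on failure.
    lines = body.splitlines()
    if lines and lines[0].strip() == 'yes' and len(lines) > 1:
        return lines[1].strip()
    return None

assert parse_cas1_response('yes\njdoe\n') == 'jdoe'
assert parse_cas1_response('no\n\n') is None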
Verifies CAS 2.0+ XML-based authentication ticket. Returns user's attributes on success and None on failure. | def _verify_cas2(ticket, service):
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'serviceValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
'''Remove \n\t character from response xml'''
response = re.sub(r'(?m)[\t\n]+', "", response)
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
member_of = []
access_token = None
user_name = None
first_name = None
last_name = None
department = None
for xmlTag in tree[0]:
if xmlTag.tag.endswith('user'):
user_name = xmlTag.text
elif xmlTag.tag.endswith('firstName'):
first_name = xmlTag.text
elif xmlTag.tag.endswith('lastName'):
last_name = xmlTag.text
user_args = {
"user_name":user_name,
"first_name": first_name,
"last_name": last_name
}
return user_args
else:
return None
except Exception, e:
logger.error(e)
finally:
page.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_ticket(self, ticket, **kwargs):\n\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n page = self.fetch_saml_validation(ticket)\n\n try:\n user = None\n attributes = {}\n response = page.content\n tree = ElementTree.fromstring(response)\n # Find the authentication status\n success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')\n if success is not None and success.attrib['Value'].endswith('Success'):\n # User is validated\n name_identifier = tree.find('.//' + SAML_1_0_ASSERTION_NS + 'NameIdentifier')\n if name_identifier is not None:\n user = name_identifier.text\n attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')\n for at in attrs:\n if self.username_attribute in list(at.attrib.values()):\n user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text\n attributes['uid'] = user\n\n values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')\n if len(values) > 1:\n values_array = []\n for v in values:\n values_array.append(v.text)\n attributes[at.attrib['AttributeName']] = values_array\n else:\n attributes[at.attrib['AttributeName']] = values[0].text\n return user, attributes, None\n finally:\n page.close()",
"def _CAS_login(self):\n import urllib\n self.ticket = current.request.vars.ticket\n if not current.request.vars.ticket:\n redirect(\"%s?service=%s\" % (self.cas_login_url,\n self.cas_my_url))\n else:\n url = \"%s?service=%s&ticket=%s\" % (self.cas_check_url,\n self.cas_my_url,\n self.ticket)\n data = urllib.urlopen(url).read()\n if data.startswith('yes') or data.startswith('no'):\n data = data.split('\\n')\n if data[0] == 'yes':\n if ':' in data[1]: # for Compatibility with Custom CAS\n items = data[1].split(':')\n a = items[0]\n b = len(items) > 1 and items[1] or a\n c = len(items) > 2 and items[2] or b\n else:\n a = b = c = data[1]\n return dict(user=a, email=b, username=c)\n return None\n import xml.dom.minidom as dom\n import xml.parsers.expat as expat\n try:\n dxml = dom.parseString(data)\n envelop = dxml.getElementsByTagName(\n \"cas:authenticationSuccess\")\n if len(envelop) > 0:\n res = dict()\n for x in envelop[0].childNodes:\n if x.nodeName.startswith('cas:') and len(x.childNodes):\n key = x.nodeName[4:].encode('utf8')\n value = x.childNodes[0].nodeValue.encode('utf8')\n if not key in res:\n res[key] = value\n else:\n if not isinstance(res[key], list):\n res[key] = [res[key]]\n res[key].append(value)\n return res\n except expat.ExpatError:\n pass\n return None # fallback",
"def authenticate(self, ticket, service, request):\n user = _verify(ticket, service)\n logger.debug(\"Verified User %s\" % user)\n if user is None:\n return None\n\n return models.SSOUser(**user)",
"def _verify_cas1(ticket, service):\n params = {'ticket': ticket, 'service': service}\n url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +\n urlencode(params))\n page = urlopen(url)\n try:\n verified = page.readline().strip()\n if verified == 'yes':\n return page.readline().strip()\n else:\n return None\n finally:\n page.close()",
"def verify_ticket(self, ticket):\n params = [('ticket', ticket), ('service', self.service_url)]\n url = (urllib_parse.urljoin(self.server_url, 'validate') + '?' +\n urllib_parse.urlencode(params))\n page = self.session.get(\n url,\n stream=True,\n verify=self.verify_ssl_certificate\n )\n try:\n page_iterator = page.iter_lines(chunk_size=8192)\n verified = next(page_iterator).strip()\n if verified == 'yes':\n return next(page_iterator).strip(), None, None\n else:\n return None, None, None\n finally:\n page.close()",
"def verify_ticket(self, ticket):\n response = self.get_verification_response(ticket)\n return self.verify_response(response)",
"def authenticate(self, ticket, service):\n User = get_user_model()\n\n if self.protocol == 1:\n valid = CAS1Validation(ticket, service)\n elif self.protocol == 2:\n valid = CAS2Validation(ticket, service)\n else:\n valid = None\n logger.info('Authenticating against CAS %s: service = %s ; ticket = %s; identifiers %s\\n%s', self.protocol, service, ticket, valid.identifiers, valid)\n if not valid or not valid.identifiers:\n return None\n # Select any users that match valid identifiers. Specify an ordering for consistent results.\n users = list(User.objects.filter(username__in=valid.identifiers).order_by('id'))\n logger.info('Authentication turned up %s users: %s', len(users), users)\n if users:\n user = None\n primary = valid.username\n for potential in users:\n # Try and pick a user that matches the primary identifier.\n if potential.username == primary:\n user = potential\n break\n if user is None:\n # Otherwise, pick the first in the result set.\n user = users[0]\n logger.info('Picking primary user: %s', user)\n\n else:\n logger.info('Creating new user for %s', valid.username)\n user = User(username=valid.username)\n user.set_unusable_password()\n if self.set_email and 'email' in valid.attributes:\n user.email = valid.attributes['email']\n user.save()\n\n if len(users) > 1:\n others = [u for u in users if u.username != user.username]\n logger.info('Sending merge signal for other users: %s', others)\n try:\n result = signals.on_cas_merge_users.send(sender=self, primary=user,\n others=others)\n except Exception:\n logger.exception('Merge signal failed!')\n else:\n logger.info('Sent merge signal. Result: %s', result)\n\n if users:\n changed = False\n if (self.set_email\n and 'email' in valid.attributes\n and valid.attributes['email'] != user.email\n ):\n user.email = valid.attributes['email']\n changed = True\n\n if (self.set_username\n and user.username != primary\n ):\n user.username = primary\n changed = True\n\n if changed:\n user.save()\n\n logger.info('Authenticated user: %s' % user)\n\n signals.on_cas_authentication.send(sender=self, user=user, attributes=valid.attributes)\n return user",
"def test_auth_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )",
"def test_auth_failure_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey') + \"1234\",\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertEqual(\n response,\n None,\n \"Authentication did not return 'None', but %s instead.\" % (\n response\n )\n )",
"def _get_login_response_authn(self, ticket: SSOLoginData, user: IdPUser) -> AuthnInfo:\n self.logger.debug('MFA credentials logged in the ticket: {}'.format(ticket.mfa_action_creds))\n self.logger.debug('External MFA credential logged in the ticket: {}'.format(ticket.mfa_action_external))\n self.logger.debug('Credentials used in this SSO session:\\n{}'.format(self.sso_session.authn_credentials))\n self.logger.debug('User credentials:\\n{}'.format(user.credentials.to_list()))\n\n # Decide what AuthnContext to assert based on the one requested in the request\n # and the authentication performed\n\n req_authn_context = get_requested_authn_context(self.context.idp, ticket.saml_req, self.logger)\n\n try:\n resp_authn = assurance.response_authn(req_authn_context, user, self.sso_session, self.logger)\n except WrongMultiFactor as exc:\n self.logger.info('Assurance not possible: {!r}'.format(exc))\n raise eduid_idp.error.Forbidden('SWAMID_MFA_REQUIRED')\n except MissingMultiFactor as exc:\n self.logger.info('Assurance not possible: {!r}'.format(exc))\n raise eduid_idp.error.Forbidden('MFA_REQUIRED')\n except AssuranceException as exc:\n self.logger.info('Assurance not possible: {!r}'.format(exc))\n raise MustAuthenticate()\n\n self.logger.debug(\"Response Authn context class: {!r}\".format(resp_authn))\n\n try:\n self.logger.debug(\"Asserting AuthnContext {!r} (requested: {!r})\".format(resp_authn, req_authn_context))\n except AttributeError:\n self.logger.debug(\"Asserting AuthnContext {!r} (none requested)\".format(resp_authn))\n\n # Augment the AuthnInfo with the authn_timestamp before returning it\n return replace(resp_authn, instant=self.sso_session.authn_timestamp)",
"def verify_ticket(self, ticket):\n raise NotImplementedError()",
"def casLogin(request):\n service = cas.getServiceUrl(request)\n username = unauthenticated_userid(request)\n if username is None:\n ticket = request.GET.get('ticket')\n if ticket is None:\n return cas.sendToService(request)\n username = cas.verifyCas20(request,ticket,service)\n if username is None:\n return 'no user'\n\n settings = request.registry.settings\n if 'pyramid_cas.callback.get_user' in settings:\n callable = settings['pyramid_cas.callback.get_user']\n module = callable.split('.')[0] + '.' + callable.split('.')[1]\n caller = sys.modules[module]\n method = getattr(caller,callable.split('.')[2])\n user = method(username,request)\n else:\n user = username\n headers = remember(request,user,max_age = '86400')\n return HTTPFound(location=request.route_url('home'),headers=headers)\n else:\n return HTTPFound(location='/not-allowed')",
"def verify_user(self, tokendict):\n return self.post('verify', tokendict)",
"def do_verify(context: IdPContext):\n query = eduid_idp.mischttp.get_post(context.logger)\n # extract password to keep it away from as much code as possible\n password = query.pop('password', None)\n if password:\n query['password'] = '<redacted>'\n context.logger.debug(\"do_verify parsed query :\\n{!s}\".format(pprint.pformat(query)))\n\n _info = {}\n for this in ['SAMLRequest', 'binding', 'RelayState']:\n if this not in query:\n raise eduid_idp.error.BadRequest(f'Missing parameter {this} - please re-initiate login')\n _info[this] = unescape(query[this])\n _ticket = _get_ticket(context, _info, None)\n\n authn_ref = _ticket.saml_req.get_requested_authn_context()\n context.logger.debug(\"Authenticating with {!r}\".format(authn_ref))\n\n if not password or 'username' not in query:\n lox = f'{query[\"redirect_uri\"]}?{_ticket.query_string}'\n context.logger.debug(f'Credentials not supplied. Redirect => {lox}')\n raise eduid_idp.mischttp.Redirect(lox)\n\n login_data = {\n 'username': query['username'].strip(),\n 'password': password,\n }\n del password # keep out of any exception logs\n try:\n authninfo = context.authn.password_authn(login_data)\n except exceptions.EduidTooManyRequests as e:\n raise eduid_idp.error.TooManyRequests(e.args[0])\n except exceptions.EduidForbidden as e:\n raise eduid_idp.error.Forbidden(e.args[0])\n\n if not authninfo:\n _ticket.FailCount += 1\n cherrypy.session.sso_ticket = _ticket\n lox = f'{query[\"redirect_uri\"]}?{_ticket.query_string}'\n context.logger.debug(f'Unknown user or wrong password. Redirect => {lox}')\n raise eduid_idp.mischttp.Redirect(lox)\n\n # Create SSO session\n user = authninfo.user\n context.logger.debug(\"User {} authenticated OK\".format(user))\n _sso_session = SSOSession(\n user_id=user.user_id, authn_request_id=_ticket.saml_req.request_id, authn_credentials=[authninfo],\n )\n\n # This session contains information about the fact that the user was authenticated. It is\n # used to avoid requiring subsequent authentication for the same user during a limited\n # period of time, by storing the session-id in a browser cookie.\n _session_id = context.sso_sessions.add_session(user.eppn, _sso_session.to_dict())\n eduid_idp.mischttp.set_cookie('idpauthn', '/', context.logger, context.config, _session_id.decode('utf-8'))\n # knowledge of the _session_id enables impersonation, so get rid of it as soon as possible\n del _session_id\n\n # INFO-Log the request id (sha1 of SAMLrequest) and the sso_session\n context.logger.info(\n \"{!s}: login sso_session={!s}, authn={!s}, user={!s}\".format(\n _ticket.key, _sso_session.public_id, authn_ref, user\n )\n )\n\n # Now that an SSO session has been created, redirect the users browser back to\n # the main entry point of the IdP (the 'redirect_uri'). The ticket reference `key'\n # is passed as an URL parameter instead of the SAMLRequest.\n lox = query[\"redirect_uri\"] + '?key=' + _ticket.key\n context.logger.debug(\"Redirect => %s\" % lox)\n raise eduid_idp.mischttp.Redirect(lox)",
"def validate_oculus_ticket():\n\n ob = request.get_json()\n try:\n OculusProviderAuthSchema().load(ob)\n except ma.ValidationError as e:\n abort_unauthorized(\"Oculus token property %s is invalid\" % e.field_name)\n\n provider_details = ob['provider_details']\n # Get Oculus authentication config\n oculus_config = get_provider_config('oculus')\n\n if not oculus_config:\n abort(http_client.SERVICE_UNAVAILABLE, description=\"Oculus authentication not configured for current tenant\")\n\n # Call validation and authenticate if ticket is good\n identity_id = run_ticket_validation(\n user_id=provider_details['user_id'],\n access_token=oculus_config['access_token'],\n nonce=provider_details['nonce']\n )\n\n return identity_id",
"def authenticate(self):\n try:\n auth_header = self.basic_token\n username, password = decode(auth_header)\n\n user_principal = None\n allowlisted_users = Environment().get_allowlisted_users()\n if allowlisted_users is not None:\n password_from_allowlist = allowlisted_users.get(username)\n if password_from_allowlist is None or password_from_allowlist != password:\n logger.log_error(\"Invalid user credentials provided\")\n raise AuthenticationError(\"Invalid user credential\")\n else:\n raise AuthenticationError(\"No whitelisted users found to authenticate against\")\n\n if Environment().is_kerberos_enabled():\n user_principal = self.get_user_principal(username)\n key_tab_path = Environment().get_hdfs_keytab_file_path()\n logger.log_info(\"Minting a kerberos ticket for principal {} using keytab {}\".format(user_principal, key_tab_path))\n if key_tab_path is None or user_principal is None:\n raise AuthenticationError(\"Keytab file or kerberos principal missing\")\n returncode = KerberosUtil.renew_kinit(key_tab_path, user_principal)\n logger.log_info('kinit return code:' + str(returncode))\n\n return username, user_principal\n except Exception as e:\n logger.log_exception(\"Failed while authenticating user\", exc_info=True)\n raise AuthenticationError(str(e))",
"def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result",
"def fetch_saml_validation(self, ticket):\n\n headers = {\n 'soapaction': 'http://www.oasis-open.org/committees/security',\n 'cache-control': 'no-cache',\n 'pragma': 'no-cache',\n 'accept': 'text/xml',\n 'connection': 'keep-alive',\n 'content-type': 'text/xml; charset=utf-8',\n }\n params = {'TARGET': self.service_url}\n saml_validate_url = urllib_parse.urljoin(\n self.server_url, 'samlValidate',\n )\n return self.session.post(\n saml_validate_url,\n self.get_saml_assertion(ticket),\n params=params,\n headers=headers)",
"def patch_cas_response(\n self,\n valid_ticket,\n username=None, attributes={}):\n if hasattr(self, '_patch_cas_client'):\n self.patch_cas_response_stop()\n\n class MockCASClient(object):\n _username = username\n\n def __new__(self_client, *args, **kwargs):\n version = kwargs.pop('version')\n if version in (1, '1'):\n client_class = cas.CASClientV1\n elif version in (2, '2'):\n client_class = cas.CASClientV2\n elif version in (3, '3'):\n client_class = cas.CASClientV3\n elif version == 'CAS_2_SAML_1_0':\n client_class = cas.CASClientWithSAMLV1\n else:\n raise ValueError('Unsupported CAS_VERSION %r' % version)\n\n client_class._username = self_client._username\n\n def verify_ticket(self, ticket):\n if valid_ticket == '__all__' or ticket == valid_ticket:\n username = self._username or 'username'\n return username, attributes, None\n return None, {}, None\n\n patcher = patch.object(\n client_class, 'verify_ticket',\n new=verify_ticket,\n )\n patcher.start()\n\n return client_class(*args, **kwargs)\n\n self._patch_cas_client = patch(\n 'allauth_cas.views.cas.CASClient',\n MockCASClient,\n )\n self._patch_cas_client.start()",
"def getTicketValue(xml):\n doc = libxml2.parseDoc(xml)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"//ticket/value\")\n if len(res) == 1:\n for i in res:\n return i.getContent()\n else:\n return None",
"def check_user(self):\n return self.client.service.checkUser(self.authentication).accountDetails",
"def check_user_credentials(token, auth_url='https://accounts.okeanos.grnet.gr'\n '/identity/v2.0'):\n logging.info(' Test the credentials')\n try:\n auth = AstakosClient(auth_url, token)\n auth.authenticate()\n logging.info(' Authentication verified')\n return AUTHENTICATED\n except ClientError:\n logging.error('Authentication failed with url %s and token %s' % (\n auth_url, token))\n return NOT_AUTHENTICATED",
"def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)",
"def xmlrpc_authenticate(self, void, api_key, username, password):\n\t\tkey_info = False\n\n\t\t@stack\n\t\tdef check_info(result):\n\t\t\tif not result:\n\t\t\t\treturn None\n\t\t\treturn self.app.api.users.check_authentication(username, password, None)\n\t\td = self.get_key_info(api_key)\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d",
"def test_password_auth_xml(self):\n\n config = get_config()\n\n if config.getboolean(\"auth_by_password_test\", \"enabled\"):\n\n # Run only if enabled\n\n response = authenticate(\n config.get(\"auth_by_password_test\", \"url\"),\n config.get(\"auth_by_password_test\", \"account\"),\n config.get(\"auth_by_password_test\", \"password\"),\n config.get(\"auth_by_password_test\", \"account_by\"),\n use_password=True,\n request_type=\"xml\"\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )",
"def authenticate(self, request):\n\n # Get the underlying HttpRequest object\n request = request._request\n user = getattr(request, 'user', None)\n\n # Unauthenticated, CSRF validation not required\n if not user or not user.is_active:\n return None\n\n #self.enforce_csrf(request)\n\n # CSRF passed with authenticated user\n return (user, None)",
"def auth(self):\r\n basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))\r\n if basic: return basic\r\n ruser = self.environ.get('REMOTE_USER')\r\n if ruser: return (ruser, None)\r\n return None",
"def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n\n try:\n data = s.loads(token)\n except SignatureExpired:\n print \"EXP\", token\n return None\n except BadSignature:\n print \"BAD\", token\n return None\n\n user = User.query.get(data['id'])\n return user",
"def check_auth(requests_session, server, user, logger):\n logger.debug('Getting {} to test auth'.format(server))\n test_auth_request = requests_session.get(server)\n logger.debug('Status: {}'.format(test_auth_request.status_code))\n\n if test_auth_request.status_code == 401 or 'Login attempt failed. Please try again.' in test_auth_request.text:\n message = 'Login attempt failed for {}, please make sure your credentials for user {} are correct!'.format(server, user)\n logger.critical(message)\n raise exceptions.XNATLoginFailedError(message)\n\n if test_auth_request.status_code != 200:\n logger.warning('Simple test requests did not return a 200 code! Server might not be functional!')\n\n if user is not None:\n match = re.search(r'<span id=\"user_info\">Logged in as: <a (id=\"[^\"]+\" )?href=\"[^\"]+\">(?P<username>[^<]+)</a>',\n test_auth_request.text)\n\n if match is None:\n match = re.search(r'<span id=\"user_info\">Logged in as: <span style=\"color:red;\">Guest</span>',\n test_auth_request.text)\n if match is None:\n match = re.search('Your password has expired', test_auth_request.text)\n if match:\n message = 'Your password has expired. Please try again after updating your password on XNAT.'\n logger.error(message)\n raise exceptions.XNATExpiredCredentialsError(message)\n else:\n message = 'Could not determine if login was successful!'\n logger.error(message)\n logger.debug(test_auth_request.text)\n raise exceptions.XNATAuthError(message)\n else:\n message = 'Login failed (in guest mode)!'\n logger.error(message)\n raise exceptions.XNATLoginFailedError(message)\n else:\n username = match.group('username')\n logger.info('Logged in successfully as {}'.format(username))\n return username\n\n match = re.search(r'<span id=\"user_info\">Logged in as: <span style=\"color:red;\">Guest</span>',\n test_auth_request.text)\n if match is None:\n message = 'Could not determine if login was successful!'\n logger.error(message)\n raise exceptions.XNATAuthError(message)\n else:\n logger.info('Logged in as guest successfully')\n return 'guest'",
"def verify_auth_token(token):\n s = Serializer(mscolab_settings.SECRET_KEY)\n try:\n data = s.loads(token)\n except SignatureExpired:\n logging.debug(\"Signature Expired\")\n return None # valid token, but expired\n except BadSignature:\n logging.debug(\"Bad Signature\")\n return None # invalid token\n user = User.query.filter_by(id=data['id']).first()\n return user"
]
| [
"0.76262337",
"0.6640478",
"0.6270583",
"0.616184",
"0.6048709",
"0.6014877",
"0.5855325",
"0.5819822",
"0.56555504",
"0.56180346",
"0.55637974",
"0.55475897",
"0.5488287",
"0.54433984",
"0.537912",
"0.5327678",
"0.5322191",
"0.53005683",
"0.52785677",
"0.5266824",
"0.52515423",
"0.5237776",
"0.5233932",
"0.52296543",
"0.5220314",
"0.5213672",
"0.5118172",
"0.50986254",
"0.50946677",
"0.5081357"
]
| 0.7101547 | 1 |
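The _verify_cas2 document above matches element tags by suffix instead of using explicit namespaces. A small sketch of that parsing against a sample serviceValidate payload (the payload is illustrative of what the snippet expects, with firstName and lastName as direct children of the success element, and is not a normative CAS response):

from xml.etree import ElementTree

sample = (
    '<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">'
    '<cas:authenticationSuccess>'
    '<cas:user>jdoe</cas:user>'
    '<cas:firstName>John</cas:firstName>'
    '<cas:lastName>Doe</cas:lastName>'
    '</cas:authenticationSuccess>'
    '</cas:serviceResponse>'
)

tree = ElementTree.fromstring(sample)
assert tree[0].tag.endswith('authenticationSuccess')
# Parsed tags come back namespace-qualified, e.g. '{http://www.yale.edu/tp/cas}user'.
fields = {child.tag.split('}')[-1]: child.text for child in tree[0]}
assert fields == {'user': 'jdoe', 'firstName': 'John', 'lastName': 'Doe'}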
Verifies CAS ticket and gets or creates User object | def authenticate(self, ticket, service, request):
user = _verify(ticket, service)
logger.debug("Verified User %s" % user)
if user is None:
return None
return models.SSOUser(**user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate(self, ticket, service):\n User = get_user_model()\n\n if self.protocol == 1:\n valid = CAS1Validation(ticket, service)\n elif self.protocol == 2:\n valid = CAS2Validation(ticket, service)\n else:\n valid = None\n logger.info('Authenticating against CAS %s: service = %s ; ticket = %s; identifiers %s\\n%s', self.protocol, service, ticket, valid.identifiers, valid)\n if not valid or not valid.identifiers:\n return None\n # Select any users that match valid identifiers. Specify an ordering for consistent results.\n users = list(User.objects.filter(username__in=valid.identifiers).order_by('id'))\n logger.info('Authentication turned up %s users: %s', len(users), users)\n if users:\n user = None\n primary = valid.username\n for potential in users:\n # Try and pick a user that matches the primary identifier.\n if potential.username == primary:\n user = potential\n break\n if user is None:\n # Otherwise, pick the first in the result set.\n user = users[0]\n logger.info('Picking primary user: %s', user)\n\n else:\n logger.info('Creating new user for %s', valid.username)\n user = User(username=valid.username)\n user.set_unusable_password()\n if self.set_email and 'email' in valid.attributes:\n user.email = valid.attributes['email']\n user.save()\n\n if len(users) > 1:\n others = [u for u in users if u.username != user.username]\n logger.info('Sending merge signal for other users: %s', others)\n try:\n result = signals.on_cas_merge_users.send(sender=self, primary=user,\n others=others)\n except Exception:\n logger.exception('Merge signal failed!')\n else:\n logger.info('Sent merge signal. Result: %s', result)\n\n if users:\n changed = False\n if (self.set_email\n and 'email' in valid.attributes\n and valid.attributes['email'] != user.email\n ):\n user.email = valid.attributes['email']\n changed = True\n\n if (self.set_username\n and user.username != primary\n ):\n user.username = primary\n changed = True\n\n if changed:\n user.save()\n\n logger.info('Authenticated user: %s' % user)\n\n signals.on_cas_authentication.send(sender=self, user=user, attributes=valid.attributes)\n return user",
"def test_get(self):\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n\n self.assertEqual(api.user.get('chuck'), user)",
"def _CAS_login(self):\n import urllib\n self.ticket = current.request.vars.ticket\n if not current.request.vars.ticket:\n redirect(\"%s?service=%s\" % (self.cas_login_url,\n self.cas_my_url))\n else:\n url = \"%s?service=%s&ticket=%s\" % (self.cas_check_url,\n self.cas_my_url,\n self.ticket)\n data = urllib.urlopen(url).read()\n if data.startswith('yes') or data.startswith('no'):\n data = data.split('\\n')\n if data[0] == 'yes':\n if ':' in data[1]: # for Compatibility with Custom CAS\n items = data[1].split(':')\n a = items[0]\n b = len(items) > 1 and items[1] or a\n c = len(items) > 2 and items[2] or b\n else:\n a = b = c = data[1]\n return dict(user=a, email=b, username=c)\n return None\n import xml.dom.minidom as dom\n import xml.parsers.expat as expat\n try:\n dxml = dom.parseString(data)\n envelop = dxml.getElementsByTagName(\n \"cas:authenticationSuccess\")\n if len(envelop) > 0:\n res = dict()\n for x in envelop[0].childNodes:\n if x.nodeName.startswith('cas:') and len(x.childNodes):\n key = x.nodeName[4:].encode('utf8')\n value = x.childNodes[0].nodeValue.encode('utf8')\n if not key in res:\n res[key] = value\n else:\n if not isinstance(res[key], list):\n res[key] = [res[key]]\n res[key].append(value)\n return res\n except expat.ExpatError:\n pass\n return None # fallback",
"def casLogin(request):\n service = cas.getServiceUrl(request)\n username = unauthenticated_userid(request)\n if username is None:\n ticket = request.GET.get('ticket')\n if ticket is None:\n return cas.sendToService(request)\n username = cas.verifyCas20(request,ticket,service)\n if username is None:\n return 'no user'\n\n settings = request.registry.settings\n if 'pyramid_cas.callback.get_user' in settings:\n callable = settings['pyramid_cas.callback.get_user']\n module = callable.split('.')[0] + '.' + callable.split('.')[1]\n caller = sys.modules[module]\n method = getattr(caller,callable.split('.')[2])\n user = method(username,request)\n else:\n user = username\n headers = remember(request,user,max_age = '86400')\n return HTTPFound(location=request.route_url('home'),headers=headers)\n else:\n return HTTPFound(location='/not-allowed')",
"def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")",
"def test_get_user(self):\n user = User(self.client, \"test-user\", {})\n\n self.assertEqual(user.username, \"test-user\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertTrue(user.restricted)\n self.assertTrue(user.tfa_enabled)\n self.assertIsNotNone(user.ssh_keys)",
"def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)",
"def user(self):\n return self.create_user",
"def _verify_cas2(ticket, service):\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n params = {'ticket': ticket, 'service': service}\n url = (urljoin(settings.CAS_SERVER_URL, 'serviceValidate') + '?' +\n urlencode(params))\n page = urlopen(url)\n try:\n response = page.read()\n '''Remove \\n\\t character from response xml'''\n response = re.sub(r'(?m)[\\t\\n]+', \"\", response)\n tree = ElementTree.fromstring(response)\n if tree[0].tag.endswith('authenticationSuccess'):\n member_of = []\n access_token = None\n user_name = None\n first_name = None\n last_name = None\n department = None\n for xmlTag in tree[0]:\n if xmlTag.tag.endswith('user'):\n user_name = xmlTag.text\n elif xmlTag.tag.endswith('firstName'):\n first_name = xmlTag.text\n elif xmlTag.tag.endswith('lastName'):\n last_name = xmlTag.text\n\n user_args = {\n \"user_name\":user_name,\n \"first_name\": first_name,\n \"last_name\": last_name\n }\n \n return user_args\n else:\n return None\n except Exception, e:\n logger.error(e)\n finally:\n page.close()",
"def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )",
"def create_ticket(self, user):\n return Ticket.objects.create_ticket('test', user)",
"async def ensure_user(self, uid: int):\n user = self.get_user(uid)\n if user:\n return user\n return await self.fetch_user(uid)",
"def test_returns_new_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n new_user = User.objects.get(email=email)\r\n self.assertEquals(user, new_user)",
"def post(self):\r\n return create_user(request)",
"def verify_ticket(self, ticket, **kwargs):\n\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n page = self.fetch_saml_validation(ticket)\n\n try:\n user = None\n attributes = {}\n response = page.content\n tree = ElementTree.fromstring(response)\n # Find the authentication status\n success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')\n if success is not None and success.attrib['Value'].endswith('Success'):\n # User is validated\n name_identifier = tree.find('.//' + SAML_1_0_ASSERTION_NS + 'NameIdentifier')\n if name_identifier is not None:\n user = name_identifier.text\n attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')\n for at in attrs:\n if self.username_attribute in list(at.attrib.values()):\n user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text\n attributes['uid'] = user\n\n values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')\n if len(values) > 1:\n values_array = []\n for v in values:\n values_array.append(v.text)\n attributes[at.attrib['AttributeName']] = values_array\n else:\n attributes[at.attrib['AttributeName']] = values[0].text\n return user, attributes, None\n finally:\n page.close()",
"def create_user(self):\n return UserFactory.create()",
"def test_returns_existing_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n existing_user = User.objects.create(email=email)\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n self.assertEquals(user, existing_user)",
"def setup_user():\n if 'auth_user' in flask.session:\n user = models.User.query.get(flask.session['auth_user'])\n if user is None:\n # old bad cookie, no good\n del flask.session['auth_user']\n # save the user in `flask.g`, which is a set of globals for this request\n flask.g.user = user",
"def _thawUser( self, rgsUser ):\n\n\t\ttry:\n\t\t\toUser = db.user.UserEntry()\n\t\t\toUser.setID( rgsUser['bID'] )\n\t\t\toUser.setName( rgsUser['sName'] )\n\t\t\toUser.setDescription( rgsUser['sDescription'] )\n\t\t\tif rgsUser['sPassword'] != '__notset__':\n\t\t\t\t# Password change\n\t\t\t\toUser.setPassword( rgsUser['sPassword'], fHash=False )\n\t\t\toUser.setType( rgsUser['bType'] )\n\n\t\t\treturn oUser\n\n\t\texcept Exception, e:\n\t\t\traise Exception, 'error thawing user [%s]' % e",
"def test_start_new_verification(self):\r\n user = UserFactory.create(username=\"rusty\", password=\"test\")\r\n self.client.login(username=\"rusty\", password=\"test\")",
"def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")",
"def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})",
"def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user",
"def get_one_user():",
"def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user",
"def user():\n\n user = User.objects.create(name='Janek', surname='Kowalski',\n internal_id='PUHgjdJ', is_administrator=True,\n is_payment_creator=True, is_payment_approver=False,\n can_delete_payment=True)\n return user",
"def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})",
"def test_create_new_auth_customer(self):\n request = self.factory.get('/', follow=True)\n request.user = self.lisa\n request.session = {'session_key': 'lisa1234'}\n self.cm.process_request(request)\n self.assertEqual(request.customer.user, self.lisa)",
"def create_user(self):\n return User.objects.create_user(**self.user_data)",
"def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user"
]
| [
"0.7041426",
"0.64837295",
"0.6294565",
"0.6078544",
"0.60566604",
"0.6025016",
"0.6016404",
"0.5997659",
"0.59910583",
"0.5988254",
"0.59696954",
"0.59639955",
"0.595156",
"0.59499717",
"0.5919112",
"0.5894354",
"0.58817637",
"0.5876354",
"0.5868059",
"0.5853921",
"0.58517385",
"0.58082634",
"0.58020693",
"0.5779775",
"0.57731545",
"0.57724935",
"0.5738936",
"0.57314974",
"0.5729804",
"0.5727244"
]
| 0.71341777 | 0 |
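The top-ranked snippets in the row above all parse a CAS/SAML `serviceValidate`-style response with ElementTree. A compact Python 3 sketch of that round trip, assuming the usual `{http://www.yale.edu/tp/cas}` namespace and a hypothetical `validate_cas_ticket` helper (neither taken from the entries above):

from urllib.parse import urlencode, urljoin
from urllib.request import urlopen
from xml.etree import ElementTree

CAS_NS = '{http://www.yale.edu/tp/cas}'  # assumed CAS 2.0 XML namespace

def validate_cas_ticket(server_url, ticket, service):
    # Build .../serviceValidate?ticket=...&service=... and fetch the XML response.
    url = urljoin(server_url, 'serviceValidate') + '?' + urlencode(
        {'ticket': ticket, 'service': service})
    with urlopen(url) as page:  # network call; assumes a reachable CAS server
        tree = ElementTree.fromstring(page.read())
    success = tree.find(CAS_NS + 'authenticationSuccess')
    if success is None:
        return None  # ticket rejected by the CAS server
    user = success.find(CAS_NS + 'user')
    return user.text if user is not None else None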
Constructs a HMM from probability tables. | def fromtables(pi, t, e):
#sanity checks
nStates=len(pi)
assert(nStates==len(t) and nStates==len(e) and nStates>0)
nObs=len(e[0])
for i in range(nStates):
assert(len(t[i])==nStates and len(e[i])==nObs)
m=hmm(nStates, nObs)
m.pi=deepcopy(pi)
m.t=deepcopy(t)
m.e=deepcopy(e)
return m | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _market_hmm_example():\n states = [\"bull\", \"bear\", \"static\"]\n symbols = [\"up\", \"down\", \"unchanged\"]\n A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64)\n B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64)\n pi = np.array([0.5, 0.2, 0.3], np.float64)\n\n model = _create_hmm_tagger(states, symbols, A, B, pi)\n return model, states, symbols",
"def __init__(self):\n\n ## TODO: create the actual HMM\n\n self.hmm = [HmmStates('S', {'S': 0.8, 'R': 0.2}, {'H': 0.8, 'G': 0.2}),\n HmmStates('R', {'S': 0.4, 'R': 0.6}, {'H': 0.4, 'G': 0.6})] # dummy HMM for testing purpose\n\n # prior probabilities TODO: compute prior probabilities from HMM\n self.prior = {'S': 2/3, 'R': 1/3}",
"def hdmatrix(self):\n hdmat = np.zeros((len(self.pulsars), len(self.pulsars)))\n\n for i,pulsar1 in enumerate(self.pulsars):\n for j,pulsar2 in enumerate(self.pulsars):\n hdmat[i,j] = hellingsdowns_factor(pulsar1, pulsar2)\n self.hdm = hdmat\n return hdmat",
"def set_params_hmm_exp1(hmm) :\n\thmm.length = 12\n\thmm.dims = [(2,3)]*hmm.length # (latent,emit) dimspace\n\thmm.emit = [\n\t\t[[0.6,0.2,0.2],[0.2,0.6,0.2]]\n\t]*hmm.length\n\thmm.trans = [\n\t\t[[0.7,0.3],[0.3,0.7]]\n\t]*hmm.length\n\thmm.seqmap = [{'a':0,'b':1}]*hmm.length\n\thmm.seqmap2 = [{0:'a',1:'b'}]*hmm.length\n\thmm.featmap = [{'H':0,'B':1,'L':2}]*hmm.length\n\thmm.initprob = [0.5,0.5]\n\thmm.trained = True",
"def make_tables(self):\n r = np.zeros((self.size*self.size, 4))\n p = np.zeros((self.size*self.size, 4, self.size*self.size))\n directions = np.array([[1, -1, 0, 0], [0, 0, -1, 1]])\n for x in range(self.size):\n for y in range(self.size):\n for a in range(4):\n i = x*self.size + y\n r[i, a] = self.reward((x, y))\n if (x, y) == (self.size-1, self.size-1) or \\\n (x, y) == (self.mid, self.mid):\n p[i, a, 0] = 1\n else:\n for d in range(4):\n dx, dy = directions[:, d]\n x_ = max(0, min(self.size-1, x+dx))\n y_ = max(0, min(self.size-1, y+dy))\n j = x_*self.size + y_\n if self.noise is not None:\n p[i, a, j] += 0.3 * self.noise[x, y, a, d] + 0.7 * int(a == d)\n else:\n p[i, a, j] += int(a == d)\n return r, p",
"def hmm(pc, n1, n2, w, dt, rounding):\n\n t = 0 # start time\n ts = 0 #start time of stimulus \n \n x = 'default' #start hidden state\n obs = [] #observations\n states = [] #hidden states\n count = 0 #counter\n transitionYes = 0 #keeps track of whether transition has happened\n foreperiodSteps = int((6/dt)+1)\n \n \n while round(ts,rounding) < w: #this ends when w is over, rounding to avoid\n #floating point no. comaparison errors\n states.append(x)\n \n \n if transitionYes == 1:\n ts = ts+dt\n \n #generating observations, 1 is cue, 0 is default\n if x == 'cue':\n k = np.random.binomial(1,n2)\n if k == 1:\n obs.append(1) \n else:\n obs.append(0)\n \n if x == 'default':\n k = np.random.binomial(1,n1)\n if k == 1:\n obs.append(0) \n else:\n obs.append(1)\n \n if count < foreperiodSteps and transitionYes == 0:\n #\n r = 1/(foreperiodSteps-count)\n #print(r)\n i = np.random.binomial(1, r) #transition out of default if i == 1\n if i == 1:\n transitionYes = 1\n #tLeft = round(t,rounding)\n j = np.random.binomial(1, pc) #on transitioning out of default, prob of \n if j == 1: #cue is pc, and going back to default is 1-pc\n x = 'cue'\n else:\n x = 'default'\n \n #print(r, t, count, ts, sep=' ')\n t = t+dt\n count = count +1\n \n \n return obs, states",
"def __init__(self,num_states): \n self.data = {}\n self.data_matrix = None\n self.K = num_states # number of states\n self.state_transition_mat = {} # The transition probability matrix ! The Matrix is a dict here !!\n self.state_symbol_prob = {} # The emission probability sequences !\n self.state_initial_prob= {} #The initial state probability distributions !\n \n self.forward_trellis = {} # Chart for forwrad trellis\n self.backward_trellis = {} # Chart for backward Trellis\n self.posterior_state_trellis = {} # Posterior probability of each state at time\n self.posterior_transition_trellis = {} # Posterior probability for each of the state transitions !\n\n self.forward_scaling_vector = {} # Forward scaling factors indexed by time intervals !\n self.backward_scaling_vector = {} # Backward Scaling factors !!\n\n self.model = {} # The trained HMM model !!\n self.N = 0 # total number of instances !\n self.T = 0 # Total number of time slots !\n\n # Initialize Corpus\n self._corpusReader(input_file)\n self._initializeMatrices()",
"def __init__(self, m, d):\n if not m or not d:\n raise ValueError(\"Table size (m) and amount of hash functions (d)\"\n \" must be non-zero\")\n self.m = m\n self.d = d\n self.n = 0\n self.tables = []\n \n \n for _ in xrange(d):\n table = array.array(\"l\", (0 for _ in xrange(m)))\n self.tables.append(table)",
"def train_hmm_compute_statistics(features, labels):\n unique_labels = np.unique(labels)\n n_comps = len(unique_labels)\n\n n_feats = features.shape[0]\n\n if features.shape[1] < labels.shape[0]:\n print(\"trainHMM warning: number of short-term feature vectors \"\n \"must be greater or equal to the labels length!\")\n labels = labels[0:features.shape[1]]\n\n # compute prior probabilities:\n class_priors = np.zeros((n_comps,))\n for i, u_label in enumerate(unique_labels):\n class_priors[i] = np.count_nonzero(labels == u_label)\n # normalize prior probabilities\n class_priors = class_priors / class_priors.sum()\n\n # compute transition matrix:\n transmutation_matrix = np.zeros((n_comps, n_comps))\n for i in range(labels.shape[0]-1):\n transmutation_matrix[int(labels[i]), int(labels[i + 1])] += 1\n # normalize rows of transition matrix:\n for i in range(n_comps):\n transmutation_matrix[i, :] /= transmutation_matrix[i, :].sum()\n\n means = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n means[i, :] = \\\n np.array(features[:,\n np.nonzero(labels == unique_labels[i])[0]].mean(axis=1))\n\n cov = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n \"\"\"\n cov[i, :, :] = np.cov(features[:, np.nonzero(labels == u_labels[i])[0]])\n \"\"\"\n # use line above if HMM using full gaussian distributions are to be used\n cov[i, :] = np.std(features[:,\n np.nonzero(labels == unique_labels[i])[0]],\n axis=1)\n\n return class_priors, transmutation_matrix, means, cov",
"def make_table(m, n):\n return np.array([[0] * n for _ in range(m)], dtype=float)",
"def generate_latex_table(true_hmm, sampled_hmm_list, conf=0.95, dt=1, time_unit='ms', obs_name='force', obs_units='pN', outfile=None):\n\n # confidence interval\n for sampled_hmm in sampled_hmm_list:\n sampled_hmm.set_confidence(conf)\n # dt\n dt = float(dt)\n # nstates\n nstates = sampled_hmm_list[0].nstates\n\n table = r\"\"\"\n\\begin{table*}\n\\caption{{\\bf Estimated mean model parameters and confidence intervals for synthetic timeseries data}}\n\\label{table:synthetic-confidence-intervals}\n\\begin{tabular*}{\\textwidth}{@{\\extracolsep{\\fill}}lccccc}\n\\hline\n& & & \\multicolumn{3}{c}{\\bf Estimated Model Parameters} \\\\ \\cline{4-6}\n\\multicolumn{2}{l}{\\bf Property} & \\bf True Value & \\bf 1 000 observations & \\bf 10 000 observations & \\bf 100 000 observations\\\\ \\hline\n\"\"\"\n # Stationary probability.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tEquilibrium probability '\n table += '\\t\\t& $\\pi_{%d}$ & $%0.3f$' % (i+1, true_hmm.stationary_distribution[i])\n for sampled_hmm in sampled_hmm_list:\n p = sampled_hmm.stationary_distribution_mean\n p_lo, p_hi = sampled_hmm.stationary_distribution_conf\n table += ' & $%0.3f_{\\:%0.3f}^{\\:%0.3f}$ ' % (p[i], p_lo[i], p_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # Transition probabilities.\n for i in range(nstates):\n for j in range(nstates):\n if (i == 0) and (j==0):\n table += '\\t\\tTransition probability ($\\Delta t = $%s) ' % (str(dt)+' '+time_unit)\n table += '\\t\\t& $T_{%d%d}$ & $%0.3f$' % (i+1, j+1, true_hmm.transition_matrix[i,j])\n for sampled_hmm in sampled_hmm_list:\n P = sampled_hmm.transition_matrix_mean\n P_lo, P_hi = sampled_hmm.transition_matrix_conf\n table += ' & $%0.3f_{\\:%0.3f}^{\\:%0.3f}$' % (P[i,j], P_lo[i,j], P_hi[i,j])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # Transition rates via pseudogenerator.\n index = 0\n for i in range(nstates):\n for j in range(nstates):\n if (i != j):\n if (index==0):\n table += '\\t\\tTransition rate (%s$^{-1}$) ' % time_unit\n Ktrue = compute_rate(true_hmm.transition_matrix, dt)\n table += '\\t\\t& $k_{%d%d}$ & $%2.3f$' % (i+1, j+1, Ktrue[i,j])\n for sampled_hmm in sampled_hmm_list:\n P = sampled_hmm.transition_matrix_mean\n P_lo, P_hi = sampled_hmm.transition_matrix_conf\n K = compute_rate(P, dt)\n K_lo = compute_rate(P_lo, dt)\n K_hi = compute_rate(P_hi, dt)\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (K[i,j], K_lo[i,j], K_hi[i,j])\n index += 1\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State mean lifetimes.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState mean lifetime (%s) ' % time_unit\n l = true_hmm.lifetimes\n l *= dt\n table += '\\t\\t& $t_{%d}$ & $%.3f$' % (i+1, l[i])\n for sampled_hmm in sampled_hmm_list:\n l = sampled_hmm.lifetimes_mean\n l *= dt\n l_lo, l_hi = sampled_hmm.lifetimes_conf\n l_lo *= dt; l_hi *= dt\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (l[i], l_lo[i], l_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State relaxation timescales.\n for i in range(nstates-1):\n if (i == 0):\n table += '\\t\\tRelaxation time (%s) ' % time_unit\n t = true_hmm.timescales\n t *= dt\n table += '\\t\\t& $\\\\tau_{%d}$ & $%.3f$' % (i+1, t[i])\n for sampled_hmm in sampled_hmm_list:\n t = sampled_hmm.timescales_mean\n t *= dt\n t_lo, t_hi = sampled_hmm.timescales_conf\n t_lo *= dt; t_hi *= dt\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (t[i], t_lo[i], t_hi[i])\n table += ' 
\\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n if True:\n table += '\\t\\t\\hline' + '\\n'\n\n # State mean forces.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState %s mean (%s) ' % (obs_name, obs_units)\n m = true_hmm.output_model.means\n table += '\\t\\t& $\\mu_{%d}$ & $%.3f$' % (i+1, m[i])\n for sampled_hmm in sampled_hmm_list:\n m = sampled_hmm.means_mean\n m_lo, m_hi = sampled_hmm.means_conf\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (m[i], m_lo[i], m_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State force standard deviations.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState %s std dev (%s) ' % (obs_name, obs_units)\n s = true_hmm.output_model.sigmas\n table += '\\t\\t& $s_{%d}$ & $%.3f$' % (i+1, s[i])\n for sampled_hmm in sampled_hmm_list:\n s = sampled_hmm.sigmas_mean\n s_lo, s_hi = sampled_hmm.sigmas_conf\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (s[i], s_lo[i], s_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n table += r\"\"\"\\hline\n\\end{tabular*}\n\\end{table*}\n\"\"\"\n\n # Write to file if desired.\n if outfile is not None:\n f = open(outfile,'w')\n f.write(table)\n f.close()\n\n return table",
"def test_make_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n table_factory.hmp()",
"def get_fitted_hmm(X, n_components):\n\tHMM = hmm.GaussianHMM(n_components=n_components, covariance_type=\"full\", n_iter=100)\n\tprint(HMM)\n\tHMM.fit(X)\n\tmodel = HMM\n\treturn model",
"def learn_hmm(dict_path = wordlist, training_inputs = inputs_path,\n training_outputs = outputs_path):\n init_counts()\n words = open ( dict_path, 'r' )\n states = set(['word_start'])\n trans = {'word_start' : {}}\n observations = tuple ( punctuation + ' ' + digits + ascii_lowercase)\n \n # Compute states and state transition probabilities\n for w in words:\n w = w.lower()\n w = w[:-1] # remove EOL char\n for i in range( len(w) ): \n new = w[:i+1]\n if new not in states:\n states.add(new)\n trans[new] = {}\n if i == 0:\n trans['word_start'][new] = eta * prefix_rel_freq(w[:i+1],'')\n else:\n prev = w[:i]\n trans[prev][new] = eta * prefix_rel_freq(w[:i+1],w[:i])\n if i == len(w) - 1: # last character in a word\n trans[new]['word_start'] = word_rel_freq(w,w[:i])\n\n for state in trans:\n trans[state][state] = 1 - eta\n states = list(states)\n num_states = len(states)\n num_obs = len(observations)\n\n # Compute observation emission probabilities via MLE\n observed_chars = (char.lower()\n for line in open(training_inputs)\n for char in line[:-1])\n true_chars = (char.lower()\n for line in open(training_outputs)\n for char in line[:-1])\n paired = itertools.izip(observed_chars,true_chars)\n\n def c_to_i(s):\n if s == 'word_start':\n return len(ascii_lowercase)\n else:\n return ascii_lowercase.index(s)\n\n def c_from_i(i):\n if i == len(ascii_lowercase):\n return 'word_start'\n else:\n return ascii_lowercase[i]\n\n def to_index(letter,ob):\n return c_to_i(letter) * num_obs + observations.index(ob) \n def from_index(i):\n char_index = i / num_obs\n ob_index = i % num_obs\n return (c_from_i(char_index),observations[ob_index])\n\n # Construct linear programming problem for cvxopt\n P = matrix(numpy.zeros( (27 * num_obs,27 * num_obs) ),tc='d')\n q = matrix(numpy.zeros(27 * num_obs),tc='d')\n G = matrix(numpy.diag([-1] * (27 * num_obs)),tc='d')\n h = matrix(numpy.zeros(27 * num_obs),tc='d')\n A = numpy.zeros( (27, 27*num_obs) )\n b = matrix(numpy.ones(27),tc='d')\n # construct q\n for o,a in paired:\n if o not in observations: continue\n if a == '-':\n q[to_index(last_a,o)] += 1\n elif a != ' ':\n if a not in ascii_lowercase: continue\n q[to_index(a,o)] += 1\n last_a = a\n else:\n q[to_index('word_start',o)] += 1\n last_a = 'word_start'\n q = -q # Invert since we want maximum not minimum\n\n # construct A\n for i in range(27):\n for k in range(num_obs):\n A[i][i * num_obs + k] = 1\n A = matrix(A,tc='d')\n\n # Solve linear program\n sol = list(solvers.qp(P,q,G,h,A,b)['x'])\n\n # Convert solution into dictionary of emission probabilities\n emission_probs = dict( [(s,{}) for s in states] )\n for s in emission_probs.keys():\n for o in observations:\n if s != 'word_start':\n emission_probs[s][o] = sol[to_index(s[-1],o)]\n else:\n emission_probs[s][o] = sol[to_index(s,o)]\n\n return (tuple(states), observations, trans, emission_probs)",
"def hmm(self, sentence):\r\n viterbi_dict = {i: {} for i in range(len(sentence))}\r\n\r\n for i in range(len(sentence)):\r\n temp_dict = {}\r\n if i == 0:\r\n for pos in self.position_list:\r\n temp_dict[pos] = tuple(\r\n [\r\n (\r\n (\r\n (-math.log(self.initial_probability[pos]))\r\n + (\r\n -math.log(\r\n self.emission_probability[pos][sentence[i]]\r\n )\r\n )\r\n )\r\n if sentence[i] in self.emission_probability[pos]\r\n else (\r\n (-math.log(self.initial_probability[pos]))\r\n + (-math.log((1 / float(10 ** 8))))\r\n )\r\n ),\r\n \"Start\",\r\n ]\r\n )\r\n viterbi_dict[i] = temp_dict\r\n\r\n else:\r\n for pos in self.position_list:\r\n emi = (\r\n -math.log(self.emission_probability[pos][sentence[i]])\r\n if sentence[i] in self.emission_probability[pos]\r\n else (-math.log((1 / float(10 ** 8))))\r\n )\r\n min_val = min(\r\n [\r\n (\r\n (\r\n viterbi_dict[i - 1][pos_prev][0]\r\n + (\r\n -math.log(\r\n self.transition_probability[pos_prev][pos]\r\n )\r\n )\r\n ),\r\n pos_prev,\r\n )\r\n for pos_prev in self.position_list\r\n ]\r\n )\r\n temp_dict[pos] = tuple([emi + min_val[0], min_val[1]])\r\n viterbi_dict[i] = temp_dict\r\n i = i + 1\r\n pos_list = []\r\n prev = \"\"\r\n for i in range(len(sentence) - 1, -1, -1):\r\n\r\n if i == len(sentence) - 1:\r\n minimum = min(list(viterbi_dict[i].values()))\r\n\r\n for key in viterbi_dict[i].keys():\r\n if (\r\n viterbi_dict[i][key][0] == minimum[0]\r\n and viterbi_dict[i][key][1] == minimum[1]\r\n ):\r\n\r\n pos_list.append(key)\r\n prev = minimum[1]\r\n else:\r\n pos_list.append(prev)\r\n prev = viterbi_dict[i][prev][1]\r\n\r\n pos_list.reverse()\r\n\r\n return pos_list",
"def make_table(m, n):\n return [[0] * n for _ in range(m)]",
"def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 1))\n self.c = np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))",
"def build_table(numpoints, table_oversamp, grid_size, im_size, ndims, order, alpha):\n table = []\n\n # build one table for each dimension\n for i in range(ndims):\n J = numpoints[i]\n L = table_oversamp[i]\n K = grid_size[i]\n N = im_size[i]\n\n # The following is a trick of Fessler.\n # It uses broadcasting semantics to quickly build the table.\n t1 = J / 2 - 1 + np.array(range(L)) / L # [L]\n om1 = t1 * 2 * np.pi / K # gam\n s1 = build_spmatrix(\n np.expand_dims(om1, 0),\n numpoints=(J,),\n im_size=(N,),\n grid_size=(K,),\n n_shift=(0,),\n order=(order[i],),\n alpha=(alpha[i],)\n )\n h = np.array(s1.getcol(J - 1).todense())\n for col in range(J - 2, -1, -1):\n h = np.concatenate(\n (h, np.array(s1.getcol(col).todense())), axis=0)\n h = np.concatenate((h.flatten(), np.array([0])))\n\n table.append(h)\n\n return table",
"def __init__(self, dim_hv, dim_hw, msg_dim):\n super(PairMessageGenerator, self).__init__()\n self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim\n self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048\n self.mlp = nn.Sequential(\n nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity\n nn.Linear(self.in_dim, self.msg_dim),\n nn.LeakyReLU(0.2)\n )",
"def sample_HMM(parameters, T, seed=None):\n\n K = parameters[\"num_states\"]\n pi_0 = parameters[\"init_prob\"]\n A = parameters[\"trans_matrix\"]\n\n D = parameters[\"obs_dim\"]\n mean = parameters[\"mean\"]\n cov = parameters[\"cov\"]\n\n np.random.seed(seed)\n\n # create empty numpy arrays to store samples\n states = np.empty(T, np.int32)\n obs = np.empty((T, D), np.float32)\n\n for t in range(T):\n if t == 0:\n # sample the first state from initial distribution\n states[t] = np.random.choice(K, p=pi_0)\n else:\n # get the next state based on transition matrix (the row\n # corresponding to the previous state)\n states[t] = np.random.choice(K, p=A[states[t - 1]])\n\n # sample observation from the corresponding Gaussian distribution\n obs[t] = np.random.multivariate_normal(\n mean[states[t]], cov[states[t]])\n\n return states, obs",
"def tabulate_histogram(self):\n\n # Generate a table of uniform variates\n from mitsuba.core import Float, Vector2f, Vector2u, Float32, \\\n UInt64, PCG32\n\n rng = PCG32(initseq=ek.arange(UInt64, self.sample_count))\n\n samples_in = getattr(mitsuba.core, 'Vector%if' % self.sample_dim)()\n for i in range(self.sample_dim):\n samples_in[i] = rng.next_float32() if Float is Float32 \\\n else rng.next_float64()\n\n self.pdf_start = time.time()\n\n # Invoke sampling strategy\n samples_out = self.sample_func(samples_in)\n\n if type(samples_out) is tuple:\n weights_out = samples_out[1]\n samples_out = samples_out[0]\n else:\n weights_out = Float(1.0)\n\n # Map samples into the parameter domain\n xy = self.domain.map_backward(samples_out)\n\n # Sanity check\n eps = self.bounds.extents() * 1e-4\n in_domain = ek.all((xy >= self.bounds.min - eps) &\n (xy <= self.bounds.max + eps))\n if not ek.all(in_domain):\n self._log('Encountered samples outside of the specified '\n 'domain: %s' % str(ek.compress(xy, ~in_domain)))\n self.fail = True\n\n # Normalize position values\n xy = (xy - self.bounds.min) / self.bounds.extents()\n xy = Vector2u(ek.clamp(xy * Vector2f(self.res), 0,\n Vector2f(self.res - 1)))\n\n # Compute a histogram of the positions in the parameter domain\n self.histogram = ek.zero(Float, ek.hprod(self.res))\n\n ek.scatter_add(\n target=self.histogram,\n index=xy.x + xy.y * self.res.x,\n source=weights_out\n )\n\n self.pdf_end = time.time()\n\n histogram_min = ek.hmin(self.histogram)\n if not histogram_min >= 0:\n self._log('Encountered a cell with negative sample '\n 'weights: %f' % histogram_min)\n self.fail = True\n\n self.histogram_sum = ek.hsum(self.histogram) / self.sample_count\n if self.histogram_sum > 1.1:\n self._log('Sample weights add up to a value greater '\n 'than 1.0: %f' % self.histogram_sum)\n self.fail = True",
"def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m",
"def hmm(training_sentences, reducedtagset):\n transitions = DefaultDict(DefaultDict(0))\n emissions = DefaultDict(DefaultDict(0))\n wordcounts = DefaultDict(0)\n tagcounts = DefaultDict(0)\n\n for line in training_sentences:\n\tprevtag = '<START>' # Before each sentence, begin in START state\n tagcounts['<START>'] += 1\n\tfor taggedword in line.split():\n\t (word, tag) = re.split('(?<!\\\\\\)\\/', taggedword)\n\n if reducedtagset:\n \tif re.match('VB', tag) is not None: tag = 'VB'\n \telif re.match('NN', tag) is not None: tag = 'NN'\n \telif re.match('JJ', tag) is not None: tag = 'JJ'\n \telif re.match('RB', tag) is not None: tag = 'RB'\n\n\t transitions[prevtag][tag] += 1\n\t emissions[tag][word] += 1\n\t wordcounts[word] += 1\n tagcounts[tag] += 1\n prevtag = tag\n\n print emissions.keys()\n \n return hmmtuple(transitions, emissions, wordcounts, tagcounts)",
"def _create_model(n_bins=10, alpha=0.1, tol=0.1, contamination=0.1):\n n_bins = int(n_bins)\n\n hbos = HBOS(\n n_bins=n_bins,\n alpha=alpha,\n tol=tol,\n contamination=contamination\n )\n\n print('Created Model: {}'.format(hbos))\n\n return hbos",
"def defstuff():\n\t\n\tglobal PA, PB, col, col2, rng, xlimits, nbin, lPbw, WJK, outTab\n\t\n\tPA = ['Per1', 'Per2', 'Per3', 'Per4', 'Per5', 'Per6', 'Per7', 'Per8', 'Per9', 'Per10'] # Period columns for A sample\n\tPB = ['P_1', 'P_2', 'P_3'] # Period columns for B sample\n\t# logPB = ['logP_1', 'logP_2', 'logP_3'] \n\tcol = {1:'r', 2:'g', 3:'b'} \n\tcol2 = {1:'m', 2:'y', 3:'k'}\n\trng = (8,14) # Magnitude range\n\txlimits = (0.3 ,3.0) # X-axis plot limits\n\tbw = 0.01 # histogram bin width -- not global!\n\tnbin = (max(rng)-min(rng))/bw # How many bins for histogram.\n\n\t################# CAREFUL!!!!! #####################\n\tlPbw = 0.025 # log period bin width\n\t\n\toutTab = Table(np.zeros((len(B), 11)), names=('ID', 'WJK', 'est_mag', 'delta_mag', 'delta1', 'delta2', 'delta3', 'KDE_mag', 'KDEdelta_mag', 'sigma', 'nstar'), dtype=('string', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64' ))",
"def form_population_matrix(N, hb, Nb, ht, Nt, pac, age_and_gender):\n \n # 1 allocated to iso-boxes and tents\n household_column = create_household_column(hb, Nb, ht, Nt)\n # 2 allocate an infector\n disease_column = create_diseasestate_column(N)\n # 3 weibull distribution\n dsymptom_column = create_daystosymptoms_column(N)\n # 4 np.zeros(num_ppl)\n daycount_column = create_daycount_column(N)\n # 5 hard coded number of chronics\n asymp_column = create_asymp_column(N, pac)\n # 6 age\n age_column = create_age_column(age_and_gender[:,0])\n # 7 gender\n gender_column = create_gender_column(age_and_gender[:,1])\n # 8 formula based on age\n chronic_column = create_chronic_column(N, age_column)\n # 5 new_asymp_column[chronic_column==1]=0\n new_asymp_column = adjust_asymp_with_chronic(asymp_column, chronic_column)\n # 9\n wanderer_column = create_wanderer_column(gender_column, age_column)\n\n pop_matrix = np.column_stack((household_column, disease_column, dsymptom_column,\n daycount_column, new_asymp_column, age_column,\n gender_column, chronic_column, wanderer_column))\n \n assert pop_matrix.shape == (N, 9)\n return pop_matrix",
"def initiatilise_empty_probability_sets(self):\r\n \r\n self.letters = probabilities.ProbabilitySet(adjust=True, redo_repeats=True)\r\n self.punctuation_endline = probabilities.ProbabilitySet()\r\n self.punctuation_midline = probabilities.ProbabilitySet()\r\n self.punctuation_matched = probabilities.ProbabilitySet()\r\n self.word_constructions = probabilities.ProbabilitySet(redo_repeats=True)\r\n self.word_sizes = probabilities.ProbabilitySet(redo_repeats=True)\r\n self.sentence_sizes = probabilities.ProbabilitySet(redo_repeats=True)\r\n self.paragraph_sizes = probabilities.ProbabilitySet(redo_repeats=True)",
"def create_parameter_table(problem: petab.Problem,\n nominal_parameters):\n\n df = petab.create_parameter_df(\n problem.sbml_model, problem.condition_df,\n problem.observable_df, problem.measurement_df,\n include_optional=True, lower_bound=1e-3, upper_bound=1e5)\n\n df['hierarchicalOptimization'] = 0\n df.loc['scaling_x1_common', 'hierarchicalOptimization'] = 1\n df.loc['offset_x2_batch_0', 'hierarchicalOptimization'] = 1\n df.loc['offset_x2_batch_1', 'hierarchicalOptimization'] = 1\n df.loc['x1withsigma_sigma', 'hierarchicalOptimization'] = 1\n\n for pid, val in nominal_parameters.items():\n if pid in df.index:\n df.loc[pid, ptc.NOMINAL_VALUE] = val\n df.loc[pid, ptc.PARAMETER_SCALE] = ptc.LOG10\n df.loc[pid, ptc.ESTIMATE] = 1\n elif pid.startswith('noiseParameter') \\\n or pid.startswith('observableParameter'):\n continue\n else:\n print(\"extra parameter\", pid, val)\n\n # offsets can be negative: adapt scaling and bounds:\n offsets = df.index.str.startswith('offset_')\n df.loc[offsets, ptc.PARAMETER_SCALE] = ptc.LIN\n\n problem.parameter_df = df",
"def __init__( self, observed, hidden ):\n from numpy.random import gamma\n self.observed = array( observed )\n self.hidden = array( hidden )\n self.W = empty( (len(observed), len(hidden)), dtype = numpy.object_ )\n\n # randomise W\n for i, o in enumerate(observed):\n for j, h in enumerate(hidden):\n self.W[i,j] = asmatrix( -gamma( 1.0, size = (o.family.size(), h.family.size()) ) )",
"def createHMM(self, qtc_seq, qtc_type='qtcc'):\n \n try:\n qtc_state_seq = self.qtc2state(qtc_seq)\n trans, emi = self.createCNDTransEmiProb(qtc_type)\n qtchmm = self.trainHMM(qtc_state_seq, trans, emi, qtc_type)\n print '...done'\n self.hmm = qtchmm\n return qtchmm\n except QtcException as e:\n print e.message\n return None"
]
| [
"0.61488056",
"0.5965925",
"0.58499694",
"0.575859",
"0.5749717",
"0.5712239",
"0.57099265",
"0.56164676",
"0.56137776",
"0.56059283",
"0.5592278",
"0.55705166",
"0.55137616",
"0.54979444",
"0.549498",
"0.54608506",
"0.54598826",
"0.544192",
"0.54352206",
"0.5413207",
"0.5385556",
"0.53672373",
"0.5285552",
"0.5246279",
"0.5214994",
"0.51982075",
"0.5165303",
"0.51525486",
"0.5140051",
"0.5134561"
]
| 0.70774823 | 0 |
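A minimal usage sketch for the kind of tables `fromtables` above expects: a two-state, three-symbol model with row-stochastic transition and emission tables. The concrete numbers and the standalone `check_tables` helper are illustrative assumptions; the constructor above only asserts shapes, so the row-sum check here is an extra sanity step.

def check_tables(pi, t, e):
    # Mirror the shape assertions in fromtables, plus an extra row-sum check.
    n_states, n_obs = len(pi), len(e[0])
    assert len(t) == n_states and len(e) == n_states and n_states > 0
    for i in range(n_states):
        assert len(t[i]) == n_states and len(e[i]) == n_obs
        assert abs(sum(t[i]) - 1.0) < 1e-9
        assert abs(sum(e[i]) - 1.0) < 1e-9
    assert abs(sum(pi) - 1.0) < 1e-9

# Illustrative two-state, three-symbol tables (not taken from any dataset row).
pi = [0.6, 0.4]
t = [[0.7, 0.3], [0.4, 0.6]]
e = [[0.5, 0.4, 0.1], [0.1, 0.3, 0.6]]
check_tables(pi, t, e)  # passes; fromtables(pi, t, e) above would deep-copy these into a new model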
Learns from a list of observation sequences and their associated ground truth. | def learn(self, observations, ground_truths):
assert(len(observations)==len(ground_truths))
self.__init__(self.nStates, self.nObs)
N=len(observations)
for i in range(N):
o=observations[i]
t=ground_truths[i]
assert(len(o)==len(t))
self.pi[t[0]]+=1
for j in range(len(t)-1):
self.t[t[j]][t[j+1]]+=1
self.e[t[j]][o[j]]+=1
j+=1
self.e[t[j]][o[j]]+=1
for i in range(self.nStates):
self.pi[i]/=N
Zt=sum(self.t[i])
Ze=sum(self.e[i])
for j in range(self.nStates):
self.t[i][j]/=max(1, Zt)
for j in range(self.nObs):
self.e[i][j]/=max(1, Ze) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_sequences(notes, n_vocab):\n sequence_length = 50\n\n # get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n network_output = []\n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n\n # reshape the input into a format compatible with LSTM layers\n network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))\n # normalize input\n network_input = network_input / float(n_vocab)\n\n network_output = np_utils.to_categorical(network_output)\n\n return (network_input, network_output)",
"def prediction2(sequence, listPaires, rules) :\n prediction=[]\n for e in sequence:\n for r in rules :\n if r[0]==e[0]:\n next_event=predict_event2(r,e)\n prediction.append(next_event)\n return prediction",
"def batchify(self, observations):\n # valid examples\n exs = [ex for ex in observations if 'text' in ex]\n # the indices of the valid (non-empty) tensors\n valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]\n\n # set up the input tensors\n batchsize = len(exs)\n if batchsize == 0:\n return None, None, None\n # tokenize the text\n parsed_x = [deque(maxlen=self.truncate) for _ in exs]\n for dq, ex in zip(parsed_x, exs):\n dq += self.parse(ex['text'])\n # parsed = [self.parse(ex['text']) for ex in exs]\n max_x_len = max((len(x) for x in parsed_x))\n for x in parsed_x:\n # left pad with zeros\n x.extendleft([self.fairseq_dict.pad()] * (max_x_len - len(x)))\n xs = torch.LongTensor(parsed_x)\n\n # set up the target tensors\n ys = None\n if 'labels' in exs[0]:\n # randomly select one of the labels to update on, if multiple\n labels = [random.choice(ex.get('labels', [''])) for ex in exs]\n parsed_y = [deque(maxlen=self.truncate) for _ in labels]\n for dq, y in zip(parsed_y, labels):\n dq.extendleft(reversed(self.parse(y)))\n for y in parsed_y:\n y.append(self.fairseq_dict.eos())\n # append EOS to each label\n max_y_len = max(len(y) for y in parsed_y)\n for y in parsed_y:\n y += [self.fairseq_dict.pad()] * (max_y_len - len(y))\n ys = torch.LongTensor(parsed_y)\n return xs, ys, valid_inds",
"def _create_sequence_and_mismask(self, training_set, missing_residues):\n seq_list = []\n mismask_list = []\n \n if not self.show_warnings:\n warning_list = warnings.filters[:]\n warnings.filterwarnings('ignore', category=TorusDBNWarning)\n \n training_set_count = len(training_set)\n for filename in training_set:\n self.info('Reading data from training file %s...' % (filename))\n try:\n sequences, mismasks = create_sequence_from_file(\n filename, missing_residues, not self.show_warnings)\n seq_list += sequences\n mismask_list += mismasks\n except TorusDBNException as error:\n warnings.warn(\n \"%s The file was not included in the training set.\" % error,\n TorusDBNWarning\n )\n training_set_count -= 1\n self.info('\\n%d files included in the training set.' % (training_set_count))\n if not self.show_warnings:\n warnings.filters = warning_list\n \n return seq_list, mismask_list",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n\tlabel_map = {label : i for i, label in enumerate(label_list)}\n\n\tfeatures = []\n\tfor (ex_index, example) in enumerate(examples):\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = None\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\t_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0\t0\t0\t 0\t 0 0\t1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0\t 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\n\t\t\n\t\tlabels_ids = [label_map[example.label]]\n\n#\t\t label_id = label_map[example.label]\n\t\tif ex_index < 0:\n\t\t\tlogger.info(\"*** Example ***\")\n\t\t\tlogger.info(\"guid: %s\" % (example.guid))\n\t\t\tlogger.info(\"tokens: %s\" % \" \".join(\n\t\t\t\t\t[str(x) for x in tokens]))\n\t\t\tlogger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n\t\t\tlogger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n\t\t\tlogger.info(\n\t\t\t\t\t\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\t\t\tlogger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n\t\tfeatures.append(\n\t\t\t\tInputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_ids=labels_ids))\n\treturn features",
"def ForegroundSeqs(sequences):\n seqs = []\n yts = [\"Y\", \"T\", \"S\"]\n for motif in sequences:\n motif = motif.upper()\n assert \"-\" not in motif, \"gap in motif\"\n assert motif[5] in yts, \"WRONG CENTRAL AMINO ACID\"\n seqs.append(Seq(motif, alphabet=AAlist))\n return seqs",
"def prepare_sequences(notes, pitch_names, n_vocab):\n \n # Length of note sequences to be created for model prediction seed\n sequence_length = 25\n \n # Create a dictionary to map note pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitch_names))\n\n # Create empty lists for note sequence inputs (many notes)\n network_input = []\n \n # Create input sequences (of length 'sequence_length')\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n\n # Number of different input sequence patterns\n n_patterns = len(network_input)\n\n # Reshape the input into a format compatible with LSTM layers\n normalized_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n \n # Normalize the network input by dividing by n_vocab (number of unique notes, rests, and chords)\n normalized_input = normalized_input / float(n_vocab)\n\n return (network_input, normalized_input)",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n tokens_a_longer_max_seq_length = 0\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n\n len_tokens_a = len(tokens_a)\n len_tokens_b = 0\n\n\n\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n len_tokens_b = len(tokens_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n if (len_tokens_a + len_tokens_b) > (max_seq_length - 2):\n tokens_a_longer_max_seq_length += 1\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids)==max_seq_length\n assert len(input_mask)==max_seq_length\n assert len(segment_ids)==max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 1 and example.guid is not None and example.guid.startswith('train'):\n logger.info(\"\\n\\n*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n logger.info(\"\\n\\n\")\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n logger.info(\":: Sentences longer than max_sequence_length: %d\" % (tokens_a_longer_max_seq_length))\n logger.info(\":: Num sentences: %d\" % (len(examples)))\n return features",
"def prepare_sequences(notes, n_vocab):\n sequence_length = GLOBAL_SEQUENCE_LENGTH\n\n # Get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # Create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n \n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n \n n_patterns = len(network_input)\n\n # Reshape the input into a format compatible with LSTM layers\n network_input = np.array(network_input)\n # Normalize input between -1 and 1\n network_input = (network_input - float(n_vocab)/2) / (float(n_vocab)/2)\n\n return network_input",
"def get_train_data(sequence_length=100):\n\n network_input = list()\n network_output = list()\n notes = read_binary_file(str(data_dir / \"notes.pkl\"))\n\n # get all pitch names\n pitch_names = sorted(set(item for item in notes))\n # Embedding #TODO use keras Embedding layer instead\n note_to_int = read_binary_file(metadata_dir / \"note_to_int.pkl\")\n vocab_size = len(set(note_to_int))\n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i : i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n # reshape the input into a format compatible with LSTM layers\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n # normalize input\n network_input = network_input / float(vocab_size)\n network_output = np_utils.to_categorical(network_output)\n\n with open(metadata_dir / \"sequence_in.pkl\", \"wb\") as f:\n pickle.dump(network_input, f)\n with open(metadata_dir / \"sequence_out.pkl\", \"wb\") as f:\n pickle.dump(network_output, f)\n return network_input, network_output, vocab_size",
"def train_le(self):\n\n lisa = [self.get_prev_word(i, orignal=True) for i in range(len(self.df))]\n lisb = [self.get_next_word(i, orignal=True) for i in range(len(self.df))]\n lis = lisa + lisb\n lis.append(\"<NAP>\")\n lis.append(\"<START>\")\n return self.label_encoder.fit(lis)",
"def load_dataset(sequence_length=10):\n train_x = []\n train_y = []\n notes_to_emotion = []\n song_index_to_notes = get_notes()\n song_index_to_emotion = get_emotions()\n\n for index, notes in song_index_to_notes.items():\n if index in song_index_to_emotion:\n notes_to_emotion.append((notes, song_index_to_emotion[index]))\n\n for notes, emotion in notes_to_emotion:\n # get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n for i in range(0, int(len(notes)) - sequence_length):\n music_in = notes[i: i + sequence_length]\n train_x.append([note_to_int[char] for char in music_in])\n train_y.append(emotion)\n\n print(\"train_x has shape: \", len(train_x))\n print(\"train_y has shape: \", len(train_y))\n\n return (np.asarray(train_x), np.asarray(train_y))",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\r\n\r\n label_map = {label : i for i, label in enumerate(label_list)}\r\n\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[:(max_seq_length - 2)]\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\r\n segment_ids = [0] * len(tokens)\r\n\r\n if tokens_b:\r\n tokens += tokens_b + [\"[SEP]\"]\r\n segment_ids += [1] * (len(tokens_b) + 1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding = [0] * (max_seq_length - len(input_ids))\r\n input_ids += padding\r\n input_mask += padding\r\n segment_ids += padding\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = example.label\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"tokens: %s\" % \" \".join(\r\n [str(x) for x in tokens]))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n logger.info(\r\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n # logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features.append(\r\n InputFeatures(input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_id=label_id))\r\n return features",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\r\n\r\n label_map = {label : i for i, label in enumerate(label_list)}\r\n\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[:(max_seq_length - 2)]\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\r\n segment_ids = [0] * len(tokens)\r\n\r\n if tokens_b:\r\n tokens += tokens_b + [\"[SEP]\"]\r\n segment_ids += [1] * (len(tokens_b) + 1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding = [0] * (max_seq_length - len(input_ids))\r\n input_ids += padding\r\n input_mask += padding\r\n segment_ids += padding\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n # label_id = label_map[example.label]\r\n label_id = example.label\r\n label_id = float(label_id)\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"tokens: %s\" % \" \".join(\r\n [str(x) for x in tokens]))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n logger.info(\r\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n logger.info(\"label: %s (id = %s)\" % (example.label, label_id))\r\n\r\n features.append(\r\n InputFeatures(input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_id=label_id))\r\n return features",
"def train(length, n_ep, show=False):\r\n al_dict = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8, \r\n 'j':9, 'k':10, 'l':11, 'm':12, 'n':13, 'o':14, 'p':15, 'q':16, \r\n 'r':17, 's':18, 't':19, 'u':20, 'v':21, 'w':22, 'x':23, 'y':24, \r\n 'z':25}\r\n \r\n #Load proper data\r\n raw_rand = open(f'Length {length} Data/rand_data_len{length}.txt', 'r').readlines()\r\n raw_human = open(f'Length {length} Data/human_data_len{length}.txt','r').readlines()\r\n \r\n #Turn data into numbers\r\n raw_rand = [string[:length] for string in raw_rand]\r\n raw_human = [string[:length] for string in raw_human]\r\n \r\n raw_rand = [list(string) for string in raw_rand]\r\n raw_human = [list(string) for string in raw_human]\r\n \r\n for lists in raw_rand:\r\n for i in range(length):\r\n lists[i] = al_dict[lists[i]]\r\n for lists in raw_human:\r\n for i in range(length):\r\n lists[i] = al_dict[lists[i]]\r\n \r\n #Set aside 1/3 for test data\r\n test_rand = raw_rand[:len(raw_rand)//3]\r\n test_human = raw_human[:len(raw_human)//3]\r\n train_rand = raw_rand[len(raw_rand)//3:]\r\n train_human = raw_human[len(raw_human)//3:]\r\n \r\n #Intersperse both samples randomly with labels (0 = rand, 1 = human)\r\n train = []\r\n trn_labels = []\r\n test = []\r\n tst_labels = []\r\n while len(test_rand) and len(test_human) != 0:\r\n num = random.random()\r\n if num > .5 and len(test_rand) > 0:\r\n test.append(test_rand.pop())\r\n tst_labels.append(0)\r\n else:\r\n test.append(test_human.pop())\r\n tst_labels.append(1)\r\n \r\n while len(train_rand) and len(train_human) != 0:\r\n num = random.random()\r\n if num > .5 and len(train_rand) > 0:\r\n train.append(train_rand.pop())\r\n trn_labels.append(0)\r\n else:\r\n train.append(train_human.pop())\r\n trn_labels.append(1)\r\n \r\n # Build network, 2 hidden layers, 26 dimensional vectors for alphabet\r\n model = keras.Sequential()\r\n model.add(keras.layers.Embedding(26, 16)) #26=numletters\r\n model.add(keras.layers.GlobalAveragePooling1D())\r\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\r\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\r\n \r\n # Loss function, Using Probabilities\r\n model.compile(optimizer=tf.train.AdamOptimizer(),\r\n loss='binary_crossentropy',\r\n metrics=['accuracy'])\r\n \r\n # Perform n_ep epochs of training on train_data\r\n partial_train = np.array(train[len(train)//10:])\r\n partial_labels = trn_labels[len(train)//10:]\r\n val_train = np.array(train[:len(train)//10])\r\n val_labels = trn_labels[:len(train)//10]\r\n print((len(partial_train), len(partial_labels)),\r\n (len(val_train), len(val_labels)),\r\n (len(test), len(tst_labels)))\r\n \r\n history = model.fit(partial_train,\r\n partial_labels,\r\n epochs=n_ep,\r\n batch_size=512,\r\n validation_data=(val_train, val_labels),\r\n verbose=1)\r\n \r\n #EVALUATE THE FINAL MODEL\r\n\r\n results = model.evaluate(np.array(test), tst_labels)\r\n print(results)\r\n \r\n # GRAPH ACCURACY AND LOSS OVER TIME\r\n if show:\r\n acc = history.history['acc']\r\n val_acc = history.history['val_acc']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n \r\n history_dict = history.history\r\n \r\n epochs = range(1, len(acc) + 1)\r\n \r\n # \"bo\" is for \"blue dot\"\r\n plt.plot(epochs, loss, 'bo', label='Training loss')\r\n # b is for \"solid blue line\"\r\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\r\n plt.title('Training and validation loss')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Loss')\r\n plt.legend()\r\n \r\n 
plt.show()\r\n \r\n plt.clf() # clear figure\r\n acc_values = history_dict['acc']\r\n val_acc_values = history_dict['val_acc']\r\n \r\n plt.plot(epochs, acc, 'bo', label='Training acc')\r\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\r\n plt.title('Training and validation accuracy')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Accuracy')\r\n plt.legend()\r\n \r\n plt.show()\r\n \r\n return model",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n if label_list:\n label_map = {label: i for i, label in enumerate(label_list)}\n else:\n label_map = None\n\n features = []\n tokenslist = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n base_tokens = [\"[UNK]\"] + [\"[UNK]\"]*len(tokens_a) + [\"[UNK]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n base_tokens += [\"[UNK]\"]*len(tokens_b) + [\"[UNK]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n baseline_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(baseline_ids) == max_seq_length\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if label_map:\n label_id = label_map[example.label]\n else:\n label_id = float(example.label)\n if ex_index < 2:\n logger.debug(\"*** Example ***\")\n logger.debug(\"guid: %s\" % (example.guid))\n logger.debug(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.debug(\"input_ids: %s\" %\n \" \".join([str(x) for x in input_ids]))\n logger.debug(\"input_mask: %s\" %\n \" \".join([str(x) for x in input_mask]))\n logger.debug(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.debug(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n baseline_ids=baseline_ids))\n tokenslist.append({\"token\":tokens, \"golden_label\":example.label, \"pred_label\":None})\n return features, tokenslist",
"def prepare_sequences(notes, n_vocab):\n sequence_length = 100\n\n pitchnames = sorted(set(item for item in notes))\n\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n network_output = []\n\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n\n network_input = (network_input - float(n_vocab)/2) / (float(n_vocab)/2)\n network_output = np_utils.to_categorical(network_output)\n\n return (network_input, network_output)",
"def test_read_multiple_lxyrs(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n ground_truths = read_lxyrs(test_dir)\n self.assertEquals(len(ground_truths), 3)\n self.assertEquals(len(ground_truths['test1']), 3)\n self.assertEquals(len(ground_truths['test_gt']), 2)",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens = example.text\n\n# # Account for [CLS] and [SEP] with \"- 2\"\n# if len(tokens) > max_seq_length - 2:\n# tokens = tokens[:(max_seq_length - 2)]\n\n bert_tokens = []\n orig_to_tok_map = []\n\n bert_tokens.append(\"[CLS]\")\n for token in tokens:\n new_tokens = tokenizer.tokenize(token)\n if len(bert_tokens) + len(new_tokens) > max_seq_length - 1:\n # print(\"You shouldn't see this since the test set is already pre-separated.\")\n break\n else:\n orig_to_tok_map.append(len(bert_tokens))\n bert_tokens.extend(new_tokens)\n bert_tokens.append(\"[SEP]\")\n\n if len(bert_tokens) == 2: # edge case\n continue\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n\n input_ids = tokenizer.convert_tokens_to_ids(bert_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n segment_ids = [0] * max_seq_length # no use for our problem\n\n labels = example.label\n label_ids = [0] * max_seq_length\n label_mask = [0] * max_seq_length\n\n for label, target_index in zip(labels, orig_to_tok_map):\n label_ids[target_index] = label_map[label]\n label_mask[target_index] = 1\n\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n label_mask=label_mask))\n return features",
"def convert_examples_to_features(examples, seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(InputFeatures(unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features",
"def AnBn(nseq, nT, L, eps=0.5, cue=True, align=False, atfront=True):\n \n p_gram = (1-eps)\n p_nois = eps\n # here's one way to generate the sequences, \n # going to create an empty array, fill it with the valid sequences first\n seqs = -1*np.ones((nseq, nT))\n \n n = int(p_gram*nseq/len(L))\n N = 0\n for l in L:\n \n valid_seqs = np.apply_along_axis(np.repeat, 1, np.repeat([[0,1]],n,0), [l, l])\n \n if align:\n idx = np.arange(0,nT-np.mod(nT,2*l),np.floor(nT/(2*l)))\n idx = np.ones(n,nT)*idx[None,:]\n else:\n idx = np.random.rand(n,nT).argsort(1)[:,:(2*l)]\n idx = np.sort(idx,1)\n np.put_along_axis(seqs[N:N+n,:], idx, valid_seqs, axis=1)\n N+=n\n \n # now I want to add noise sequences, i.e. random number of A and B tokens\n # but I want to make sure that the sparseness of the sequences isn't\n # too different from the grammatical ones -- so I set that manually\n \n thr = sts.norm.ppf(2*np.mean(L)/nT)\n noise_seqs = ((np.ones(nseq-N)[:,None]*np.arange(nT) - np.random.choice(nT-5,(nseq-N,1)))>0).astype(int)\n noise_seqs[np.random.randn(nseq-N,nT)>thr] = -1\n \n seqs[N:,:] = noise_seqs\n labels = (seqs == 0).sum(1) == (seqs==1).sum(1)\n \n if cue:\n seqs = np.append(seqs, np.ones(nseq)[:,None]*2, axis=1)\n if atfront:\n # push to the front\n seqs = np.where(seqs==-1, np.nan, seqs)\n seqs = np.sort(seqs,1)\n seqs = np.where(np.isnan(seqs),-1,seqs)\n \n shf = np.random.choice(nseq,nseq,replace=False)\n seqs = seqs[shf,:]\n labels = labels[shf]\n \n return seqs, labels",
"def vectorize(self, observations):\n is_training = any(['labels' in obs for obs in observations])\n\n src_seq, tgt_seq, labels, valid_inds, _, _ = PaddingUtils.pad_text(\n observations, self.dict, end_idx=self.END_IDX,\n null_idx=self.NULL_IDX, dq=True, eval_labels=True,\n encode_truncate=self.encode_max_seq_len, decode_truncate=self.decode_max_seq_len)\n\n max_seq_len = len(src_seq[0])\n # now the source sequence turn is just `relative distance`\n src_seq_dis = []\n # TODO: add turn embedding for src_seq\n for cur_ind, org_ind in enumerate(valid_inds):\n org_dis_ids = observations[org_ind]['dis2vec'].copy()\n org_dis_ids.extend([0] * (max_seq_len - len(org_dis_ids)))\n src_seq_dis.append(org_dis_ids)\n\n src_seq_turn = []\n tgt_seq_turn = []\n # TODO: add turn embedding for src_seq\n for cur_ind, org_ind in enumerate(valid_inds):\n org_turn_ids = observations[org_ind]['turn2vec'].copy()\n org_turn_ids.extend([0] * (max_seq_len - len(org_turn_ids)))\n src_seq_turn.append(org_turn_ids)\n # decode turn id as input\n tgt_seq_turn.append(observations[org_ind]['cur_turn'])\n\n if src_seq is None:\n return None, None, None, None, None, None, None\n\n src_seq = torch.LongTensor(src_seq)\n # src_seq_turn = torch.LongTensor(src_seq_turn)\n # src_seq_dis = torch.LongTensor(src_seq_dis)\n # tgt_seq_turn = torch.LongTensor(tgt_seq_turn)\n\n if tgt_seq is not None:\n tgt_seq = torch.LongTensor(tgt_seq)\n\n if self.use_cuda:\n # copy to gpu\n src_seq = src_seq.cuda()\n # src_seq_turn = src_seq_turn.cuda()\n # src_seq_dis = src_seq_dis.cuda()\n # tgt_seq_turn = tgt_seq_turn.cuda()\n if tgt_seq is not None:\n tgt_seq = tgt_seq.cuda()\n\n # set up candidates\n cands = []\n sampling_cands = []\n valid_cands = []\n for i, v in enumerate(valid_inds):\n if 'label_candidates' in observations[v] and observations[v]['label_candidates']:\n curr_lcs = list(observations[v]['label_candidates'])\n curr_cands = [{'text': c + ' ' + self.dict.end_token} for c in curr_lcs]\n # padding candidates\n cs, _, _, valid_c_inds, *_ = PaddingUtils.pad_text(curr_cands, self.dict, null_idx=self.NULL_IDX,\n # TODO: whether add end idx to add\n dq=True, encode_truncate=self.decode_max_seq_len)\n valid_cands.append((i, v, [curr_lcs[j] for j in valid_c_inds]))\n cs = torch.LongTensor(cs)\n if self.use_cuda:\n cs = cs.cuda()\n cands.append(cs)\n # random select one from 0:18 from curr_lcs\n sampling_cands.append(random.choice(curr_lcs[:19]) + ' ' + self.dict.end_token)\n if is_training:\n # construct one tensor\n sample_can_sep = ' {} '.format(self.dict.start_token).join(sampling_cands)\n # the sample should appended a END symbol as well.\n sample_out = PaddingUtils.pad_text([{'text': sample_can_sep, 'eval_labels': [sample_can_sep]}],\n self.dict, null_idx=self.NULL_IDX, dq=False)\n # remove the last which is extra END IDX\n sample_ys = sample_out[1]\n sampling_cands = split_pad_vector(sample_ys, self.START_IDX, self.NULL_IDX)[0]\n sampling_cands = torch.LongTensor(sampling_cands)\n if self.use_cuda:\n sampling_cands = sampling_cands.cuda()\n\n return src_seq, src_seq_turn, src_seq_dis, tgt_seq, tgt_seq_turn, labels, valid_inds, cands, valid_cands, sampling_cands, is_training",
"def test_get_input_unlabeled(self):\n result = pre.get_input(self.testfilename)\n truth = (\n [[Reference(0, 'm jones', \n 'symbol intersect detect method improv spatial intersect join', \n ['e rundensteiner', 'y huang'], 'geoinformatica', None),\n Reference(1, 'matthew c jones', \n 'improv spatial intersect join symbol intersect detect', \n ['e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra'], \n 'sigmodels.intern manag data', None),\n Reference(2, 'matthew c jones',\n 'view materi techniqu complex hirarch object', ['e rundensteiner',\n 'y huang'], 'ssd symposium larg spatial databas', None)],\n [Reference(3, 'mike w miller', 'domin draw bipartit graph', \n ['l berg'], 'sigucc special interest group univers comput servic',\n None),\n Reference(4, 'mike w miller', 'rel compromis statist databas', \n [], 'sigucc special interest group univers comput servic', None)],\n [Reference(5, 'c chen', 'formal approach scenario analysi',\n ['d kung', 'j samuel', 'j gao', 'p hsia', 'y toyoshima'],\n 'ieee softwar', None)],\n [Reference(6, 'jane j robinson', 'discours code clue context', [], \n 'acl meet the associ comput linguist', None),\n Reference(7, 'jane j robinson', 'diagram grammar dialogu', [],\n 'cooper interfac inform system', None)],\n [Reference(8, 'a gupta', 'iri h java distanc educ', ['a gonzalez', \n 'a hamid', 'c overstreet', 'h wahab', 'j wild', 'k maly', 's ghanem',\n 'x zhu'], 'acm journal educ resourc comput', None)],\n [Reference(9, 'mary d brown',\n 'intern redund represent limit bypass support pipelin adder regist'\n 'file', ['y patt'], 'proceed the th ieee intern symposium high '\n 'perform comput architectur hpca intern symposium high perform '\n 'comput architectur talk slide', None)]],\n\n ['m jones', 'e rundensteiner', 'y huang', 'matthew c jones', \n 'e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra', \n 'matthew c jones', 'e rundensteiner', 'y huang', 'mike w miller',\n 'l berg', 'mike w miller', 'c chen', 'd kung', 'j samuel', 'j gao',\n 'p hsia', 'y toyoshima', 'jane j robinson', 'jane j robinson',\n 'a gupta', 'a gonzalez', 'a hamid', 'c overstreet', 'h wahab', 'j wild',\n 'k maly', 's ghanem', 'x zhu', 'mary d brown', 'y patt'])\n self.assertEquals(result, truth)",
"def convert_examples_to_features(examples,label_list, max_seq_length,tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n input_data=[]\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n if tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n if ex_index % 10000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n input_mask = [1] * len(input_ids)\r\n\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 3:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = input_ids\r\n features[\"input_mask\"] = input_mask\r\n features[\"segment_ids\"] = segment_ids\r\n features[\"label_ids\"] =label_id\r\n input_data.append(features)\r\n\r\n return input_data",
"def rle(inarray):\n ia = np.asarray(inarray) # force numpy\n n = len(ia)\n if n == 0: \n return (None, None, None)\n else:\n y = ia[1:] != ia[:-1] # pairwise unequal (string safe)\n i = np.append(np.where(y), n - 1) # must include last element posi\n z = np.diff(np.append(-1, i)) # run lengths\n p = np.cumsum(np.append(0, z))[:-1] # positions\n\n d = {\n 'run_lengths': z,\n 'positions': p,\n 'labels': ia[i]\n }\n return d",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {} # label\n for (i, label) in enumerate(label_list): # ['0', '1']\n label_map[label] = i\n\n features = [] # feature\n for (ex_index, example) in enumerate(examples):\n text_a_id = int(example.text_a_id)\n text_b_id = int(example.text_b_id)\n\n text_a_fields = example.text_a.split(\" _eop_ \")\n \n tokens_a = []\n text_a_subtype = []\n for text_a_field_idx, text_a_field in enumerate(text_a_fields):\n text_a_field_token = tokenizer.tokenize(text_a_field)\n tokens_a.extend(text_a_field_token)\n text_a_subtype.extend([text_a_field_idx]*len(text_a_field_token))\n assert len(tokens_a) == len(text_a_subtype)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b) # text_b tokenize\n\n if tokens_b: # if has b\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # truncate\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because # (?)\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n subtype_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n subtype_ids.append(0)\n for token_idx, token in enumerate(tokens_a):\n tokens.append(token)\n segment_ids.append(0)\n subtype_ids.append(text_a_subtype[token_idx])\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n subtype_ids.append(1)\n\n if tokens_b:\n for token_idx, token in enumerate(tokens_b):\n tokens.append(token)\n segment_ids.append(1)\n subtype_ids.append(2)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n subtype_ids.append(2)\n\n input_sents = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_sents) # mask\n\n # Zero-pad up to the sequence length.\n while len(input_sents) < max_seq_length:\n input_sents.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n subtype_ids.append(0)\n\n assert len(input_sents) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(subtype_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n if ex_index%2000 == 0:\n print('convert_{}_examples_to_features'.format(ex_index))\n\n features.append(\n InputFeatures( # object\n text_a_id=text_a_id,\n text_b_id=text_b_id,\n input_sents=input_sents,\n input_mask=input_mask,\n segment_ids=segment_ids,\n subtype_ids=subtype_ids,\n label_id=label_id))\n\n return features",
"def convert_examples_to_features(self, examples, max_seq_length):\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = self.tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = self.tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[: (max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n labels_ids = []\n for label in example.labels:\n labels_ids.append(float(label))\n\n if ex_index < 0:\n self.logger.info(\"*** Example ***\")\n self.logger.info(\"guid: %s\" % (example.guid))\n self.logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n self.logger.info(\n \"input_ids: %s\" % \" \".join([str(x) for x in input_ids])\n )\n self.logger.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask])\n )\n self.logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids])\n )\n self.logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n if example.parent_labels is None:\n input_features = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids,\n )\n else:\n input_features = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids,\n parent_labels=example.parent_labels,\n )\n features.append(input_features)\n\n return features",
"def seq_test(subject_array,stop_rule,p, batch_size, typeII_error, typeI_error, repeat = 1, \nprob_threshold = 1, seq = True, batch_limit = 32):\n temp_list = []\n neg_list = [] #renamed to negativeInfoList\n pos_list = [] #renamed to positiveInfoList\n consum = 0\n temp = {'data': subject_array,\n 'NB_Num': 0,\n 'PB_Num': 0,\n 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = [] #renamed to negativeBatches\n pos_array = [] #renamed to positiveBatches\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],\n typeII_error, typeI_error, \n batch_limit = batch_limit)\n temp0 = {'data': temp0,\n 'NB_Num': i['NB_Num'] + 1,\n 'PB_Num': i['PB_Num'],\n 'p': p0,\n 'batch_size': n0}\n temp1 = {'data': temp1,\n 'NB_Num': i['NB_Num'],\n 'PB_Num': i['PB_Num'] + 1,\n 'p': p1,\n 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n \n if len(temp1['data'])>0:\n if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con \n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n \n neg_array[:,1] = 0\n individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, consum, individual_con)"
]
| [
"0.55630493",
"0.55254424",
"0.5501198",
"0.5487197",
"0.54234886",
"0.53784466",
"0.5335226",
"0.5326911",
"0.5304064",
"0.5296955",
"0.5287288",
"0.52840465",
"0.5266204",
"0.5259986",
"0.5259986",
"0.52509177",
"0.52489835",
"0.5209026",
"0.52064824",
"0.5155642",
"0.5141018",
"0.5078068",
"0.5075134",
"0.50692016",
"0.5060335",
"0.5055308",
"0.5046331",
"0.504527",
"0.50380963",
"0.5024652"
]
| 0.5635134 | 0 |
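The next record's document implements Viterbi decoding as a method on an HMM class whose parameters (pi, t, e, nStates, logdomain) live on the object. For comparison, the sketch below is a minimal standalone version of the same dynamic program in plain Python; the function name viterbi_decode and the toy two-state parameters are illustrative assumptions only, not drawn from the dataset.

import math

def viterbi_decode(observation, pi, t, e):
    # pi[s]: start probability of state s; t[s][s2]: transition s -> s2;
    # e[s][o]: probability that state s emits symbol o (symbols are integer indices).
    n_states = len(pi)
    N = len(observation)
    log = lambda p: math.log(p) if p > 0 else float('-inf')
    # tab[i][j]: best log-likelihood of any path ending in state j at time i
    tab = [[float('-inf')] * n_states for _ in range(N)]
    backtrack = [[-1] * n_states for _ in range(N)]
    for j in range(n_states):
        tab[0][j] = log(pi[j]) + log(e[j][observation[0]])
    for i in range(1, N):
        for j in range(n_states):
            best_s, best_val = -1, float('-inf')
            for s in range(n_states):
                val = tab[i-1][s] + log(t[s][j])
                if val > best_val:
                    best_s, best_val = s, val
            tab[i][j] = best_val + log(e[j][observation[i]])
            backtrack[i][j] = best_s
    # Pick the best final state, then walk the backpointers to recover the path.
    last = max(range(n_states), key=lambda s: tab[N-1][s])
    path = [last]
    for i in range(N-1, 0, -1):
        path.append(backtrack[i][path[-1]])
    path.reverse()
    return path, tab[N-1][last]

# Toy two-state HMM over a binary alphabet (illustrative values only).
pi = [0.6, 0.4]
t = [[0.7, 0.3], [0.4, 0.6]]
e = [[0.9, 0.1], [0.2, 0.8]]
print(viterbi_decode([0, 0, 1, 1], pi, t, e))

On the toy observation sequence [0, 0, 1, 1] this prints the decoded state path together with its log-likelihood.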
Viterbi inference of the highest-likelihood hidden state sequence given the observations. Time complexity is O(|observation| * nStates^2). | def viterbi(self, observation):
    N = len(observation)
    # tab[i][j]: best (maximum) log-likelihood of any state path ending in state j at time i.
    tab = [[0] * self.nStates for i in range(N)]
    # backtrack[i][j]: predecessor state of j on that best path, used to recover the sequence.
    backtrack = [[-1] * self.nStates for i in range(N)]
    # Work in log space to avoid underflow; convert the model parameters once if needed.
    if not self.logdomain:
        self.__convert_to_log()
    # Initialisation: prior of each state plus emission of the first observation.
    for i in range(self.nStates):
        tab[0][i] = self.e[i][observation[0]] + self.pi[i]
    # Recursion: for each time step and target state, keep the best predecessor.
    for i in range(1, N):
        for j in range(self.nStates):
            smax = -1
            maxval = float('-inf')
            for s in range(self.nStates):
                cs = tab[i-1][s] + self.t[s][j]
                if cs > maxval:
                    smax = s
                    maxval = cs
            assert smax > -1 and smax < self.nStates
            tab[i][j] = self.e[j][observation[i]] + maxval
            backtrack[i][j] = smax
    # Termination: pick the most likely final state and its log-likelihood.
    smax = -1
    llike = float('-inf')
    for s in range(self.nStates):
        if llike < tab[N-1][s]:
            llike = tab[N-1][s]
            smax = s
    # Backtrace from the final state to recover the most likely state sequence.
    best = [-1] * N
    best[-1] = smax
    for i in range(N-2, -1, -1):
        best[i] = backtrack[i+1][best[i+1]]
    return best, llike | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def viterbi(self, hmm, initial, emissions):\n probabilities = hmm.emission(emissions[0]) * initial\n stack = []\n \n for emission in emissions[5:]:\n trans_probabilities = hmm.transition_probabilities * np.row_stack(probabilities) #Matrix for transition probabilities\n max_col_ixs = np.argmax(trans_probabilities, axis=0)\n probabilities = hmm.emission(emission) * trans_probabilities[max_col_ixs, np.arange(hmm.num_states)] #Probabilities\n stack.append(max_col_ixs) #Store the axis and the data in the stack\n state_seq = [np.argmax(probabilities)] #Store the resulted probabilities\n\n while stack:\n max_col_ixs = stack.pop() #Take out the top data store in stack\n state_seq.append(max_col_ixs[state_seq[-1]])\n state_seq.reverse()\n return state_seq",
"def Viterbi(_sentence, _model, _emission_df, _transition_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states\n for i in range(len(states)):\n # transition prob from __START__ to anything\n try:\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - state to state\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n for j in range(len(states)):\n try:\n # find e(xi|yj)\n emission_prob = float(_emission_df[(_sentence[i], states[j])])\n except KeyError:\n emission_prob = 0.0\n\n if prev_optimal == 0.0:\n # find optimal from state to state prob\n for k in range(len(states)):\n test_opti = float(value_table[k][i-1])\n if test_opti >= prev_optimal:\n prev_optimal = test_opti\n prev_state_seq = sequence_table[k][i-1]\n\n # given prev optimal, calculate transition prob\n try:\n # find transition prob from prev optimal state to current\n transition_prob = float(_transition_df[(prev_state_seq[-1], states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n try:\n transition_prob = _transition_df[(states[i], '__STOP__')]\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n # take optimal from table and return optimal val and sequence\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]",
"def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at each time, and keep a list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans + lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])",
"def viterbi(self, e_phi, states_dict):\n \n states = [\"0\",\"1\",\"2\"]\n \n if not states_dict:\n first_dict = {} \n for state in states: \n S_e = self.score(e_phi, state) \n first_dict[state] = (S_e,([]))\n return [first_dict] \n \n else:\n last_dict = states_dict[-1]\n this_dict = {}\n scores = self.scores(e_phi)\n for (state, S_e) in scores.iteritems():\n max_score=-float('inf')\n max_label = None\n for prev in states:\n (Sprev, (Hprev))=last_dict[prev]\n if not Hprev:\n Hstate = [prev] # no history\n else:\n Hstate = Hprev[1:]+[prev] \n t_phi = tfeats(Hstate, self.order)\n \n partial_score = Sprev+self.score(t_phi, state)\n if max_score < partial_score:\n S_max = partial_score\n max_hstate = Hstate\n # write to dict\n this_dict[state]=(S_max+S_e,(max_hstate))# brakets\n states_dict.append(this_dict)\n return states_dict",
"def Modified_Viterbi(_sentence, _model, _emission_df, _transition_df, _2nd_order_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states, 1st order.\n # 2nd order not possible for base case\n for i in range(len(states)):\n # use 1st order, since 2nd order is non-existent\n # transition prob from __START__ to anything\n try:\n # find transition from start to state\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n # Find emission of word from state\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - 2nd order\n # loop through rest of words in sentence\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n # loop through states for every word\n for j in range(len(states)):\n try:\n # find e(xi|yj), prob emitting word from current state\n emission_prob = float(_emission_df[(states[j], _sentence[i])])\n except KeyError:\n emission_prob = 0\n\n # find prev_optimal\n if prev_optimal == 0.0:\n for k in range(len(states)):\n test_optimal = float(value_table[k][i-1])\n if test_optimal >= prev_optimal:\n prev_optimal = test_optimal\n prev_state_seq = sequence_table[k][i-1]\n\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n\n # use 2nd order here - modified\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n prev_state_seq = sequence_table[i][-1]\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), '__STOP__')])\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]",
"def viterbi(self, emb, obs, seq_lengths, normalize_observation=True):\n batch_size = len(seq_lengths)\n max_seq_length = seq_lengths.max().item()\n\n # initialize states\n self._initialize_states(embs=emb, obs=obs, normalize_observation=normalize_observation)\n # maximum probabilities\n log_delta = torch.zeros([batch_size, max_seq_length, self.n_hidden], device=self.device)\n # most likely previous state on the most probable path to z_t = j. a[0] is undefined.\n pre_states = torch.zeros([batch_size, max_seq_length, self.n_hidden], dtype=torch.long, device=self.device)\n\n # the initial delta state\n log_delta[:, 0, :] = self.log_state_priors + self.log_emiss_probs[:, 0, :]\n for t in range(1, max_seq_length):\n # udpate delta and a. It does not matter where we put the emission probabilities\n max_log_prob, argmax_val = log_maxmul(\n log_delta[:, t-1, :].unsqueeze(1),\n self.log_trans[:, t, :, :] + self.log_emiss_probs[:, t, :].unsqueeze(1)\n )\n log_delta[:, t, :] = max_log_prob.squeeze(1)\n pre_states[:, t, :] = argmax_val.squeeze(1)\n\n # The terminal state\n batch_max_log_prob = list()\n batch_z_t_star = list()\n\n for l_delta, length in zip(log_delta, seq_lengths):\n max_log_prob, z_t_star = l_delta[length-1, :].max(dim=-1)\n batch_max_log_prob.append(max_log_prob)\n batch_z_t_star.append(z_t_star)\n\n # Trace back\n batch_z_star = [[z_t_star.item()] for z_t_star in batch_z_t_star]\n for p_states, z_star, length in zip(pre_states, batch_z_star, seq_lengths):\n for t in range(length-2, -1, -1):\n z_t = p_states[t+1, z_star[0]].item()\n z_star.insert(0, z_t)\n\n # compute the smoothed marginal p(z_t = j | obs_{1:T})\n self._forward_backward(seq_lengths)\n log_marginals = self.log_alpha + self.log_beta\n norm_marginals = torch.exp(log_marginals - logsumexp(log_marginals, dim=-1, keepdim=True))\n batch_marginals = list()\n for marginal, length in zip(norm_marginals, seq_lengths):\n mgn_list = marginal[1:length].detach().cpu().numpy()\n batch_marginals.append(mgn_list)\n\n return batch_z_star, batch_marginals",
"def viterbi(obs: List[int], pi: List[float], A: np.ndarray, B: np.ndarray) -> List[int]:\n pi_log = np.log(pi)\n A_log = np.log(A)\n B_log = np.log(B)\n states = A.shape[0]\n n = len(obs)\n\n D_log = np.zeros((states, n))\n backtrack = np.zeros((states, n - 1)).astype(int)\n D_log[:, 0] = pi_log + B_log[:, obs[0]]\n\n for j in range(1, n):\n for i in range(states):\n temp_sum = A_log[:, i] + D_log[:, j - 1]\n D_log[i, j] = np.max(temp_sum) + B_log[i, obs[j]]\n backtrack[i, j - 1] = np.argmax(temp_sum)\n\n state = np.zeros(n).astype(int)\n state[-1] = np.argmax(D_log[:, -1])\n for n in range(n - 2, -1, -1):\n state[n] = backtrack[int(state[n + 1]), n]\n state = state.tolist()\n return state",
"def tag(self, observations):\n tags = []\n\n # lambda expression of the sum of negative log probs\n cost = lambda p, q: - float(p + q)\n\n for t in range(1, len(observations)):\n # the new observation\n observation = observations[t]\n for i in range(len(self.states)):\n state = self.states[i]\n # TODO update the viterbi and backpointer data structures\n # compute the emission prob\n p_emission = self.emission_PD[state].logprob(observation)\n # compute all the possible costs\n costs = []\n for j in range(len(self.states)):\n pre_state = self.states[j]\n pre_cost = self.viterbi[pre_state, t-1]\n p_transition = self.transition_PD[pre_state].logprob(state)\n # new cost should be sum of the previous cost (corresponding\n # to the jth state) and the newly computed cost\n new_cost = cost(p_emission, p_transition) + pre_cost\n costs.append(new_cost)\n # pick out the minimum cost\n min_cost_index = np.argmin(costs)\n # update the Viterbi table with the best cost\n self.viterbi[state, t] = costs[min_cost_index]\n # update the backpointer with the best index\n self.backpointer[state, t] = min_cost_index\n\n # TODO\n # Add cost of termination step (for transition to </s> , end of sentence).\n terminal_costs = []\n # for each of the final states, compute the cost which consists of the previous\n # cost that corresponds to the state, and the transition probability only (the\n # emission probability does not matter because there is no emission at all!).\n for s in range(len(self.states)):\n last_state = self.states[s]\n last_cost = self.viterbi[last_state, len(observations) - 1]\n last_p_transition = self.transition_PD[last_state].logprob('</s>')\n terminal_cost = cost(0, last_p_transition) + last_cost\n terminal_costs.append(terminal_cost)\n # find out the best path by selecting the path that gives the lowest overall cost\n best_path_cost = min(terminal_costs)\n best_path_index = np.argmin(terminal_costs)\n # complete the viterbi and backpointer tables\n self.viterbi['</s>', len(observations)] = best_path_cost\n self.backpointer['</s>', len(observations)] = best_path_index\n\n # TODO\n # Reconstruct the tag sequence using the backpointer list.\n # Return the tag sequence corresponding to the best path as a list.\n # The order should match that of the words in the sentence.\n\n # backtrack the best states chosen by the algorithm\n best_path = []\n choice = '</s>'\n for t in range(len(observations), 0, -1):\n index = self.backpointer[choice, t]\n choice = self.states[index]\n best_path.append(choice)\n # reverse the backtrace to obtain the desired tags\n tags = reversed(best_path)\n\n return tags",
"def viterbi(log_emlik, log_startprob, log_transmat, forceFinalState=True):\n N, M = log_emlik.shape # (# timesteps, # states)\n B = np.zeros((N,M))\n V = np.zeros((N,M)) \n\n # initialisation\n V[0,:] = log_startprob + log_emlik[0,:] \n\n # induction\n for t in range(1,N):\n # vectorise\n x = np.tile(V[t-1,:],(M,1)) + log_transmat.T\n V[t,:] = np.max(x, axis=1) + log_emlik[t,:]\n B[t,:] = np.argmax(x, axis=1)\n\n # recover best path, looking for state sequence S that maximises P(S,X|emission probs)\n # TODO if forceFinalState\n end_state = np.argmax(V[N-1,:]) \n \n viterbi_path = [B[N-1,end_state]]\n viterbi_loglik = np.max(V[N-1,:])\n\n s_star = int(end_state)\n for t in range(N-2,-1,-1):\n s_star = int(B[t+1,s_star]) # optimal state at timestep t\n viterbi_path.append(s_star)\n\n assert len(viterbi_path) == N\n\n return viterbi_loglik, viterbi_path[::-1]",
"def viterbi1(self, e_phi, states_dict):\n \n states = [\"0\",\"1\",\"2\"]\n \n if not states_dict:\n first_dict = {} \n for state in states: \n S_e = self.score(e_phi, state) \n first_dict[state] = (S_e,([]))\n return [first_dict] \n \n else:\n last_dict = states_dict[-1]\n this_dict = {}\n for state in states:\n S_e = self.score(e_phi, state)\n max_score=-float('inf')\n max_label = None\n for prev in states:\n (Sprev, (Hprev))=last_dict[prev]\n if not Hprev:\n Hstate = [prev] # no history\n else:\n Hstate = Hprev[1:]+[prev] \n t_phi = tfeats(Hstate, self.order)\n \n partial_score = Sprev+self.score(t_phi, state)\n if max_score < partial_score:\n S_max = partial_score\n max_hstate = Hstate\n # write to dict\n this_dict[state]=(S_max+S_e,(max_hstate))# brakets\n states_dict.append(this_dict)\n return states_dict",
"def backwardVariableGeneration(self):\n self.beta = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialisation\n for j in range(self.noOfEmmittingStates+1):\n self.beta[j,-1] = self.transitionMatrix[j,-1]\n self.beta[-1,-1] = 1.0\n\n # main recursion\n for t in range(self.T, 1, -1):\n for j in range(self.noOfEmmittingStates, 0, -1):\n partialSum = 0\n for k in range(1, self.noOfEmmittingStates+1):\n partialSum += (self.transitionMatrix[j,k-1] * self.b[k-1,t-1] * self.beta[k,t])\n self.beta[j,t-1] = partialSum\n\n # first column\n partialSum = 0\n for k in range(1, self.noOfEmmittingStates+1):\n partialSum += (self.transitionMatrix[0,k-1] * self.b[k-1,0] * self.beta[k,1])\n self.beta[0,0] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]",
"def Viterbi(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n tags_set = list(set([pair[1] for pair in train_bag]))\n\n for key, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in tags_set:\n if key == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n state_probability = emission_p * transition_p\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = tags_set[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))",
"def initialise(self, observation):\n # Initialise viterbi, including\n # transition from <s> to observation\n # use costs (-log-base-2 probabilities)\n # TODO\n # empty everything\n self.viterbi = dict()\n self.backpointer = dict()\n # lambda expression of the sum of negative log probs\n cost = lambda p, q: - float(p + q)\n # The Viterbi table should be m*n where m is the number of states\n # and n is the number of words.\n # Initialliy, for each state, we calculate the emission probability\n # (the prob of observation given the state), and the transition\n # probability (state given the start symbol), sum the negative logs of\n # them to get the corresponding cost.\n # I chose to use dict() to implement the Viterbi table because it supports\n # a pair of keys, i.e. [state, t]\n for i in range(len(self.states)):\n state = self.states[i]\n p_obs_given_pos = self.emission_PD[state].logprob(observation)\n p_pos_given_start = self.transition_PD['<s>'].logprob(state)\n self.viterbi[state, 0] = cost(p_obs_given_pos, p_pos_given_start)\n\n # Initialise backpointer\n # TODO\n # Initialise the backpointer by filling in m 0s. Again, use the pair\n # key: [state, t].\n self.backpointer[state, 0] = 0",
"def Viterbi_Transition(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n all_tags = list(set([pair[1] for pair in train_bag]))\n\n for word_idx, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in all_tags:\n if word_idx == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n\n if word in V:\n state_probability = transition_p * emission_p\n else:\n state_probability = transition_p\n\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = all_tags[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))",
"def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)",
"def obs_model(self, state, rnn_hidden):\n state_hidden = self.state_hidden(torch.cat([state, rnn_hidden], dim=-1)) # [batch, hidden]\n # obs = self.obs(state_hidden) #[batch, hidden]\n obs = state_hidden\n obs_mean = self.obs_mean(obs) # [batch, output_size]\n # obs_sigma = F.softplus(self.obs_sigma(obs))+self._min_stddev #[batch, output_size]\n obs_sigma = torch.exp(self.obs_sigma(obs))\n\n return obs_mean, obs_sigma",
"def forward(self, input, hidden_states):\n h, c = self.ih2h(input, hidden_states[0])\n next_hiddens = [(h, c)]\n h, c = self.h2h(h, hidden_states[1])\n next_hiddens.append((h, c))\n output = self.log_softmax(self.h2o(h))\n return output, next_hiddens",
"def V_belief(s, n):\n\n if n == 0:\n return 0, ''\n else:\n v = {}\n sx_u = {}\n for ui in U: # for each possible action\n sx = B[ui].dot(s) # next belief state\n r = lnc.dot(sx) # expected immediate reward\n v[ui] = r + V_belief(sx, n-1)[0]\n sx_u[ui] = sx\n\n vmax = max(v.values())\n umax = max(v, key=v.get)\n sxmax = sx_u[umax]\n\n return vmax, umax, sxmax",
"def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq",
"def viterbi(seq, emission_mat, transition_mat, k_counter):\r\n k_dim = k_counter + NOT_MOTIF_STATES\r\n N = len(seq)\r\n prob_mat = wrap_log(np.zeros([k_dim, N]))\r\n trace_mat = np.zeros([k_dim, N])\r\n prob_mat[0, 0] = wrap_log(1)\r\n for j in range(1, N):\r\n curr_letter = prob_mat[:, j - 1].reshape((-1, 1))\r\n potential_trans = curr_letter + transition_mat\r\n max_values = np.max(potential_trans, axis=0).T\r\n trace_mat[:, j] = np.argmax(potential_trans, axis=0).T\r\n prob_mat[:, j] = max_values + emission_mat[:, emission_dict[seq[j]]]\r\n # begin trace\r\n motif_order = EMPTY_STRING\r\n curr_k = int(np.argmax(prob_mat[:, -1]))\r\n for j in range(N - 1, -1, -1):\r\n last_motif_state = k_dim - STATES_AT_END\r\n if FIRST_MOTIF_STATE <= curr_k <= last_motif_state:\r\n motif_order = MOTIF + motif_order\r\n else:\r\n motif_order = BACKGROUND + motif_order\r\n curr_k = int(trace_mat[curr_k, j])\r\n return motif_order[1:-1]",
"def beta(self,state,time,observation):\n trans = self.transition_map\n em = self.emission_map\n states = self.states\n O = observation\n\n @memoize\n def beta_helper(i,t,O):\n #print('State: ' + str(i))\n #print('Time: ' + str(t))\n #assert that the world is safe\n assert (t >= 0)\n assert (t <= len(O))\n #grab the base case\n if t == len(O):\n return 1\n #recursive application, equation 9.11\n else:\n if O[t] == ',':\n print(\"HERE\")\n import sys\n sys.exit(1)\n return sum(beta_helper(j,t+1,O)*trans[i][j]*em[i][O[t]] for j in states)\n\n return beta_helper(state,time,O)",
"def analyze_data(O, nstates, nsamples=1000, nobservations=None):\n\n # Time interval.\n tau = 0.001 # time interval (s) for plotting\n\n # Truncate O to number of observations.\n if nobservations:\n print \"Using only %d observations\" % nobservations\n O = [ o_t[0:nobservations] for o_t in O ]\n else:\n nobservations = len(O[0])\n\n # Generate MLHMM.\n print \"Generating MLHMM...\"\n estimator = bhmm.MLHMM(O, nstates)\n\n print \"Initial guess:\"\n print str(estimator.hmm.output_model)\n print estimator.hmm.transition_matrix\n print estimator.hmm.stationary_distribution\n\n # Plot initial guess.\n s_t = None\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-guess-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(estimator.hmm, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n print \"Fitting HMM...\"\n mle = estimator.fit()\n\n # Plot.\n s_t = mle.hidden_state_trajectories[0]\n import numpy as np\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-mlhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(mle, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n # Initialize BHMM with MLHMM model.\n print \"Sampling models from BHMM...\"\n sampler = bhmm.BHMM(O, nstates, initial_model=mle)\n bhmm_models = sampler.sample(nsamples=nsamples, save_hidden_state_trajectory=False)\n\n # Generate a sample saving a hidden state trajectory.\n final_models = sampler.sample(nsamples=1, save_hidden_state_trajectory=True)\n\n # Plot final BHMM sample.\n model = final_models[0]\n s_t = model.hidden_state_trajectories[0]\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-bhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(model, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n return [mle, bhmm_models]",
"def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in final state, last alpha for states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]",
"def gibbs_step(self, visible):\n hidden_prob = self.probabilities_hidden(visible)\n hidden_state = self.sample(hidden_prob)\n visible_prob = self.probabilities_visible(hidden_state)\n visible_state = visible_prob\n return hidden_prob, hidden_state, visible_prob, visible_state",
"def viterbi(self, O):\n\n predecessor = numpy.ones([len(O), len(self)], dtype = int) * -1\n delta = numpy.zeros([len(O), len(self)])\n B = numpy.zeros([len(self), len(O)])\n\n for j in range(len(self.S)):\n delta[0, j] = self.log_P[j] + self.S[j].b(O[0])\n\n for t in range(1, delta.shape[0]):\n for j in range(delta.shape[1]):\n #\n _temp_ = delta[t - 1, :] + self.A.log_transitions[:, j]\n #\n _from_ = numpy.argmax(_temp_)\n predecessor[t, j] = _from_\n delta[t, j] = delta[t - 1, _from_] + self.S[j].b(O[t])\n #\n #\n if self.A.force_to_one_terminal_state:\n _best_ = len(delta[-1]) - 1 # According to Transitions.py the terminal state is the last one\n else:\n _best_ = numpy.argmax(delta[-1, :])\n seq = numpy.ones(len(O)) * -1\n t = len(O) - 1\n i = _best_\n while t > 0:\n seq[t] = i\n i = predecessor[t, i]\n t = t - 1\n #\n return delta[-1, _best_], seq",
"def viterbi_log_likelihood(A, C, B_O):\n I = A.shape[0] # Number of states\n N = B_O.shape[1] # Length of observation sequence\n tiny = np.finfo(0.).tiny\n A_log = np.log(A + tiny)\n C_log = np.log(C + tiny)\n B_O_log = np.log(B_O + tiny)\n\n # Initialize D and E matrices\n D_log = np.zeros((I, N))\n E = np.zeros((I, N-1)).astype(np.int32)\n D_log[:, 0] = C_log + B_O_log[:, 0]\n\n # Compute D and E in a nested loop\n for n in range(1, N):\n for i in range(I):\n temp_sum = A_log[:, i] + D_log[:, n-1]\n D_log[i, n] = np.max(temp_sum) + B_O_log[i, n]\n E[i, n-1] = np.argmax(temp_sum)\n\n # Backtracking\n S_opt = np.zeros(N).astype(np.int32)\n S_opt[-1] = np.argmax(D_log[:, -1])\n for n in range(N-2, -1, -1):\n S_opt[n] = E[int(S_opt[n+1]), n]\n\n return S_opt",
"def act(self, observation):\n \n for i in range(len(self.counts)):\n if self.counts[i] == 0:\n return i\n \n exp_val = [math.exp(val / self.t) for val in self.values]\n tot_exp_val = np.sum(exp_val)\n self.softmaxvalues = exp_val / tot_exp_val\n\n l = np.random.random()\n probacumul = 0\n for i in range(len(self.softmaxvalues)):\n probacumul += self.softmaxvalues[i]\n if probacumul > l:\n return i",
"def evaluate(observations, model, states=None, log=False):\r\n N = model.N\r\n T = observations.shape[0]\r\n A = numpy.log(model.A)\r\n B = numpy.log(model.B)\r\n\r\n if states is None:\r\n alphas = forward_path(observations, numpy.log(model.pi), A, B, T, N)\r\n\r\n \"\"\" Termination \"\"\"\r\n result = add_logs(alphas[T-1, :])\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)\r\n\r\n else:\r\n result = 0\r\n for i in range(T):\r\n result += B[states[i], observations[i]]\r\n\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)",
"def get_state_observed_max(self):\n maxValues = numpy.zeros(self.get_num_variables())\n i = 0\n for v in self.variables:\n maxValues[i] = v.get_max_value()\n i += 1\n return maxValues",
"def Viterbi(self, sent):\n viterbi = defaultdict(dict)\n backpointer = defaultdict(dict)\n sent_tag = []\n pos_list = [end_token]\n viterbi['0'] = 1.0\n\n # Initialization step\n # This loop will run for all the tags of each first word (sent[1][0])(word next to <S>) in dictionary\n for tag in self.dictionary[sent[1][0]]:\n # if any sentance in our trained data starts with a word that has same tag as \"state\"\n if (start_token, tag) in self.transitions:\n viterbi[str(1)][tag] = self.transitions[(start_token, tag)] + self.emissions[(sent[1][0], tag)]\n else:\n viterbi[str(1)][tag] = -float('inf')\n backpointer[str(1)][tag] = start_token\n\n # Recursion step\n # This loop will run for rest of the tuples (word, pos) after first tuple in \"sent\"\n for i in xrange(2, len(sent)):\n # This loop will run for all the tags of each word (sent[idx][0]) in dictionary\n for tag in self.dictionary[sent[i][0]]:\n maximum_value = -float(\"inf\")\n maximum_loc = []\n # This loop will run for all the tags in previous word (sent[idx-1][0]) in dictionary\n for prev_tag in self.dictionary[sent[i - 1][0]]:\n # if any sentance in our trained data has (privious tag, current tag) or (pre_state, state) of given word\n if (prev_tag, tag) in self.transitions:\n t = viterbi[str(i - 1)][prev_tag] + self.transitions[(prev_tag, tag)]\n else:\n t = -float('inf')\n if t >= maximum_value:\n maximum_value = t\n maximum_loc = prev_tag\n\n viterbi[str(i)][tag] = maximum_value + self.emissions[(sent[i][0], tag)]\n backpointer[str(i)][tag] = maximum_loc\n\n t = end_token\n for i in xrange(1, len(sent)):\n t = backpointer[str(len(sent) - i)][t]\n pos_list.append(t)\n\n for tup in sent:\n sent_tag.append((tup[0], pos_list.pop()))\n\n #print \"viterbi:\", viterbi\n #print \"backpointer:\", backpointer\n #print \"sent_tagged\", sent_tag\n\n return sent_tag"
]
| [
"0.6980931",
"0.6643485",
"0.6631735",
"0.6539058",
"0.6513881",
"0.6479943",
"0.64096594",
"0.6368848",
"0.6295443",
"0.6259586",
"0.61511296",
"0.6142033",
"0.6089733",
"0.59984404",
"0.59506834",
"0.5934529",
"0.59035957",
"0.58897674",
"0.5864158",
"0.5853739",
"0.5813723",
"0.57968193",
"0.57473695",
"0.5744582",
"0.5728079",
"0.56968665",
"0.56924874",
"0.56753063",
"0.56157464",
"0.5607999"
]
| 0.7409377 | 0 |
Get the restaurant json or None. Use the default ones if mocks are requested | def get_restaurant(id):
with current_app.app_context():
if current_app.config["USE_MOCKS"]:
            id -= 1 # restaurant IDs start at 1
if 0 <= id < len(restaurants):
return restaurants[id]
else:
return None
else:
return get_from(current_app.config["REST_SERVICE_URL"]+"/restaurants/"+str(id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_random_restaurant(self, request, **kwargs):\n restaurant = Restaurant.objects.order_by(\n '?'\n ).select_related(\n 'address'\n ).prefetch_related(\n 'employees'\n ).first()\n serializer = RestaurantFullInfoSerializer(restaurant)\n return Response(serializer.data)",
"def test_get_no_restaurants(self):\n resp = self.test_client.get(self.API_BASE, headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n\n resp_dict = json.loads(resp.data)\n self.assertTrue('success' in resp_dict)\n self.assertEqual(resp_dict['success'], True)\n self.assertTrue('restaurants' in resp_dict)\n self.assertEqual(type(resp_dict['restaurants']), list)\n self.assertEqual(len(resp_dict['restaurants']), 0)",
"def get_json_data():\n return None",
"def choose_json():\n url = mock_http_get.call_args[0][0]\n if url.endswith(\".expanded.json\"):\n return dtdl_expanded_json\n else:\n return dtdl_json",
"def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)",
"def test_user_get_restaurants_list(self):\n response = self.client.get('/api/places/', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_get_two_restaurants(self):\n from espresso import db\n from espresso import Restaurant\n\n name_1 = 'Restaurant Italiano'\n db.session.add(Restaurant(name=name_1, creator='[email protected]'))\n name_2 = 'Restaurant Français'\n db.session.add(Restaurant(name=name_2, creator='[email protected]'))\n db.session.commit()\n\n resp = self.test_client.get(self.API_BASE, headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(len(resp_dict['restaurants']), 2)\n self.assertEqual(resp_dict['restaurants'][0]['name'], name_1)\n self.assertEqual(resp_dict['restaurants'][1]['name'], name_2)",
"def test_get_restaurant(self):\n url = \"/get_restaurants\"\n response = app.test_client().get(url)\n response_json = response.json\n with open('expected_responses/restaurants.json', 'r') as f:\n datastore = json.load(f)\n\n assert datastore == response_json, logging.error(\n \"GET Restaurants Failed!\")\n logging.info(\"GET Restaurants API Tested\")",
"def restaurant_only():\n work_time = {\n \"Понедельник\": \"8:00-23:00\",\n \"Вторник\": \"8:00-23:00\",\n \"Среда\": \"8:00-23:00\",\n \"Четверг\": \"8:00-23:00\",\n \"Пятница\": \"8:00-23:00\",\n \"Суббота\": \"8:00-23:00\",\n \"Воскресенье\": \"Выходной\",\n }\n restaurant = Restaurant(\"Снежинка\", work_time, False)\n return restaurant",
"def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)",
"def findARestaurant(mealType,location):\n\t#1. Use getGeocodeLocation to get the latitude and longitude coordinates of the location string.\n\t# lat_lng = f\"{getGeocodeLocation(location)}\"\n\t# lat_lng_formatted = lat_lng[lat_lng.find(\"(\")+1:lat_lng.find(\")\")]\n\tlatitude, longitude = getGeocodeLocation(location)\n\n\t#2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.\n\t#HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi\n\turl = (f\"https://api.foursquare.com/v2/venues/search?client_id={foursquare_client_id}&client_secret={foursquare_client_secret}&v={version}&ll={latitude},{longitude}&intent=browse&radius=10000&query={mealType}&limit=10\")\n\th = httplib2.Http()\n\tresult = json.loads(h.request(url, \"GET\")[1])\n\n\t#3. Grab the first restaurant\n\tvenue_id = result[\"response\"][\"venues\"][0][\"id\"]\n\tvenue_name = result[\"response\"][\"venues\"][0][\"name\"]\n\tvenue_location = result[\"response\"][\"venues\"][0][\"location\"]\n\n\t#4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'orginal' to get the original picture\n\timg_url = (f\"https://api.foursquare.com/v2/venues/{venue_id}/photos?client_id={foursquare_client_id}&client_secret={foursquare_client_secret}&v={version}&group=venue&limit=10\")\n\timg_h = httplib2.Http()\n\timg_result = json.loads(img_h.request(img_url, \"GET\")[1])\n\tprint(img_result)\n\n\t#5. Grab the first image\n\tif len(img_result[\"response\"][\"photos\"][\"items\"]) > 0:\n\t\timg_url = f\"{img_url_pre_lim['prefix']}300x300{img_url_pre_lim['suffix']}\"\n\n\t#6. If no image is available, insert default a image url\n\telse:\n\t\timg_url = \"https://cps-static.rovicorp.com/3/JPG_400/MI0003/711/MI0003711195.jpg?partner=allrovi.com\"\n\n\t#7. Return a dictionary containing the restaurant name, address, and image url\t\n\tresult = {\"name\": venue_name, \"address\": venue_location.get(\"address\",\"\"), \"img_url\": img_url}\n\tprint(result)\n\treturn result",
"def load_restaurants():\n try:\n with open(CACHE_FILE) as infile:\n print(\"Cache found, loading from file {}\".format(CACHE_FILE))\n restaurants = json.load(infile)\n except Exception:\n print(\"No cache found, loading from API\")\n restaurants = get_restaurants()\n with open(CACHE_FILE, 'w+') as outfile:\n json.dump(restaurants, outfile)\n return restaurants\n return restaurants",
"def get_restaurant(yelp_id):\n\n access_token = get_access_token()\n\n query_url = \"https://api.yelp.com/v3/businesses/{yelp_id}\".format(\n yelp_id=yelp_id)\n\n headers = {'Authorization': 'Bearer {token}'.format(\n token=access_token\n )}\n\n restaurant = requests.get(query_url, headers=headers)\n\n return restaurant.json()",
"def test_user_retrieve_restaurant(self):\n response = self.client.get(f'/api/places/1/', format='json')\n self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def restaurantMenuItemJson(restaurant_id, menu_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n menuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n return jsonify(MenuItem=[menuItem.serialize])\n except exc.NoResultFound:\n return redirect(url_for('mainPage'))",
"def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)",
"def get_json_data(self, json_data=None):\n return json_data or {}",
"def get_restaurants(user_id, **kwargs):\n search_criteria_values = get_search_criteria_values(**kwargs)\n if search_criteria_values:\n rest_ids = get_rest_ids_by_search_criteria(**search_criteria_values)\n if rest_ids:\n return get_rest_info_by_rest_id(rest_ids)\n\n print 'There was nothing found'\n return []",
"def resolveResult(self, restaurants):\n restaurant_list = []\n for restaurant in restaurants:\n restaurant_list.append({'Name': restaurant['restaurant']['name'], \"cuisines\": [x.strip() for x in restaurant['restaurant']['cuisines'].split(',')],\n \"lat\": restaurant['restaurant']['location']['latitude'], \"long\": restaurant['restaurant']['location']['longitude'], \"highlights\": restaurant['restaurant']['highlights'], \"Thumb\": restaurant['restaurant']['thumb'],\n \"user_Rating\": restaurant['restaurant']['user_rating']['aggregate_rating'],\"phone_Numbers\": restaurant['restaurant']['phone_numbers']})\n cuisineDict = { \"Chinese\":1, \"Korean\":2,\"Australia\":3,\"Japanese\":4,}\n WordDict = {1: \"cozy\",2: \"tasty\",3:'amazing',4:'flavorful',5:'yummy'}\n for i in range(len(restaurant_list)):\n icon = 5\n cuisines = restaurant_list[i][\"cuisines\"]\n adjective = WordDict[random.randint(1,5)]\n comment = \"This is a \"+ adjective\n if cuisines:\n if \"Chinese\" in cuisines:\n icon = 1\n elif \"Korean\" in cuisines:\n icon = 2\n elif \"Australia\" in cuisines:\n icon = 3\n elif \"Japanese\" in cuisines:\n icon = 4\n else:\n icon = 5\n comment = comment + \" \" + cuisines[0]\n restaurant_list[i]['icon'] = icon\n comment = comment + \" restaurant\"\n restaurant_list[i]['comment'] = comment\n res = {\"restaurants\":restaurant_list }\n return res",
"def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 404)",
"def collect_data(self, data: Restaurant) -> Restaurant:\n return data",
"def __read_temp_data(self) -> dict:\n\n out = dict()\n\n with open(self.FILE_NAME) as json_file:\n data = json.load(json_file)\n \n for entry in data:\n restaurant = Restaurant.from_json(entry)\n out[restaurant.name_seq_nr] = restaurant\n\n return out",
"def test_get_restaurant_review_list_success(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n self.assertEqual(response.status_code, 200)\n self.assertIn('TEST_CONTENT3', response.content.decode())\n response = client.get('/api/restaurant/'+str(res_id+1)+'/')\n self.assertEqual(response.json(), [])",
"def getFood(self):\n return self.data.food",
"def get(self, restaurant_name):\n try:\n data = mysql.get_restaurant(restaurant_name)\n self.res_status['result'] = data\n self.write(json.dumps(self.res_status))\n self.finish()\n\n except Exception as e:\n self.res_status['result'] = 'error'\n self.write(json.dumps(self.res_status))\n self.set_status(403)\n self.finish()\n print(traceback.format_exc(e))",
"def get(self, request, restaurant_id, *args, **kwargs):\n self.restaurant = get_object_or_404(Restaurant, id=restaurant_id)\n return super().get(request, *args, **kwargs)",
"def _get_fruit(self, _id):\n body = {\n '_id': _id,\n }\n headers = {\n 'content-type': 'application/json',\n }\n response = self.fetch(\n '/fruit/get',\n method='POST',\n headers=tornado.httputil.HTTPHeaders(headers),\n body=json.dumps(body))\n if response.code == httplib.NOT_FOUND:\n return None\n self.assertEqual(response.code, httplib.OK)\n return json.loads(response.body)",
"def test_get_json_spec(self):\n pass",
"def test_get_one(self):\n expected_response = {\n \"id\": 100, \"time\": \"23:58:59\", \"transport_name\": '', \"position\": 0,\n \"way\": 100, \"end_place\": 200, \"start_place\": 100\n }\n url = reverse('route', kwargs={'way_id': self.route.way_id, 'route_id': self.route.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(json.dumps(expected_response), json.loads(response.content))",
"def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)"
]
| [
"0.6806192",
"0.6267271",
"0.6147111",
"0.6061143",
"0.58997214",
"0.58851624",
"0.58745176",
"0.586676",
"0.5860428",
"0.58145684",
"0.58047366",
"0.57518137",
"0.5727421",
"0.5716943",
"0.5638375",
"0.5630991",
"0.5625062",
"0.55808204",
"0.55531573",
"0.55188113",
"0.5496493",
"0.5475156",
"0.5472288",
"0.54596245",
"0.54516536",
"0.541289",
"0.53594494",
"0.53476083",
"0.53360057",
"0.53346074"
]
| 0.64233977 | 1 |
Get the list of the restaurant's tables or None. Use the default ones if mocks are requested | def get_tables(id):
with current_app.app_context():
if current_app.config["USE_MOCKS"]:
            id -= 1 # restaurant IDs start at 1
if 0 <= id < len(restaurants):
return tables[id]
else:
return None
else:
return get_from(current_app.config["REST_SERVICE_URL"]+"/restaurants/"+str(id)+"/tables") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tables(self):\n r = self.client.query(\"show tables\")\n if r:\n tables = [tuple(reversed(x.split(','))) for x in filter(None, r.split('\\n'))][1:]\n FT.table_cache = dict(tables)\n return tables\n else:\n logging.error(\"get_tables: no response\")",
"def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')",
"def __getListTables(self):\n\n listTables = \"{\\\\*\\\\listtable\\n\"\n overrideTables = \"{\\\\listoverridetable\\n\"\n for listDef in self.lists:\n id = listDef.id\n listTables += listDef.getRtf()\n overrideTables += (\"{\\\\listoverride\\\\listid%d\"\n \"\\\\listoverridecount0\\\\ls%d}\\n\" % (id, id))\n return listTables + \"}\\n\" + overrideTables + \"}\\n\"",
"def list_todo_table(self):\n if self.is_todo_table_empty():\n print(\"nothing to do!\")\n return []\n else:\n return self.make_list_from_task()",
"def getTables(self):\n\treturn self.dbNames",
"def tables(self) -> list:\n return self.list_tables()",
"def get_tables(self):\n return self._get_types_from_default_ns(Table)",
"def test_get_table_list(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def list_tables(*args, **kwargs):\n return [[\"Table_1\", \"t\"], [\"Table_2\", \"t\"]]\n\n cursor.run_sql_in_snapshot = list_tables\n table_list = db_introspection.get_table_list(cursor=cursor)\n self.assertEqual(\n table_list,\n [\n TableInfo(name=\"Table_1\", type=\"t\"),\n TableInfo(name=\"Table_2\", type=\"t\"),\n ],\n )",
"def tables(self, select_title=\"\"):\n if select_title == \"\":\n # Return everything\n return copy.copy(self.__tables)\n return find_tables_by_title(self.__tables, select_title)",
"def tables(self, select_title=\"\"):\n if select_title == \"\":\n # Return everything\n return copy.copy(self.__tables)\n return find_tables_by_title(self.__tables, select_title)",
"def get_tables(self, provider=None):\n\n if self.tables is None:\n self.tables = {}\n\n provider = self.get_provider(provider)\n if provider is None:\n return None\n\n r = requests.get(\n self._url('/dataproviders/{:s}/tables'.format(provider)), \n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n self.tables[provider] = r.json()\n log.info('provider {:s} has {:d} table(s)'.format(provider, len(r.json())))\n\n return",
"def list_tables(service):\n r = _post(service)\n if 'tables' in r:\n return [table(p) for p in r['tables']]\n return None",
"def get_table_list(self):\n # the \\\"{{}}\\\" is where the sql command will be added via a second `.format()`\n container_command = \"docker exec {} sh -c \\\"{{}}\\\"\".format(self.mysql_container)\n sql_command = \"mysql {} --execute='SHOW TABLES FROM {};'\".format(self.mysql_credentials, self.database_name)\n table_list = self.shell(container_command.format(sql_command))\n table_list = table_list.split(\"\\n\")\n assert table_list[0] == \"Tables_in_{}\".format(self.database_name)\n return table_list[1:]",
"def get_all_restaurants():\n return list(Restaurant.objects.all().values())",
"def get_tables(self, db_name):\n pass",
"def get_a_table(restaurant_id, number_of_people, booking_datetime, excluded=-1):\r\n\r\n is_open, rest = restaurant_is_open(restaurant_id, booking_datetime) # check is the restaurant is open on that date\r\n if is_open is None: # connection error with the restaurant microservice\r\n return None\r\n if not is_open: \r\n return -2\r\n\r\n tables = get_tables(restaurant_id) # return the list of tables of the restaurant\r\n\r\n if tables is None: # connection error with the restaurant microservice\r\n return None\r\n if tables == []:\r\n return -1\r\n\r\n delta = int(rest[\"occupation_time\"])\r\n starting_period = booking_datetime - datetime.timedelta(hours=delta)\r\n ending_period = booking_datetime + datetime.timedelta(hours=delta)\r\n\r\n # the list of the tables occupied or booked in the same period as the booking\r\n occupied = db.session.query(Booking.table_id).select_from(Booking)\\\r\n .filter(Booking.restaurant_id == restaurant_id)\\\r\n .filter(starting_period < Booking.booking_datetime)\\\r\n .filter(Booking.booking_datetime < ending_period )\\\r\n .filter(Booking.id != excluded)\\\r\n .all()\r\n \r\n free_tables = [t for t in tables if ( ((t[\"id\"],) not in occupied) and (t[\"capacity\"] >= number_of_people) )] # returns the free table usable by this number of people\r\n free_tables.sort(key=lambda x:x[\"capacity\"]) # order the tables from the smaller\r\n\r\n if free_tables == []: # no free tables\r\n return -1\r\n return free_tables[0][\"id\"] # return the smaller table that can be used\r",
"def test(self):\r\n # Establish connection and execute a query that returns\r\n # a table with the names of each table in the database.\r\n # Close the connection. Convert the table to a list and\r\n # return it.\r\n try:\r\n Connection = mariadb.connect(\r\n user = self.Name,\r\n host = self.Host,\r\n password= self.Password,\r\n port=3306)\r\n TestQuery = Connection.cursor()\r\n TestQuery.execute('USE moleculardata')\r\n TestQuery.execute('SHOW TABLES')\r\n TestQuery.close()\r\n Connection.close()\r\n return [j for sub in TestQuery for j in sub]\r\n # Exception to catch database errors. Exceptions could include:\r\n # problem connecting to the database or errors in the data query\r\n # request. \r\n # Returns an empty list.\r\n except mariadb.Error as e:\r\n print('Unable open connection {}.'.format(e))\r\n return[]",
"def test_get_tables(self):\n url = \"/get_tables\"\n data = {\n \"restaurant\": 1\n }\n response = app.test_client().post(url,\n json=data,\n content_type='application/json')\n assert response.status_code == 200, logging.error(\n \"Getting Tables Failed!\")\n logging.info(\"GET Tables Tested!\")",
"async def test_tornado_list_tables(self):\n\n tables = self.r.table_list().run(self.conn)\n assert isinstance(tables, list)",
"def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]",
"def query_tables(self):\n # Find all tables\n tables_q = \"SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE \\'sqlite_%\\';\"\n tables = self.query(tables_q)\n # print(tables)\n return tables",
"def showTables(database: str) -> list:\n\n bd = _database(database)\n\n if bd:\n\n temp = []\n\n for tabla in bd[\"tablas\"]:\n temp.append(tabla[\"nombre\"])\n\n return temp\n\n else:\n return None",
"def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n view_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())",
"def tables(cls):\n if not hasattr(cls, '_tables'):\n cls.parse_attributes()\n return cls._tables",
"def list_tables(self):\n return LIST_TABLES(db=self.db)",
"def _get_table(self):\n\t\treturn self._table",
"def get(self):\n return TableDetails.query.all(), 200",
"def showTables():\n global cursor\n #cursor.execute('SELECT * FROM *')\n cursor.execute('''SELECT * FROM sqlite_master WHERE type='table' ''')\n\n tables = cursor.fetchall()\n print \"Tables available are:\"\n print tables[0]",
"def get(self):\n active_tables = []\n tables = TableDetails.query.all()\n for table in tables:\n if table.table_status != \"Empty\":\n active_tables.append(table)\n return active_tables, 200",
"def _get_table(self, cursor):\n raise NotImplementedError"
]
| [
"0.61924666",
"0.6052941",
"0.59938204",
"0.59800273",
"0.5927211",
"0.58977866",
"0.58351445",
"0.5808453",
"0.57723635",
"0.57723635",
"0.5734585",
"0.5732754",
"0.57127583",
"0.5697837",
"0.56965154",
"0.5685882",
"0.5673168",
"0.5667938",
"0.5657687",
"0.5557201",
"0.5543739",
"0.55315673",
"0.552563",
"0.5507685",
"0.5503799",
"0.5481224",
"0.54687905",
"0.544986",
"0.54491526",
"0.5445509"
]
| 0.74126005 | 0 |
Add a new reservation. Return the booking id, otherwise return None if a db error occurred | def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):
try:
booking = Booking()
booking.restaurant_id = rest_id
booking.user_id = user_id
booking.booking_datetime = booking_datetime
booking.entrance_datetime = entrance_datetime
booking.number_of_people = number_of_people
booking.table_id = table_id
booking.datetime = datetime.datetime.now()
db.session.add(booking)
db.session.commit()
return booking.id
except:
db.session.rollback()
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_reservation(house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm):\n sql = \"\"\"INSERT INTO %s VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s', %s, '%s', %s) RETURNING reservation_id;\"\"\"\n conn = None\n reservation_id = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n # execute the INSERT statement\n print(sql % (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n cur.execute(sql, (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n # get the generated id back\n vendor_id = cur.fetchone()[0]\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n return reservation_id",
"def reservation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reservation_id\")",
"async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})",
"def booking_insert_query(booking_date, pass_id):\n q = \"\"\"\n INSERT INTO booking(booking_date, pass_id)\n VALUES (%s, %s);\n\n SELECT currval(pg_get_serial_sequence('booking','booking_id'));\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(q, (booking_date, pass_id))\n booking_id = cursor.fetchone()\n cursor.close()\n return booking_id",
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201",
"def reservation_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"reservation_id\")",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def SaveToReservationSQL(self, order, recipientid):\n\n # get counter, increase it, save counter, and use for reservation\n # managerid, recipientid,\n # insert reservation \\'{reservation_guid}\\\n # insert people\n # insert subclaim\n\n cursor = self.cursor\n\n reservation_guid = order[\"id\"]\n km_number = order[\"crmid\"]\n cursor.execute('select TOP 1 id from reservation where trash=0 and (guid=? or ndog=?) order by id desc',\n (reservation_guid, km_number))\n\n row = cursor.fetchone()\n if (not row):\n reservation_new = 1\n reservationid = None\n else:\n reservation_new = 0\n reservationid = row[0]\n\n # check subclaims\n # reservation_to_delete=row[0]\n # query='select id from subclaim where claimid=?'\n # cursor.execute(query,reservation_to_delete)\n # rows=cursor.fetchall()\n # if rows :\n # query='select number from reservation where id=?'\n # cursor.execute(query,reservation_to_delete)\n # row = cursor.fetchone()\n # self.number = row[0]\n\n # TODO - update existing reservation\n # return 0\n\n # query='update reservation set trash=1 where id=?'\n # cursor.execute(query,reservation_to_delete)\n\n # create reservation if it is missing\n\n if reservation_new == 0:\n\n cursor.execute('select number from reservation where id=? and trash=0', reservationid)\n row = cursor.fetchone()\n number = row[0]\n self.number = number\n\n else:\n number = km_number\n self.number = number\n\n print('Dogovor number ', number, 'KM', km_number, 'reservationid ', reservationid)\n\n manager_guid = order[\"manager\"][\"id\"]\n query = f'select id from recipient where guid=\\'{manager_guid}\\''\n cursor.execute(query)\n row = cursor.fetchone()\n humanid = row[0]\n\n guid = order[\"id\"]\n currency = order[\"cruises\"][0][\"currency\"]\n print(currency)\n\n date_created = datetime.fromisoformat(order[\"created\"][:order[\"created\"].find('.')])\n\n query = '''\ninsert into dbo.[reservation]\n([number], [cdate], [recipientid], [humanid], [officeid], [legalid], [statusid],\n [pdate], [currencyid],[ndog],[guid])\nvalues (?,?,?,?,?,?,?,?,?,?,?)\n'''\n\n # TODO officeid by manager, legalid by owner, statusid?\n ## if reservation is not exist create new, else update\n values = (\n km_number, date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency],\n order[\"crmid\"],\n guid)\n print(values)\n if (reservation_new == 1) and (km_number):\n cursor.execute(query, values)\n cursor.execute(\"select IDENT_CURRENT('reservation')\")\n row = cursor.fetchone()\n id = row[0]\n cursor.execute('exec ChangesLog_AddNew ?,?,?,?,?,?,?,?,?,?,?,?,?', (\n 'robot python', 1, 'reservation', id, km_number, 'reservation', id, str(id), None, None, '', None, ''))\n\n\n elif (reservation_new == 0) and (km_number):\n update_query = \"\"\" update dbo.[reservation] \n set cdate = ?, recipientid=?, humanid = ?, officeid=?, legalid=?, statusid=?, pdate=?, currencyid=?, guid =?, ndog = ? where id=?\"\"\"\n cursor.execute(update_query, (\n date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency], guid, km_number,\n reservationid))\n id = reservationid\n else:\n id = 0\n return id, reservation_new",
"def validate_and_save(self, reservation, form):\n if not reservation.validate():\n context_data = self.get_context_data(reservation=reservation)\n context_data[\"error\"] = self.get_error_message(form, reservation)\n return render(self.request, self.template_name, context_data)\n\n reservation.save()\n return redirect(calendar_url_reservation(reservation))",
"def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. Exception: \", str(ex))\n return jsonify(None)",
"def reservation(self):\n return self.request.get('reservation', None)",
"def insert_meeting(self, title, start_date, end_date):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.insert_sql, (title, start_date, end_date))\n meeting_id = cursor.fetchone()['id']\n connection.commit()\n\n cursor.close()\n db_connection.close_connection()\n except psycopg2.DatabaseError as e:\n raise\n\n else:\n\n return meeting_id",
"def select_reservation(self, ctx: dataclasses.dataclass) -> ResultE[dataclasses.dataclass]:\n pk = cf.get_int_or_none(ctx.pk) or 0\n if pk <= 0:\n return self._error('Missed Reservation ID', ctx, ReservationErrors.missed_reservation)\n try:\n data = self._reservations_repo.get(pk)\n except Exception as err:\n return self._error(\n f\"Error select Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.error, exc=err\n )\n if data == Nothing:\n return self._error(\n f\"Unknown Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.missed_reservation\n )\n if hasattr(ctx, 'source'):\n ctx.source = data.unwrap()\n else:\n ctx.reservation = data.unwrap()\n return Success(ctx)",
"def update_booking(booking_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = db.session.query(Booking).filter_by(id = booking_id).first()\r\n if booking is None:\r\n return None\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None",
"def reservation_update(token_user, res_id):\n if not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n res = Reservation.query.get(res_id)\n if res is None:\n abort(400, 'invalid reservation id')\n\n if not token_user.has_permission('reservation.update.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.update')):\n abort(403, 'insufficient permissions to update reservation')\n\n res.room = room\n res.start = start\n res.end = end\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().commit()\n\n return '', 204",
"def addBooking(self, booking):\n self.bookings.addBooking(booking.getID())",
"def create_reservation(self, gs_id, vehicle_id, user_id):\n\n # create the reservation\n reservation = Reservation(self.settings, gs_id, vehicle_id, user_id)\n status, model = reservation.create()\n\n # return status\n if status:\n json_res = model.to_json()\n return True, json_res\n else:\n return False, None",
"def save_car_reservation(car_id, username, date_from, date_to):\n car = get_car_identified_by_id(car_id)\n price = calc_total_price(car.price, date_from, date_to)\n session = start_session()\n new_car_reservation = CarReservation(car_id, username, date_from, date_to, price)\n session.add(new_car_reservation)\n session.commit()\n queryset = session.query(CarReservation).filter(and_(CarReservation.id_car.__eq__(car_id),\n CarReservation.id_user.__eq__(username),\n CarReservation.date_from.__eq__(date_from),\n CarReservation.date_to.__eq__(date_to),\n CarReservation.price.__eq__(price)))\n reservation = queryset2list(queryset)[0]\n session.close()\n return reservation.id_reservation",
"def post(self, flight_id):\n data = request.get_json()\n seat = 1\n if data:\n seat = data.get('seat')\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n if seat == 1 and flight.booked_economy < flight.airplane.economy_seats:\n data = dict(booked_economy=flight.booked_economy+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Economy seat flight reservation successfull', 201)\n\n if seat == 2 and flight.booked_business < flight.airplane.business_seats:\n data = dict(booked_business=flight.booked_business+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Business seat flight reservation successfull', 201)\n\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401",
"def test_reservation_id_one_instance(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)",
"def room_add():\n if not json_param_exists('number'):\n abort(400, 'invalid room number')\n\n if not isinstance(request.json['number'], str):\n abort(400, 'room number must be string')\n\n num = request.json['number']\n room = Room(number=num)\n\n try:\n get_db().add(room)\n get_db().commit()\n except IntegrityError:\n abort(409, 'room number is already in use')\n return json.dumps(room.as_dict(include_features=False)), 201",
"def AddOrGetRescuerId(self, firstname, lastname, title, phone,\n mobile, email, address, suburbId):\n rescuerID = None\n args = [firstname, lastname, title, phone,\n mobile, email, address, suburbId]\n try:\n result_args = self.cursor.callproc(\"add_or_get_rescuer\", args)\n # process the result\n for result in self.cursor.stored_results():\n for r in result:\n # this should be the rescuer's id number if success\n rescuerID = r[0]\n # else error message caught\n self.conn.commit()\n return rescuerID\n except Exception as e:\n return \"Error:\" + e.message",
"def reservation(self):\n return self._reservation",
"def get_res_by_id(res_id):\n # look up ID, if non-exist return error message\n res = session.query(Reservation).filter_by(id=res_id).first()\n if not res:\n return jsonify({'error': 'no reservation with id {} found'.format(res_id)}), 400\n return jsonify({'reservation': res.serialize()})",
"def _post(self, data):\n new_ticket_id = DB_TICKET_TABLE.insert(data)\n return new_ticket_id",
"def insertRoom(cursor, room: Room) -> None:\n\n # TODO: Calculate the values. room object may not contain the creator\n # or maybe remove that feature\n cursor.execute(\n \"INSERT INTO rooms VALUES (?, ?, ?)\",\n (roomID, roomName, creator)\n )",
"def reservation(self, reservation):\n\n self._reservation = reservation",
"def cancel_room():\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})",
"def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)",
"async def add_hotel_endpoint(request):\n hotel_name = request.args[\"hotel_name\"][0]\n hotel_id = model.add_hotel(hotel_name)\n return json({\"hotel_id\": hotel_id})"
]
| [
"0.6992789",
"0.69501656",
"0.68939716",
"0.67820656",
"0.6615095",
"0.66042477",
"0.64920753",
"0.62454337",
"0.61765397",
"0.6087743",
"0.6031231",
"0.60201854",
"0.59403574",
"0.59373236",
"0.5842294",
"0.5815116",
"0.57967556",
"0.5778315",
"0.5752786",
"0.5684641",
"0.5646357",
"0.5635363",
"0.5624297",
"0.5594418",
"0.5524542",
"0.55028635",
"0.5496418",
"0.54651076",
"0.5403308",
"0.535773"
]
| 0.71016914 | 0 |
Return the id of a free table if one is available; otherwise return -1 if there are no free tables, -2 if the restaurant is closed, or None if it is impossible to connect with the restaurant microservice. | def get_a_table(restaurant_id, number_of_people, booking_datetime, excluded=-1):
is_open, rest = restaurant_is_open(restaurant_id, booking_datetime) # check is the restaurant is open on that date
if is_open is None: # connection error with the restaurant microservice
return None
if not is_open:
return -2
tables = get_tables(restaurant_id) # return the list of tables of the restaurant
if tables is None: # connection error with the restaurant microservice
return None
if tables == []:
return -1
delta = int(rest["occupation_time"])
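    # two bookings on the same table conflict if they fall within 'occupation_time' hours of each other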
starting_period = booking_datetime - datetime.timedelta(hours=delta)
ending_period = booking_datetime + datetime.timedelta(hours=delta)
# the list of the tables occupied or booked in the same period as the booking
occupied = db.session.query(Booking.table_id).select_from(Booking)\
.filter(Booking.restaurant_id == restaurant_id)\
.filter(starting_period < Booking.booking_datetime)\
.filter(Booking.booking_datetime < ending_period )\
.filter(Booking.id != excluded)\
.all()
    free_tables = [t for t in tables if ( ((t["id"],) not in occupied) and (t["capacity"] >= number_of_people) )] # the free tables that can seat this number of people
    free_tables.sort(key=lambda x:x["capacity"]) # order the tables from the smallest capacity up
if free_tables == []: # no free tables
return -1
    return free_tables[0]["id"] # return the smallest table that can be used
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n free_tables = []\n tables = TableDetails.query.all()\n for table in tables:\n if table.table_status == \"Empty\":\n free_tables.append(table)\n return free_tables, 200",
"def _get_or_create_table(self):\n\n table_schema = self._get_table_schema()\n try:\n table_description = self.client.create_table(**table_schema)\n logging.info('DynamoDB Table %s did not exist, creating.',self.table_name)\n\n # In case we created the table, wait until it becomes available.\n self._wait_for_table_status('ACTIVE')\n logging.info('DynamoDB Table %s is now available.',self.table_name)\n\n self.client.update_time_to_live(\n TableName=self.table_name,\n TimeToLiveSpecification={\n 'Enabled': True,\n 'AttributeName': self._expiry_field.name\n }\n )\n logging.info('DynamoDB Table %s now expires items',self.table_name)\n\n return table_description\n\n except ClientError as e:\n error_code = e.response['Error'].get('Code', 'Unknown')\n # If table exists, do not fail, just return the description.\n if error_code == 'ResourceInUseException':\n return self.client.describe_table(TableName=self.table_name)\n else:\n raise e",
"def available(self) -> bool:\n return self._table.is_connected",
"def check_table(table_name = None):\n\n if table_name is None:\n table_name = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n \n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\n USE %s\n \"\"\"%(config['db'], ))\n\n cur.execute(\"\"\"\n SHOW TABLES;\n \"\"\")\n \n all_tables = cur.fetchall()\n if (table_name,) in all_tables:\n result = True\n else:\n result = False\n except Exception as e:\n print(\"check_table FAILED\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result",
"def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()",
"def table(self):\n if not self.exists:\n return None\n return self._get_table()",
"def is_available():",
"def Available(self) -> int:",
"def Available(self) -> int:",
"def Available(self) -> int:",
"def __len__(self):\n return self.dbms.getNbTables(self.db)",
"def has_table(self, table_name, timeout):\n _abstract()",
"def has_table(self, table_name, timeout):\n _abstract()",
"def get_table_count(table_name):\n conn = get_connect()\n cursor = conn.execute(\"SELECT COUNT(*) FROM \" + table_name)\n count = cursor.fetchall()[0][0]\n conn.close()\n return count",
"def get_tables(id):\r\n with current_app.app_context():\r\n if current_app.config[\"USE_MOCKS\"]:\r\n id -= 1 # restaurant IDs starting by 1\r\n if 0 <= id < len(restaurants):\r\n return tables[id]\r\n else:\r\n return None\r\n else:\r\n return get_from(current_app.config[\"REST_SERVICE_URL\"]+\"/restaurants/\"+str(id)+\"/tables\")",
"def has_table(self, table):\n con = self.connection\n cur = con.cursor()\n res = cur.execute(\"\"\"SELECT COUNT(*) FROM sqlite_master\n WHERE type='table' AND name='%s'\"\"\" % table)\n tcnt = cur.fetchall()\n cur.close()\n if tcnt[0][0] > 0:\n return True\n else:\n return False",
"def check_table(self):\n self.missing()\n return self._table(self._data_list)",
"def check_if_table_exists(self, table_name):\n cursor = self.conn.cursor()\n cursor.execute(\"SELECT EXISTS(SELECT * FROM information_schema.tables WHERE table_name=%s)\", (table_name,)), \n self.conn.commit()\n return cursor.fetchone()[0]",
"def is_todo_table_empty(self):\n cur = self.conn.execute(\"\"\"SELECT COUNT(*) FROM todo;\"\"\")\n if cur != None:\n row = cur.fetchone()\n if row[0] == 0:\n return True\n else:\n return False\n print(\"is_todo_table_empty: table does not exist\")",
"def get_table_size(self, table):\n sql = f''' SELECT COUNT(*) FROM {table}'''\n connection = self.__create_connection()\n cur = connection.cursor()\n cur.execute(sql)\n return cur.fetchone()[0]",
"def _get_table(self):\n\t\treturn self._table",
"def table_exists(table_name):\n cnx = create_connection()\n cursor = cnx.cursor()\n try:\n cursor.execute(\"SHOW TABLES LIKE '\" + table_name + \"'\")\n except:\n cnx.close()\n cursor.close()\n print(\"Show Tables query didn't execute.\")\n return\n\n rows = cursor.fetchall()\n cnx.close()\n cursor.close()\n\n if len(rows):\n return True\n else:\n return False",
"def table_exists(self, mode):\n\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n\n return bool(ref)",
"def find_table(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n if table == '*':\n tb_exists = \"SELECT name FROM sqlite_master WHERE type='table'\"\n else:\n tb_exists = \"SELECT name FROM sqlite_master WHERE type='table' AND name='\" + table + \"'\"\n fetched = conn.execute(tb_exists).fetchone()\n conn.close()\n except Exception as e:\n print(\"Error when trying to find table \" + table + \" in database file \" + db_file)\n return False\n else:\n return fetched",
"def getNumTables(self):\n return self.numtables",
"def check_link_availability(self, link):\n circuits = self.load_circuits()\n total = 0\n for circuit in circuits:\n exists = circuit.get_link(link)\n if exists:\n total += exists.bandwidth\n if total + link.bandwidth > 100000000000: # 100 Gigabits\n return None\n return total",
"def _get_table(self, cursor):\n raise NotImplementedError",
"def showTables():\n global cursor\n #cursor.execute('SELECT * FROM *')\n cursor.execute('''SELECT * FROM sqlite_master WHERE type='table' ''')\n\n tables = cursor.fetchall()\n print \"Tables available are:\"\n print tables[0]",
"def use_table(self):\n connection = self._get_connection()\n cursor = connection.cursor()\n cursor.execute(\n 'select exists(select * from information_schema.tables where table_name=%s)',\n (self.table,),\n )\n if cursor.fetchone()[0]:\n self.logger.info('Using existing table')\n else:\n try:\n cursor.execute(\n f'CREATE TABLE {self.table} ( \\\n ID VARCHAR PRIMARY KEY, \\\n DOC BYTEA);'\n )\n self.logger.info('Successfully created table')\n except (Exception, psycopg2.Error) as error:\n self.logger.error('Error while creating table!')\n connection.commit()\n self._close_connection(connection)",
"def free_tier():\n return AccountTier.objects.get(id=1)"
]
| [
"0.59448147",
"0.57143354",
"0.56816226",
"0.5603177",
"0.5588787",
"0.54065543",
"0.5360168",
"0.53469735",
"0.53469735",
"0.53469735",
"0.5253404",
"0.5236414",
"0.5236414",
"0.5196033",
"0.5178952",
"0.51766515",
"0.5149163",
"0.51316524",
"0.5122474",
"0.5119748",
"0.5105781",
"0.510488",
"0.5087896",
"0.50776005",
"0.5074328",
"0.5052721",
"0.5050397",
"0.50454503",
"0.5036718",
"0.5036072"
]
| 0.657818 | 0 |
Check if a restaurant is open at a given datetime. Return true if the restaurant is open (with the json of the restaurant), false if the restaurant is closed (with the json of the restaurant), or None if it is impossible to connect with the restaurant microservice. | def restaurant_is_open(restaurant_id, booking_datetime):
rest = get_restaurant(restaurant_id)
if rest is None: # error with the microservice
return (None,None)
else:
if (booking_datetime.weekday()+1) in rest["closed_days"]:
return (False,rest)
now = datetime.datetime.now()
booking = now.replace( hour=booking_datetime.hour, minute=booking_datetime.minute, second=0, microsecond=0 )
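        # a restaurant may have one or two opening windows per day; accept the booking if it falls inside either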
if rest["first_opening_hour"] is not None and rest["first_closing_hour"] is not None:
opening = now.replace( hour=int(rest["first_opening_hour"]), minute=0, second=0, microsecond=0 )
closing = now.replace( hour=int(rest["first_closing_hour"]), minute=0, second=0, microsecond=0 )
if opening <= booking <= closing:
return (True,rest)
if rest["second_opening_hour"] is not None and rest["second_closing_hour"] is not None:
opening = now.replace( hour=int(rest["second_opening_hour"]), minute=0, second=0, microsecond=0 )
closing = now.replace( hour=int(rest["second_closing_hour"]), minute=0, second=0, microsecond=0 )
if opening <= booking <= closing:
return (True,rest)
return (False,rest) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restaurant_opening(H):\n\n if len(H) == 0:\n return [False,False,False]\n\n # opening hours that only state that the shop open leads to unreliable\n # results. This is because the shop never close ! so will be available\n # all the time.\n if all_open(H):\n return [False,False,False]\n\n # hack\n H = before_midnight(H)\n\n breakfast_ranges = [(\"08:30\",\"09:00\"),(\"09:00\",\"09:30\"),(\"09:30\",\"10:00\"),(\"10:00\",\"10:30\")]\n dinner_ranges = [(\"11:30\",\"11:45\"),(\"11:45\",\"12:00\"),(\"12:15\",\"12:30\"),(\"12:30\",\"12:45\"),(\"12:45\",\"13:00\"),(\"13:15\",\"14:15\")]\n supper_ranges = [(\"17:00\",\"18:00\"), (\"18:00\",\"19:00\"), (\"19:00\",\"20:00\"), (\"20:00\",\"21:00\")]\n\n breakfast_opened = any(businesshours(H, breakfast_ranges))\n dinner_opened = any(businesshours(H, dinner_ranges))\n supper_opened = any(businesshours(H, supper_ranges))\n\n result = [breakfast_opened,dinner_opened,supper_opened]\n return result",
"def marketOpen():\n tz = conf['GLOBAL']['timezone']\n today = datetime.today().astimezone(pytz.timezone(tz))\n today_fmt = today.strftime('%Y-%m-%d')\n tdHoursURL = conf['TD']['hoursURL']\n key = conf['TD']['key']\n params = {\n 'apikey': key,\n 'date': today_fmt\n }\n\n request = requests.get(\n url=tdHoursURL,\n params=params\n ).json()\n \n \n if request['equity']['EQ']['isOpen'] is True:\n return(True)\n else:\n return(False)",
"def is_opening(random_act, starttime, endtime):\n weekday = starttime.isoweekday()\n\n opening_time_interval = config.RANDOM_ACTIVITY_CONFIG[\n config.OPENING_TIME][random_act][weekday]\n open_time = starttime.replace(\n hour=0, minute=0,\n second=0, microsecond=0) + opening_time_interval[0]\n close_time = starttime.replace(\n hour=0, minute=0,\n second=0, microsecond=0) + opening_time_interval[1]\n # print(open_time, starttime, close_time)\n if open_time <= starttime < close_time:\n return True\n else:\n return False",
"def office_is_open_on_datetime(iso_datetime):\n is_open = False\n d_time = datetime.fromisoformat(iso_datetime)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n if schedule:\n begin_time = datetime.combine(d_date, schedule['begin'])\n end_time = datetime.combine(d_date, schedule['end'])\n if begin_time <= d_time <= end_time:\n is_open = True\n\n return is_open",
"def check_if_open(bursa: pd.DataFrame, exchange: str) -> bool:\n exchange = exchange.upper()\n if exchange in bursa.index.values:\n tz = bursa.loc[exchange][\"timezone\"]\n exchange_df = bursa.loc[exchange]\n elif exchange in bursa[\"short_name\"].values:\n tz = bursa.loc[bursa[\"short_name\"] == exchange][\"timezone\"].values[0]\n exchange_df = bursa.loc[bursa[\"short_name\"] == exchange]\n exchange_df = exchange_df.iloc[0].transpose()\n utcmoment_naive = datetime.utcnow()\n utcmoment = utcmoment_naive.replace(tzinfo=pytz.utc)\n local_datetime = utcmoment.astimezone(pytz.timezone(tz))\n market_open = datetime.strptime(exchange_df[\"market_open\"], \"%H:%M:%S\")\n market_close = datetime.strptime(exchange_df[\"market_close\"], \"%H:%M:%S\")\n after_market_open = local_datetime.time() >= market_open.time()\n before_market_close = local_datetime.time() <= market_close.time()\n try:\n lunchbreak_start = datetime.strptime(\n exchange_df[\"lunchbreak_start\"], \"%H:%M:%S\"\n )\n lunchbreak_end = datetime.strptime(exchange_df[\"lunchbreak_end\"], \"%H:%M:%S\")\n\n after_lunch_start = local_datetime.time() >= lunchbreak_start.time()\n before_lunch_end = local_datetime.time() <= lunchbreak_end.time()\n except Exception:\n after_lunch_start = False\n before_lunch_end = False\n\n if local_datetime.weekday() >= 5:\n result = False\n else:\n result = (\n after_market_open\n and before_market_close\n and not (after_lunch_start and before_lunch_end)\n )\n\n return result",
"def check_market_status():\n # today = datetime.datetime.now(pytz.timezone('America/New_York')).date()\n today_utc = pd.to_datetime('now').date()\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)\n if today_utc in open_days.index:\n return open_days\n else:\n return None",
"def office_is_open_on_date(iso_date):\n d_time = datetime.fromisoformat(iso_date)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n return schedule != {}",
"def check_market_status():\n today_ny = datetime.datetime.now(pytz.timezone('America/New_York'))\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)\n if today_ny.date() in open_days.index:\n return open_days\n else:\n return None",
"def json_has_access_now(self, json_str):\n\n day2day = {'mon': 0,\n 'tues': 1,\n 'wed': 2,\n 'thurs': 3,\n 'fri': 4,\n 'sat': 5,\n 'sun': 6\n }\n\n try:\n today = datetime.date.today().weekday()\n cur_time = datetime.datetime.now().time()\n data = json.loads(str(json_str))\n #print(\"data = {}\\n\\n\".format(data))\n for day, times in data.items():\n #print(\"day = [{}] day2day: [{}]\".format(today, day2day[day]))\n if today == day2day[day]:\n start_t = datetime.datetime.strptime(times['start'], '%H:%M:%S').time()\n end_t = datetime.datetime.strptime(times['end'], '%H:%M:%S').time()\n #print(\"{} <= {} and {} >= {}\".format(start_t, cur_time, end_t, cur_time))\n if start_t <= cur_time and end_t >= cur_time:\n return True\n\n except ValueError:\n print(\"ValueError!!!one1! \\njson_str = {}\".format(json_str))\n return False\n\n return False",
"def is_opening(self):\n now = timezone.now()\n return self.start_date.date() >= now.date()",
"def opened_at(self, datetime: datetime) -> None:",
"def check_open():\n print(\"***** Check if Business is Open/Closed *****\")\n while True:\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n if business_object['is_open'] == 1:\n print(\"This business is open!\")\n else:\n print(\"This business is closed!\")\n\n print()\n\n print_business(business_object)",
"def __wait_for_door_to_open(self, check_freq, min_dist):\n ##\n try:\n rate = rospy.Rate(check_freq)\n current_distance = 0.0\n while not rospy.is_shutdown() and current_distance < min_dist:\n current_distance = self._minFrontValueSP().value\n rate.sleep()\n rospy.loginfo(\"************ DOOR IS OPENED ! ****************\")\n return GoalStatus.SUCCEEDED, True\n except rospy.ServiceException as e:\n rospy.logerr(\"Service min_front_value_srv could not process request: {error}\".format(error=e))\n return GoalStatus.ABORTED, None\n except Exception as e:\n rospy.logerr(\"Service min_front_value_srv could not process request: {error}\".format(error=e))\n return GoalStatus.ABORTED, None",
"def polls_open(self):\n if self.is_city_of_london:\n return datetime.time(8, 0)\n\n return datetime.time(7, 0)",
"def check_availability(url_str, datetime_fetched=None):\n wayback_url = \"http://archive.org/wayback/available\"\n params = {\n 'url': url_str.split('?')[0],\n }\n if datetime_fetched is not None:\n params['timestamp'] = datetime_fetched.strftime(\"%Y%m%d%H%M%S\")\n\n response = requests.get(wayback_url, params=params, timeout=30)\n r_json = response.json()\n\n # let's be nice and convert the returned timestamp to a datetime obj\n # wayback timestamps are in the form YYYYMMDDhhmmss\n if \"archived_snapshots\" in r_json and \\\n \"closest\" in r_json['archived_snapshots'] and \\\n \"timestamp\" in r_json['archived_snapshots']['closest']:\n wb_timestamp = r_json['archived_snapshots']['closest']['timestamp']\n r_json['archived_snapshots']['closest']['datetime'] = datetime.strptime(wb_timestamp, \"%Y%m%d%H%M%S\")\n\n return r_json",
"def open_restaurant(self):\n\t\tprint(f\"The restaurant is open.\")",
"def open_restaurant(self):\r\n print(\"The restaurant is open now \")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is now open!\")",
"async def get(\n self, request: web.Request, datetime: Optional[str] = None\n ) -> web.Response:\n datetime_ = None\n if datetime:\n datetime_ = dt_util.parse_datetime(datetime)\n\n if datetime_ is None:\n return self.json_message(\"Invalid datetime\", HTTP_BAD_REQUEST)\n\n now = dt_util.utcnow()\n\n one_day = timedelta(days=1)\n if datetime_:\n start_time = dt_util.as_utc(datetime_)\n else:\n start_time = now - one_day\n\n if start_time > now:\n return self.json([])\n\n end_time = request.query.get(\"end_time\")\n if end_time:\n end_time = dt_util.parse_datetime(end_time)\n if end_time:\n end_time = dt_util.as_utc(end_time)\n else:\n return self.json_message(\"Invalid end_time\", HTTP_BAD_REQUEST)\n else:\n end_time = start_time + one_day\n entity_ids = request.query.get(\"filter_entity_id\")\n if entity_ids:\n entity_ids = entity_ids.lower().split(\",\")\n include_start_time_state = \"skip_initial_state\" not in request.query\n significant_changes_only = (\n request.query.get(\"significant_changes_only\", \"1\") != \"0\"\n )\n\n minimal_response = \"minimal_response\" in request.query\n\n hass = request.app[\"hass\"]\n\n return cast(\n web.Response,\n await hass.async_add_executor_job(\n self._sorted_significant_states_json,\n hass,\n start_time,\n end_time,\n entity_ids,\n include_start_time_state,\n significant_changes_only,\n minimal_response,\n ),\n )",
"def opened_at(self) -> datetime | None:",
"def validate_current_time(self,data):\n # cuando la fecha actual esta adelnta de la fecha de inicio eso significa que el viaje ya inicio\n # si ya inicio todo esta bien \n ride = self.context['view'].get_object()\n if data <= ride.departure_date:\n raise serializers.ValidationError('Ride has not started yet')\n\n return data",
"def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n return False",
"def open_restaurant(self):\r\n\t\tprint(self.restaurant_name.title() + \" is open\")",
"def test_GET_startdate(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n resl = self.client().get('/expenses/?start_date=01-01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 200)\n results = json.loads(resl.data)\n self.assertEqual(results['items'][0]['date_of_expense'], self.expense['date_of_expense'])",
"def open_restaurant(self):\n\t\tprint(\"The restaurant is now open!\")",
"def acceptable(self):\n now = datetime.datetime.now()\n origin = datetime.datetime.combine(self.date, datetime.time.min)\n start = origin + datetime.timedelta(hours=6)\n end = origin + datetime.timedelta(days=1)\n morning = end + datetime.timedelta(hours=6)\n if now < origin or now > morning:\n return 0\n if now >= end or now <= start:\n return 1\n return 3",
"def is_open(self) -> bool:\n return self.__interval is not None",
"def contact_now(date: str) -> bool:\n\n time_date = string_to_datetime(date)\n return date_is_today(time_date) or date_is_in_past(time_date)"
]
| [
"0.64628744",
"0.64410496",
"0.63645446",
"0.6307019",
"0.6250015",
"0.59982455",
"0.594884",
"0.58953434",
"0.5853086",
"0.58084",
"0.570417",
"0.55151856",
"0.5453661",
"0.53701514",
"0.53474367",
"0.53282684",
"0.53273594",
"0.53005165",
"0.53005165",
"0.53005165",
"0.529688",
"0.52804846",
"0.52668506",
"0.52517045",
"0.5220505",
"0.52037674",
"0.5173557",
"0.5161971",
"0.5160314",
"0.51513827"
]
| 0.82387584 | 0 |
Given a user id, get all application cases that belong to the Company the user is in | def get_application_cases_by_uid(uid, conditions=[]):
cid = employee.get_employee_by_user(userid=uid).company.id
return ApplicationCase.objects.filter(job__company_id=cid, *conditions) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_companies(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in userCompanies]",
"def for_company(cls, company_id):\n return cls.objects.filter(vacancy__company__id=company_id)",
"def get_all_companies_and_people():",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_companies(request):\n companies = Company.objects.all()\n context={'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)",
"def get_isAdminOf(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"change_company\", klass=models.Company, accept_global_perms=False)\n return [x.id for x in userCompanies]",
"def get_companies(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in groupCompanies]",
"def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []",
"def get_application_by_user_id(user_id):\n try:\n cursor.execute(\"select * from applications where user_id = %s\", (user_id,))\n retrieved_application = cursor.fetchone()\n retrieved_application = Application(id=retrieved_application[0], party_name=retrieved_application[1],\n office_name=retrieved_application[2], user_id=retrieved_application[3],\n date_created=retrieved_application[4],status=retrieved_application[5])\n return retrieved_application.json_dumps()\n except Exception:\n return False",
"def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs",
"def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass",
"def get_conections(user_id, limit):\n url_conect = \"https://bio.torre.co/api/people/{}/connections?limit={}\".format(user_id, limit)\n request = get(url_conect)\n if request.status_code != 200:\n abort(404, 'Not Found')\n return request.json()",
"def get_all_incidents_created_by_a_user(self, createdby):\n sql = \"SELECT * FROM incidences WHERE incidences.createdBy=\\'%s\\'\" % (\n createdby)\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output",
"def available_clients(self, user_id: int) -> List[str]:\n return list(self.clients[user_id])",
"def get_queryset(self):\n user_full = UserFullName.objects.get(id=self.request.user.id)\n queryset = CaseTrack.objects.filter(user_to=user_full, done=False)\n return queryset",
"def test_companies_company_id_connections_connection_id_data_bank_accounts_get(self):\n pass",
"def fetch_incoming(user_id):\n user = user_collection.find_one({\"_id\": user_id})\n user_incomings = [] if user[\"incoming\"] is None else user[\"incoming\"]\n incomings_list = list()\n for item in user_incomings:\n requested_user_id = user_collection.find_one({\"_id\": item[\"user_id\"]})[\n \"userid\"\n ]\n requested_project_name = project_collection.find_one(\n {\"_id\": item[\"project_id\"]}\n )[\"projectTitle\"]\n requested_project_id = project_collection.find_one(\n {\"_id\": item[\"project_id\"]}\n )[\"_id\"]\n requested_project_owner = project_collection.find_one(\n {\"_id\": item[\"project_id\"]}\n )[\"owner\"]\n incomings_list.append(\n {\n \"user\": requested_user_id,\n \"requestedProject\": requested_project_name,\n \"project_id\": requested_project_id,\n \"project_owner\": requested_project_owner,\n }\n )\n return incomings_list",
"def all_companies(login_details):\n output = None\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL;'\n\n c, conn = connection(login_details)\n try:\n c.execute(sql)\n values = c.fetchall()\n if values is not None:\n output = values\n finally:\n conn_close(c, conn)\n\n return output",
"def get_companies(self):\n response = self.do_request('/undertaking/list')\n if response:\n return response.json()",
"def get_active_company(request):\n from project.models import get_user_profile_ex\n profile = get_user_profile_ex(request.user)\n try:\n company = profile.active_company\n except:\n company = None\n if company is None:\n raise Exception('Please select active company in user\\'s profile')\n return company",
"def test_companies_company_id_connections_connection_id_data_bank_accounts_account_id_get(self):\n pass",
"def get_developer_apps_by_user(user_id: int) -> List[Dict]:\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n developer_apps = (\n session.query(DeveloperApp)\n .filter(\n DeveloperApp.user_id == user_id,\n DeveloperApp.is_current == True,\n DeveloperApp.is_delete == False,\n )\n .all()\n )\n return query_result_to_list(developer_apps)",
"def get_user_job_detail(user_id):\n\n return JobDetail.query.filter(JobCompletedApplication.user_id == user_id).join(JobCompletedApplication).order_by(JobCompletedApplication.application_date_submitted.desc()).all()",
"def requested_courses(self, user_id: str) -> np.ndarray:\n self.retrieve_leads()\n\n return self.leads_df[self.leads_df['user_id'] == user_id]['course_id'].values",
"def get_companies(self):\n url = 'companies'\n result = self.get(url)\n return result['companies']",
"def get_services_by_company(company_id: int) -> QuerySet:\n return ServiceDocument.search().filter(\"term\", **{\"company.id\": company_id}).to_queryset()",
"def get_available_companies(team):"
]
| [
"0.6131878",
"0.5925701",
"0.5477436",
"0.54555124",
"0.54555124",
"0.54555124",
"0.54555124",
"0.5412679",
"0.5296511",
"0.52784586",
"0.52468145",
"0.5147777",
"0.5003169",
"0.49420902",
"0.49417678",
"0.49276674",
"0.49187526",
"0.489076",
"0.48836246",
"0.4882932",
"0.48608795",
"0.48542374",
"0.4851066",
"0.4802125",
"0.47986224",
"0.47711655",
"0.47623459",
"0.4754432",
"0.4724011",
"0.47236446"
]
| 0.72097003 | 0 |
Generate satellite body geometry | def create_sat_body(self):
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Create vertices in body frame
ind = 0
V = []
for x in [-1, 1]:
for y in [-1, 1]:
for z in [-1, 1]:
V.append((bx*x, by*y, bz*z))
# Create faces
F = [
(0, 1, 3, 2),
(4, 5, 7, 6),
(0, 1, 5, 4),
(2, 3, 7, 6),
(0, 2, 6, 4),
(1, 3, 7, 5)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 0.5, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(1.0)
actor.GetProperty().SetSpecularPower(5.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __bodies_char(self):\n # Body values, G*M_body\n mu_body = {} # km^3 kg^-1 s^-2\n mu_body[\"Sun\"] = 132712440017.99\n mu_body[\"Moon\"] = 4902.8005821478\n mu_body[\"Earth\"] = 398600.4415\n\n mu_body[\"Mars\"] = 42828.314258067 # Mars, GM\n mu_body[\"Jupiter\"] = 126712767.8578 # Jupiter, GM\n mu_body[\"Saturn\"] = 37940626.061137 # Saturn, GM\n mu_body[\"Uranus\"] = 5794549.0070719 # Uranus, GM\n mu_body[\"Neptune\"] = 6836534.0638793 # Neptune, GM\n mu_body[\"Pluto\"] = 981.600887707 # Pluto, GM\n\n mu_body[\"Phobos\"] = 0.0007112 # Phobos, GM\n mu_body[\"Titan\"] = 8978.1382 # Titan, GM\n mu_body[\"Ganymede\"] = 9887.834 # Ganymede, GM\n mu_body[\"Titania\"] = 228.2 # Titania, GM\n mu_body[\"Triton\"] = 1427.598 # Triton, GM\n mu_body[\"Charon\"] = 102.30 # Charon, GM\n\n #########\n distances = {} # km, diistance between the two primaries\n distances[\"EarthMoon\"] = 384400\n distances[\"SunEarth\"] = 149600000\n\n distances[\"SunMars\"] = 227944135\n distances[\"SunJupiter\"] = 778279959\n distances[\"SunSaturn\"] = 1427387908\n distances[\"SunUranus\"] = 2870480873\n distances[\"SunNeptune\"] = 4498337290\n distances[\"SunPluto\"] = 5907150229\n\n distances[\"MarsPhobos\"] = 9376\n distances[\"JupiterGanymede\"] = 1070400\n distances[\"SaturnTitan\"] = 1221865\n distances[\"UranusTitania\"] = 436300\n distances[\"NeptuneTriton\"] = 354759\n distances[\"PlutoCharon\"] = 17536\n\n return mu_body, distances",
"def makePhysicsBody(self):\n space = self.environment.space\n geom = GeomBox(space, self.dim)\n geom.setPosition(self.centerPos)\n geom.setCategoryBits(2)\n geom.setCollideBits(1)\n self.geomList = [geom]",
"def _full_structure_geometry(self):\n # Characterized borehole structures\n borehole_structures = self._characterize_shearzones()\n\n # Tunnel shearzone data\n tunnel_structures = self.tunnel_structures\n\n structures = pd.concat(\n [borehole_structures, tunnel_structures], ignore_index=True, sort=False\n )\n\n # Fill NaN-values in all columns to 0 except in column 'shearzone', for which we do nothing.\n structures = structures.fillna(\n value={**{s: 0 for s in borehole_structures}, **{\"shearzone\": np.nan}}\n )\n\n mapping = {\n \"x\": \"x\",\n \"y\": \"y\",\n \"z\": \"z\",\n \"depth\": \"depth\",\n \"upward_gradient\": \"upward_gradient\",\n \"azimuth\": \"azimuth_bh\",\n }\n borehole_to_global_coords(structures, **mapping)\n\n return structures",
"def generateBody(self):\n # get the anims\n animDict = self.generateAnimDict()\n \n # NOTE: It is always phase 3.5 because the models are there\n # while everything else is in phase 5.\n filePrefix, bodyPhase = ModelDict[self.style.body]\n self.loadModel(\"phase_3.5\" + filePrefix + \"mod\")\n self.loadAnims(animDict)\n self.setSuitClothes()",
"def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return",
"def create_spacecraft_geometry():\r\n\r\n bounds_lower = [3, 7, 33]\r\n funcs_lower = [0, lambda y: y ** 1.5, 0]\r\n\r\n bounds_upper = None\r\n funcs_upper = 100\r\n\r\n x_max = 10\r\n x_min = 0\r\n resolution = 200\r\n\r\n spacecraft = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return spacecraft",
"def load_body(data):\n\n name = data[\"name\"]\n parent = None\n if \"parent\" in data:\n parent = data[\"parent\"]\n texture = data[\"texture\"]\n basecolor = data[\"basecolor\"]\n radius = data[\"radius\"]\n axial_tilt = data[\"axial_tilt\"]\n sidereal_rotation_period = data[\"sidereal_rotation_period\"] * dts\n mass = data[\"mass\"]\n has_orbit = False\n orbit = None\n has_ring = False\n ring_texture = None\n ring_inner_radius = None\n ring_outer_radius = None\n\n if \"orbit\" in data:\n has_orbit = True\n orbit = load_orbit(data[\"orbit\"])\n if \"ring\" in data:\n ring_data = data[\"ring\"]\n has_ring = True\n ring_texture = ring_data[\"texture\"]\n ring_inner_radius = ring_data[\"radius\"][\"inner\"]\n ring_outer_radius = ring_data[\"radius\"][\"outer\"]\n\n body = None\n\n if has_orbit:\n body = OrbitingBody(None, name, texture, basecolor, radius, orbit, axial_tilt, sidereal_rotation_period, mass)\n if has_ring:\n body.renderer = OrbitingBodyWithRingRenderer()\n body = setup_ring_renderer(ring_texture, ring_inner_radius, ring_outer_radius, body)\n else:\n body = StationaryBody(None, name, texture, basecolor, radius, axial_tilt, sidereal_rotation_period, mass)\n\n body.parent_internal_name = parent\n return body",
"def example_world():\n get_triangles = metis.geometry.box2d_triangles_from_shapely\n\n obstacle_geometry = shapely.geometry.box(0, 0, 10, 10)\n obstacle_geometry = obstacle_geometry.difference(\n obstacle_geometry.buffer(-.2))\n obstacle_geometry = obstacle_geometry.union(\n shapely.geometry.LineString([(5, 0), (5, 10)]).buffer(.1, cap_style=2))\n obstacle_geometry = obstacle_geometry.difference(\n shapely.geometry.Point(5, 2.5).buffer(1, cap_style=1))\n obstacle_geometry = obstacle_geometry.difference(\n shapely.geometry.Point(5, 7.5).buffer(1, cap_style=1))\n\n world = b2.world()\n obstacles = world.CreateStaticBody()\n for triangle in get_triangles(obstacle_geometry):\n _ = obstacles.CreateFixture(shape=triangle)\n\n agent = world.CreateDynamicBody()\n agent_geometry = shapely.geometry.Polygon([\n (2./3., 0.), (-1./3., .4), (-1./3., -.4)])\n for triangle in get_triangles(agent_geometry):\n _ = agent.CreateFixture(shape=triangle)\n\n boxes = [world.CreateDynamicBody() for _ in xrange(2)]\n for box in boxes:\n box.CreateFixture(shape=b2.polygonShape(box=(.8, .8)))\n\n bodies = {'robot': agent, 'box1': boxes[0], 'box2': boxes[1]}\n sample_configuration = {\n 'robot': (1, 2, 0), 'box1': (3, 2, -.2), 'box2': (5, 2.5, 0.1)}\n\n return world, bodies, sample_configuration",
"def setup_terrain(self):\r\n self.terrain_scale = LVector3(512, 512, 100)\r\n self.terrain_pos = LVector3(-256, -256, -70)\r\n # sample values for a 4096 x 4096px heightmap.\r\n #self.terrain_scale = LVector3(4096, 4096, 1000)\r\n #self.terrain_pos = LVector3(-2048, -2048, -70)\r\n \"\"\"\r\n Diamond_subdivision is an alternating triangulation scheme and may\r\n produce better results.\r\n \"\"\"\r\n use_diamond_subdivision = True\r\n \r\n \"\"\"\r\n Construct the terrain\r\n Without scaling, any ShaderTerrainMesh is 1x1x1 units.\r\n \"\"\"\r\n self.terrain_node = ShaderTerrainMesh()\r\n \"\"\"\r\n Set a heightfield, the heightfield should be a 16-bit png and\r\n have a quadratic size of a power of two.\r\n \"\"\"\r\n heightfield = Texture()\r\n heightfield.read(self.heightfield_fn)\r\n heightfield.set_keep_ram_image(True) \r\n self.terrain_node.heightfield = heightfield\r\n \r\n # Display characteristic values of the heightfield texture\r\n #minpoint, maxpoint, avg = LPoint3(), LPoint3(), LPoint3()\r\n #heightfield.calc_min_max(minpoint, maxpoint)\r\n #heightfield.calc_average_point(avg, 0.5, 0.5, 0.5)\r\n #print(\"avg: {} min: {} max: {}\".format(avg.x, minpoint.x, maxpoint.x))\r\n\r\n \"\"\"\r\n Set the target triangle width. For a value of 10.0 for example,\r\n the ShaderTerrainMesh will attempt to make every triangle 10 pixels\r\n wide on screen.\r\n \"\"\"\r\n self.terrain_node.target_triangle_width = 10.0\r\n if use_diamond_subdivision:\r\n \"\"\"\r\n This has to be specified before calling .generate()\r\n The default is false.\r\n \"\"\"\r\n load_prc_file_data(\"\", \"stm-use-hexagonal-layout true\")\r\n \r\n self.terrain_node.generate()\r\n \"\"\"\r\n Attach the terrain to the main scene and set its scale. With no scale\r\n set, the terrain ranges from (0, 0, 0) to (1, 1, 1)\r\n \"\"\"\r\n self.terrain = self.render.attach_new_node(self.terrain_node)\r\n self.terrain.set_scale(self.terrain_scale)\r\n self.terrain.set_pos(self.terrain_pos)\r\n \"\"\"\r\n Set a vertex and a fragment shader on the terrain. The\r\n ShaderTerrainMesh only works with an applied shader.\r\n \"\"\"\r\n terrain_shader = Shader.load(Shader.SL_GLSL, \r\n \"samples/shader-terrain/terrain.vert.glsl\", \r\n \"samples/shader-terrain/terrain.frag.glsl\")\r\n self.terrain.set_shader(terrain_shader)\r\n self.terrain.set_shader_input(\"camera\", base.camera)\r\n # Set some texture on the terrain\r\n grass_tex = self.loader.load_texture(\r\n \"samples/shader-terrain/textures/grass.png\")\r\n grass_tex.set_minfilter(SamplerState.FT_linear_mipmap_linear)\r\n grass_tex.set_anisotropic_degree(16)\r\n self.terrain.set_texture(grass_tex)\r\n\r\n \"\"\"\r\n Set up the DynamicHeightfield (it's a type of PfmFile). 
We load the\r\n same heightfield image as with ShaderTerrainMesh.\r\n \"\"\"\r\n self.DHF = DynamicHeightfield()\r\n self.DHF.read(self.heightfield_fn)\r\n \"\"\"\r\n Set up empty PfmFiles to prepare stuff in that is going to\r\n dynamically modify our terrain.\r\n \"\"\"\r\n self.StagingPFM = PfmFile()\r\n self.RotorPFM = PfmFile()\r\n \r\n \"\"\"\r\n Set up the BulletHeightfieldShape (=collision terrain) and give it\r\n some sensible physical properties.\r\n \"\"\"\r\n self.HFS = BulletHeightfieldShape(self.DHF, self.terrain_scale.z,\r\n STM=True)\r\n if use_diamond_subdivision:\r\n self.HFS.set_use_diamond_subdivision(True)\r\n HFS_rigidbody = BulletRigidBodyNode(\"BulletTerrain\")\r\n HFS_rigidbody.set_static(True)\r\n friction = 2.0\r\n HFS_rigidbody.set_anisotropic_friction(\r\n LVector3(friction, friction, friction/1.3))\r\n HFS_rigidbody.set_restitution(0.3)\r\n HFS_rigidbody.add_shape(self.HFS)\r\n self.world.attach(HFS_rigidbody)\r\n \r\n HFS_NP = NodePath(HFS_rigidbody)\r\n HFS_NP.reparent_to(self.worldNP)\r\n \"\"\"\r\n This aligns the Bullet terrain with the ShaderTerrainMesh rendered\r\n terrain. It will be exact as long as the terrain vertex shader from\r\n the STM sample is used and no additional tessellation shader.\r\n For Bullet (as for other physics engines) the origin of objects is at\r\n the center.\r\n \"\"\"\r\n HFS_NP.set_pos(self.terrain_pos + self.terrain_scale/2)\r\n HFS_NP.set_sx(self.terrain_scale.x / heightfield.get_x_size())\r\n HFS_NP.set_sy(self.terrain_scale.y / heightfield.get_y_size())\r\n \r\n # Disables Bullet debug rendering for the terrain, because it is slow.\r\n #HFS_NP.node().set_debug_enabled(False)\r\n \r\n \"\"\"\r\n Finally, link the ShaderTerrainMesh and the BulletHeightfieldShape to\r\n the DynamicHeightfield. From now on changes to the DynamicHeightfield\r\n will propagate to the (visible) ShaderTerrainMesh and the (collidable)\r\n BulletHeightfieldShape.\r\n \"\"\"\r\n self.HFS.set_dynamic_heightfield(self.DHF)\r\n self.terrain_node.set_dynamic_heightfield(self.DHF)",
"def geometry():\n return Geometry()",
"def build_dom():\n glass_thickness = 10 #mm\n size = 100 #mm\n # outside of the glass envelope\n outside_mesh = make.sphere(size)\n # inside of the glass envelope\n inside_mesh = make.sphere(size-glass_thickness)\n\n # outside solid with ice on the outside, and glass on the inside\n outside_solid = Solid(outside_mesh,glass,ice) \n\n inside_surface = r7081hqe_photocathode\n inside_color = 0x00ff00\n\n # construct the inside solid\n inside_solid = Solid(inside_mesh,vacuum,glass,surface=inside_surface,\n color=inside_color)\n\n # you can add solids and meshes!\n return outside_solid + inside_solid",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel",
"def b_create_base_geometry(b_name):\n b_obj = b_create_cube(b_name)\n return b_obj",
"def obtain_geometries(self):\n\n assert isinstance(self.ts, TS)\n\n \n symbol_dict = {\n 17: \"Cl\",\n 9: \"F\",\n 8: \"O\",\n 7: \"N\",\n 6: \"C\",\n 1: \"H\",\n }\n atoms = []\n\n parser = ccread(self.log_file, loglevel=logging.ERROR)\n\n for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):\n atoms.append(Atom(symbol=symbol_dict[atom_num], position=coords))\n \n self.ts._ase_molecule = Atoms(atoms)\n self.ts.update_coords_from(\"ase\")\n\n self.pre_geometry = self.ts.ase_molecule.copy()\n self.post_geometry = self.ts.ase_molecule.copy()\n\n for vib, displacements in self.vibrations:\n if vib < 0: # Finding the imaginary frequency\n self.post_geometry.arrays[\"positions\"] -= displacements\n\n return self.pre_geometry, self.post_geometry",
"def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines",
"def spatial(self):",
"def create_ring(self):\n\t\tself.north_coords = numpy.add(self.center, self.north)\n\t\tself.northeast_coords = numpy.add(self.center, self.northeast)\n\t\tself.east_coords = numpy.add(self.center, self.east)\n\t\tself.southeast_coords = numpy.add(self.center, self.southeast)\n\t\tself.south_coords = numpy.add(self.center, self.south)\n\t\tself.southwest_coords = numpy.add(self.center, self.southwest)\n\t\tself.west_coords = numpy.add(self.center, self.west)\n\t\tself.northwest_coords = numpy.add(self.center, self.northwest)",
"def create_scene(self):\n \n self.scene=soya.World()",
"def build_world(config):\n\n def build_object(obj):\n if obj[\"shape\"] not in _SUPPORTED_SHAPES:\n raise ValueError(f\"Unsupported object: {obj['shape']}.\")\n\n return objects.Sphere(\n center=vector.Point(*obj[\"center\"]),\n radius=obj[\"radius\"],\n material=materials.get_material(obj[\"material_type\"],\n obj[\"material_color\"],\n obj.get(\"material_fuzz\")),\n )\n\n return objects.World(\n objects=[build_object(obj) for obj in config.objects.values()])",
"def nbody_solve(t,y, G,masses):\r\n N_bodies = int(len(y) / 6)\r\n solved_vector = np.zeros(y.size)\r\n for i in range(N_bodies):\r\n ioffset = i * 6 \r\n for j in range(N_bodies):\r\n joffset = j*6\r\n solved_vector[ioffset] = y[ioffset+3]\r\n solved_vector[ioffset+1] = y[ioffset+4]\r\n solved_vector[ioffset+2] = y[ioffset+5]\r\n if i != j:\r\n dx = y[ioffset] - y[joffset]\r\n dy = y[ioffset+1] - y[joffset+1]\r\n dz = y[ioffset+2] - y[joffset+2] \r\n r = (dx**2+dy**2+dz**2)**0.5\r\n ax = (-G*masses[j] / r**3) * dx\r\n ay = (-G*masses[j] / r**3) * dy\r\n az = (-G*masses[j] / r**3) * dz\r\n #ax = ax.value\r\n #ay = ay.value\r\n #az = az.value\r\n solved_vector[ioffset+3] += ax\r\n solved_vector[ioffset+4] += ay\r\n solved_vector[ioffset+5] += az \r\n return solved_vector",
"def generate_mos(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, devname_mos_boundary, devname_mos_body,\n devname_mos_dmy, m=1, m_dmy=0, origin=np.array([0,0])):\n pg = placement_grid\n rg12 = routing_grid_m1m2\n pfix = objectname_pfix\n\n # placement\n imbl0 = laygen.relplace(name=\"I\" + pfix + 'BL0', templatename=devname_mos_boundary, gridname=pg, xy=origin)\n refi=imbl0\n if not m_dmy==0:\n imdmyl0 = laygen.relplace(name=\"I\" + pfix + 'DMYL0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyl0\n else:\n imdmyl0 = None\n im0 = laygen.relplace(name=\"I\" + pfix + '0', templatename=devname_mos_body, gridname=pg, refobj=refi, shape=[m, 1])\n refi=im0\n if not m_dmy==0:\n imdmyr0 = laygen.relplace(name=\"I\" + pfix + 'DMYR0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyr0\n else:\n imdmyr0 = None\n imbr0 = laygen.relplace(name=\"I\" + pfix + 'BR0', templatename=devname_mos_boundary, gridname=pg, refobj=imdmyr0)\n md=im0.elements[:, 0]\n #route\n #gate\n rg0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['G0'], refobj1=md[-1].pins['G0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['G0'], gridname=rg12)\n #drain\n rdl0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=md[0].pins['D0'], refobj1=md[-1].pins['D0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 1], refobj=_md.pins['D0'], gridname=rg12)\n #source\n rs0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['S0'], refobj1=md[-1].pins['S1'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['S0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=md[-1].pins['S1'], gridname=rg12)\n #dmy\n if m_dmy>=2:\n mdmyl=imdmyl0.elements[:, 0]\n mdmyr=imdmyr0.elements[:, 0]\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyl[0].pins['D0'], refobj1=mdmyl[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyr[0].pins['D0'], refobj1=mdmyr[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyl[0].pins['S0'], refobj1=mdmyl[-1].pins['S1'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyr[0].pins['S0'], refobj1=mdmyr[-1].pins['S1'])\n for _mdmyl in mdmyl:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyl.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyl.pins['S0'], gridname=rg12)\n for _mdmyr in mdmyr:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyr.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyr.pins['S1'], gridname=rg12)\n return [imbl0, imdmyl0, im0, imdmyr0, imbr0]",
"def create_rocket_engine_geometry():\r\n\r\n bounds_lower = [3, 7, 33]\r\n funcs_lower = [0, lambda y: y ** 1.5, 0]\r\n\r\n bounds_upper = None\r\n funcs_upper = 100\r\n\r\n x_max = 10\r\n x_min = 0\r\n resolution = 10000\r\n\r\n rocket_engine = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return rocket_engine",
"def envelope(self): # -> BaseGeometry:\n ...",
"def filter_ground(jparams):\n\n # load las file and relevant parameters\n point_cloud = File(jparams['input-las'], mode='r')\n scale = point_cloud.header.scale[0]\n print(point_cloud.header.min)\n print('- Flattening point cloud')\n gridded_pc = point_cloud_to_grid(point_cloud=point_cloud, tf=jparams['thinning-factor'],\n cell_size=int(jparams['gf-cellsize'] / scale))\n\n ground_points, unprocessed_points, ll_origin = gridded_pc[0], gridded_pc[1], gridded_pc[2]\n\n print('- Growing terrain')\n dt = startin.DT()\n dt.insert(list(ground_points))\n dt = grow_terrain(tin=dt, p=unprocessed_points, gp=ground_points,\n max_distance=int(jparams['gf-distance'] / scale),\n max_angle=jparams['gf-angle'])\n\n print('- Writing point cloud')\n with File(jparams['output-las'], mode='w', header=point_cloud.header) as out_file:\n gp = dt.all_vertices()[1:]\n out_file.X = [p[0] for p in gp]\n out_file.Y = [p[1] for p in gp]\n out_file.Z = [p[2] for p in gp]\n\n print('- Creating raster (TIN)\\n\\t- Interpolating (TIN)')\n dg = tin_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale))\n\n print('\\t- Writing Esri Ascii (TIN)')\n write_asc(grid=np.rot90(dg[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-tin'],\n origin=(point_cloud.header.min[0]+dg[1][0]*scale, point_cloud.header.min[1] + dg[1][1]*scale),\n depth=2)\n\n print('- Creating raster (IDW)\\n\\t- Interpolating (IDW)')\n ig = idw_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale),\n radius=jparams['idw-radius'] / scale, \n power=jparams['idw-power'])\n\n print('\\t- Writing Esri Ascii (IDW)')\n write_asc(grid=np.rot90(ig[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-idw'],\n origin=(point_cloud.header.min[0]+ig[1][0]*scale, point_cloud.header.min[1]+ig[1][1]*scale),\n depth=2)\n\n return",
"def makeSkeleton(self):\n model = \"phase_5/models/char/cog\" + string.upper(self.style.body) + \"_robot-zero\"\n anims = self.generateAnimDict()\n\n # remember the current anim\n anim = self.getCurrentAnim()\n\n # grab the drop shadow\n dropShadow = self.dropShadow\n if not dropShadow.isEmpty():\n dropShadow.reparentTo(hidden)\n \n # remove the old geometry\n self.removePart(\"modelRoot\")\n\n # load the skeleton geometry\n self.loadModel(model)\n self.loadAnims(anims)\n\n # set the scale on the skeleton actor (plus a little extra to make it look right)\n self.getGeomNode().setScale(self.scale * 1.0173)\n self.generateHealthBar()\n self.generateCorporateMedallion()\n # set the appropriate tie texture\n self.generateCorporateTie()\n self.setHeight(self.height)\n\n \n # some of the geometry needs to be backfaced and billboarded\n parts = self.findAllMatches('**/pPlane*')\n for partNum in range(0, parts.getNumPaths()):\n #print 'found billboarded part!'\n bb = parts.getPath(partNum)\n bb.setTwoSided(1)\n \n # redo the nametag and drop shadow\n self.setName(TTLocalizer.Skeleton)\n nameInfo = TTLocalizer.SuitBaseNameWithLevel % {\"name\": self.name,\n \"dept\": self.getStyleDept(),\n \"level\": self.getActualLevel(),}\n self.setDisplayName( nameInfo )\n\n # re-find the useful nulls\n self.leftHand = self.find(\"**/joint_Lhold\")\n self.rightHand = self.find(\"**/joint_Rhold\")\n self.shadowJoint = self.find(\"**/joint_shadow\")\n self.nametagNull = self.find(\"**/joint_nameTag\")\n \n if not dropShadow.isEmpty():\n dropShadow.setScale(0.75)\n if not self.shadowJoint.isEmpty():\n dropShadow.reparentTo(self.shadowJoint)\n\n # start the animation again\n self.loop(anim)\n\n # set the flag\n self.isSkeleton = 1",
"def regenerate(self, random_state):\n self._walls_body.geom.clear()\n corridor_width = variation.evaluate(self._corridor_width,\n random_state=random_state)\n corridor_length = variation.evaluate(self._corridor_length,\n random_state=random_state)\n self._current_corridor_length = corridor_length\n self._current_corridor_width = corridor_width\n\n self._ground_plane.pos = [corridor_length / 2, 0, 0]\n self._ground_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, corridor_width / 2, 1]\n\n self._left_plane.pos = [\n corridor_length / 2, corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._left_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._right_plane.pos = [\n corridor_length / 2, -corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._right_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._near_plane.pos = [\n -_CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._near_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._far_plane.pos = [\n corridor_length + _CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._far_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]",
"def ground_contact_geoms(self):\n raise NotImplementedError",
"def draw_body(node, body):\r\n\t\tx,y,z = body.getPosition()\r\n\t\tnode.setPosition(vector3df(x,y,z)*10)\r\n\t\tw,xx,yy,zz = body.getQuaternion()\r\n\t\tnode.setRotation(vector3df(degrees(xx), degrees(yy), degrees(zz)))\r\n\t\tif body.shape == \"box\":\r\n\t\t\tsx,sy,sz = body.boxsize\r\n\t\t\tnode.setScale(vector3df(sx,sy,sz))",
"def generateBaseMesh(cls, region, options):\n centralPath = options['Central path']\n segmentProfile = options['Segment profile']\n segmentCount = options['Number of segments']\n startPhase = options['Start phase'] % 360.0\n proximalLength = options['Proximal length']\n transverseLength = options['Transverse length']\n proximalInnerRadius = options['Proximal inner radius']\n proximalTCWidth = options['Proximal tenia coli width']\n proximalTransverseInnerRadius = options['Proximal-transverse inner radius']\n proximalTransverseTCWidth = options['Proximal-transverse tenia coli width']\n transverseDistalInnerRadius = options['Transverse-distal inner radius']\n transverseDistalTCWidth = options['Transverse-distal tenia coli width']\n distalInnerRadius = options['Distal inner radius']\n distalTCWidth = options['Distal tenia coli width']\n segmentSettings = segmentProfile.getScaffoldSettings()\n\n elementsCountAroundTC = segmentSettings['Number of elements around tenia coli']\n elementsCountAroundHaustrum = segmentSettings['Number of elements around haustrum']\n cornerInnerRadiusFactor = segmentSettings['Corner inner radius factor']\n haustrumInnerRadiusFactor = segmentSettings['Haustrum inner radius factor']\n segmentLengthEndDerivativeFactor = segmentSettings['Segment length end derivative factor']\n segmentLengthMidDerivativeFactor = segmentSettings['Segment length mid derivative factor']\n tcCount = segmentSettings['Number of tenia coli']\n tcThickness = segmentSettings['Tenia coli thickness']\n elementsCountAround = (elementsCountAroundTC + elementsCountAroundHaustrum) * tcCount\n\n elementsCountAlongSegment = segmentSettings['Number of elements along segment']\n elementsCountThroughWall = segmentSettings['Number of elements through wall']\n wallThickness = segmentSettings['Wall thickness']\n mucosaRelThickness = segmentSettings['Mucosa relative thickness']\n submucosaRelThickness = segmentSettings['Submucosa relative thickness']\n circularRelThickness = segmentSettings['Circular muscle layer relative thickness']\n longitudinalRelThickness = segmentSettings['Longitudinal muscle layer relative thickness']\n useCrossDerivatives = segmentSettings['Use cross derivatives']\n useCubicHermiteThroughWall = not (segmentSettings['Use linear through wall'])\n elementsCountAlong = int(elementsCountAlongSegment * segmentCount)\n\n # Colon coordinates\n lengthToDiameterRatio = 24\n wallThicknessToDiameterRatio = 0.1\n teniaColiThicknessToDiameterRatio = 0.25 * wallThicknessToDiameterRatio\n relativeThicknessListColonCoordinates = [1.0 / elementsCountThroughWall for n3 in range(elementsCountThroughWall)]\n\n firstNodeIdentifier = 1\n firstElementIdentifier = 1\n\n # Central path\n tmpRegion = region.createRegion()\n centralPath.generate(tmpRegion)\n cx, cd1, cd2, cd12 = extractPathParametersFromRegion(tmpRegion,\n [Node.VALUE_LABEL_VALUE, Node.VALUE_LABEL_D_DS1,\n Node.VALUE_LABEL_D_DS2, Node.VALUE_LABEL_D2_DS1DS2])\n # for i in range(len(cx)):\n # print(i, '[', cx[i], ',', cd1[i], ',', cd2[i], ',', cd12[i], '],')\n del tmpRegion\n\n # find arclength of colon\n length = 0.0\n elementsCountIn = len(cx) - 1\n sd1 = interp.smoothCubicHermiteDerivativesLine(cx, cd1, fixAllDirections=True,\n magnitudeScalingMode=interp.DerivativeScalingMode.HARMONIC_MEAN)\n for e in range(elementsCountIn):\n arcLength = interp.getCubicHermiteArcLength(cx[e], sd1[e], cx[e + 1], sd1[e + 1])\n # print(e+1, arcLength)\n length += arcLength\n segmentLength = length / segmentCount\n # print('Length = ', length)\n elementAlongLength 
= length / elementsCountAlong\n\n # Sample central path\n sx, sd1, se, sxi, ssf = interp.sampleCubicHermiteCurves(cx, cd1, elementsCountAlong)\n sd2, sd12 = interp.interpolateSampleCubicHermite(cd2, cd12, se, sxi, ssf)\n\n # Generate variation of radius & tc width along length\n lengthList = [0.0, proximalLength, proximalLength + transverseLength, length]\n innerRadiusList = [proximalInnerRadius, proximalTransverseInnerRadius,\n transverseDistalInnerRadius, distalInnerRadius]\n innerRadiusAlongElementList, dInnerRadiusAlongElementList = interp.sampleParameterAlongLine(lengthList,\n innerRadiusList,\n elementsCountAlong)\n\n tcWidthList = [proximalTCWidth, proximalTransverseTCWidth, transverseDistalTCWidth, distalTCWidth]\n tcWidthAlongElementList, dTCWidthAlongElementList = interp.sampleParameterAlongLine(lengthList,\n tcWidthList,\n elementsCountAlong)\n\n # Account for reduced haustrum appearance in transverse and distal pig colon\n if tcCount == 2:\n haustrumInnerRadiusFactorList = [haustrumInnerRadiusFactor, haustrumInnerRadiusFactor * 0.75,\n haustrumInnerRadiusFactor * 0.5, haustrumInnerRadiusFactor * 0.2]\n haustrumInnerRadiusFactorAlongElementList = \\\n interp.sampleParameterAlongLine(lengthList, haustrumInnerRadiusFactorList, elementsCountAlong)[0]\n else:\n haustrumInnerRadiusFactorAlongElementList = [haustrumInnerRadiusFactor] * (elementsCountAlong + 1)\n\n # Create annotation groups for colon sections\n elementsAlongInProximal = round(proximalLength / elementAlongLength)\n elementsAlongInTransverse = round(transverseLength / elementAlongLength)\n elementsAlongInDistal = elementsCountAlong - elementsAlongInProximal - elementsAlongInTransverse\n elementsCountAlongGroups = [elementsAlongInProximal, elementsAlongInTransverse, elementsAlongInDistal]\n\n colonGroup = AnnotationGroup(region, get_colon_term(\"colon\"))\n\n if tcCount == 1:\n proximalGroup = AnnotationGroup(region, get_colon_term(\"proximal colon\"))\n transverseGroup = AnnotationGroup(region, get_colon_term(\"transverse colon\"))\n distalGroup = AnnotationGroup(region, get_colon_term(\"distal colon\"))\n annotationGroupAlong = [[colonGroup, proximalGroup],\n [colonGroup, transverseGroup],\n [colonGroup, distalGroup]]\n\n elif tcCount == 2:\n spiralGroup = AnnotationGroup(region, get_colon_term(\"spiral colon\"))\n transverseGroup = AnnotationGroup(region, get_colon_term(\"transverse colon\"))\n distalGroup = AnnotationGroup(region, get_colon_term(\"distal colon\"))\n annotationGroupAlong = [[colonGroup, spiralGroup],\n [colonGroup, transverseGroup],\n [colonGroup, distalGroup]]\n\n elif tcCount == 3:\n ascendingGroup = AnnotationGroup(region, get_colon_term(\"ascending colon\"))\n transverseGroup = AnnotationGroup(region, get_colon_term(\"transverse colon\"))\n descendingGroup = AnnotationGroup(region, get_colon_term(\"descending colon\"))\n annotationGroupAlong = [[colonGroup, ascendingGroup],\n [colonGroup, transverseGroup],\n [colonGroup, descendingGroup]]\n\n annotationGroupsAlong = []\n for i in range(len(elementsCountAlongGroups)):\n elementsCount = elementsCountAlongGroups[i]\n for n in range(elementsCount):\n annotationGroupsAlong.append(annotationGroupAlong[i])\n\n xExtrude = []\n d1Extrude = []\n d2Extrude = []\n d3UnitExtrude = []\n sxRefExtrudeList = []\n\n if elementsCountThroughWall == 1:\n relativeThicknessList = [1.0]\n annotationGroupsThroughWall = [[]]\n else:\n relativeThicknessList = [mucosaRelThickness, submucosaRelThickness,\n circularRelThickness, longitudinalRelThickness]\n mucosaGroup = 
AnnotationGroup(region, get_colon_term(\"colonic mucosa\"))\n submucosaGroup = AnnotationGroup(region, get_colon_term(\"submucosa of colon\"))\n circularMuscleGroup = AnnotationGroup(region, get_colon_term(\"circular muscle layer of colon\"))\n longitudinalMuscleGroup = AnnotationGroup(region, get_colon_term(\"longitudinal muscle layer of colon\"))\n annotationGroupsThroughWall = [[mucosaGroup], [submucosaGroup],\n [circularMuscleGroup], [longitudinalMuscleGroup]]\n\n # Create object\n colonSegmentTubeMeshInnerPoints = ColonSegmentTubeMeshInnerPoints(\n region, elementsCountAroundTC, elementsCountAroundHaustrum, elementsCountAlongSegment,\n tcCount, segmentLengthEndDerivativeFactor, segmentLengthMidDerivativeFactor,\n segmentLength, wallThickness, cornerInnerRadiusFactor, haustrumInnerRadiusFactorAlongElementList,\n innerRadiusAlongElementList, dInnerRadiusAlongElementList, tcWidthAlongElementList,\n startPhase)\n\n for nSegment in range(segmentCount):\n # Create inner points\n xInner, d1Inner, d2Inner, transitElementList, segmentAxis, annotationGroupsAround \\\n = colonSegmentTubeMeshInnerPoints.getColonSegmentTubeMeshInnerPoints(nSegment)\n\n # Project reference point for warping onto central path\n start = nSegment * elementsCountAlongSegment\n end = (nSegment + 1) * elementsCountAlongSegment + 1\n sxRefList, sd1RefList, sd2ProjectedListRef, zRefList = \\\n tubemesh.getPlaneProjectionOnCentralPath(xInner, elementsCountAround, elementsCountAlongSegment,\n segmentLength, sx[start:end], sd1[start:end], sd2[start:end],\n sd12[start:end])\n\n # Warp segment points\n xWarpedList, d1WarpedList, d2WarpedList, d3WarpedUnitList = tubemesh.warpSegmentPoints(\n xInner, d1Inner, d2Inner, segmentAxis, sxRefList, sd1RefList, sd2ProjectedListRef,\n elementsCountAround, elementsCountAlongSegment, zRefList, innerRadiusAlongElementList[start:end],\n closedProximalEnd=False)\n\n # Store points along length\n xExtrude += xWarpedList if nSegment == 0 else xWarpedList[elementsCountAround:]\n d1Extrude += d1WarpedList if nSegment == 0 else d1WarpedList[elementsCountAround:]\n d2Extrude += d2WarpedList if nSegment == 0 else d2WarpedList[elementsCountAround:]\n d3UnitExtrude += d3WarpedUnitList if nSegment == 0 else d3WarpedUnitList[elementsCountAround:]\n sxRefExtrudeList += sxRefList if nSegment == 0 else sxRefList[elementsCountAround:]\n\n contractedWallThicknessList = colonSegmentTubeMeshInnerPoints.getContractedWallThicknessList()\n\n # Create coordinates and derivatives\n xList, d1List, d2List, d3List, curvatureList = tubemesh.getCoordinatesFromInner(xExtrude, d1Extrude,\n d2Extrude, d3UnitExtrude, contractedWallThicknessList, relativeThicknessList,\n elementsCountAround, elementsCountAlong, elementsCountThroughWall, transitElementList)\n\n relaxedLengthList, xiList = colonSegmentTubeMeshInnerPoints.getRelaxedLengthAndXiList()\n\n closedProximalEnd = False\n\n if tcThickness > 0:\n tubeTCWidthList = colonSegmentTubeMeshInnerPoints.getTubeTCWidthList()\n xList, d1List, d2List, d3List, annotationArrayAround = getTeniaColi(\n region, xList, d1List, d2List, d3List, curvatureList, tcCount, elementsCountAroundTC,\n elementsCountAroundHaustrum, elementsCountAlong, elementsCountThroughWall,\n tubeTCWidthList, tcThickness, sxRefExtrudeList, annotationGroupsAround,\n closedProximalEnd)\n\n # Create flat coordinates\n xFlat, d1Flat, d2Flat = createFlatCoordinatesTeniaColi(\n xiList, relaxedLengthList, length, wallThickness, relativeThicknessList, tcCount, tcThickness,\n elementsCountAroundTC, 
elementsCountAroundHaustrum, elementsCountAlong,\n elementsCountThroughWall, transitElementList, closedProximalEnd)\n\n # Create colon coordinates\n xColon, d1Colon, d2Colon = createColonCoordinatesTeniaColi(xiList, relativeThicknessListColonCoordinates,\n lengthToDiameterRatio,\n wallThicknessToDiameterRatio,\n teniaColiThicknessToDiameterRatio, tcCount,\n elementsCountAroundTC,\n elementsCountAroundHaustrum,\n elementsCountAlong, elementsCountThroughWall,\n transitElementList, closedProximalEnd)\n\n # Create nodes and elements\n nextNodeIdentifier, nextElementIdentifier, annotationGroups = createNodesAndElementsTeniaColi(\n region, xList, d1List, d2List, d3List, xFlat, d1Flat, d2Flat, xColon, d1Colon, d2Colon,\n \"colon coordinates\", elementsCountAroundTC, elementsCountAroundHaustrum, elementsCountAlong,\n elementsCountThroughWall, tcCount, annotationGroupsAround, annotationGroupsAlong,\n annotationGroupsThroughWall, firstNodeIdentifier, firstElementIdentifier, useCubicHermiteThroughWall,\n useCrossDerivatives, closedProximalEnd)\n\n else:\n # Create flat coordinates\n xFlat, d1Flat, d2Flat = tubemesh.createFlatCoordinates(\n xiList, relaxedLengthList, length, wallThickness, relativeThicknessList, elementsCountAround,\n elementsCountAlong, elementsCountThroughWall, transitElementList)\n\n # Create colon coordinates\n xColon, d1Colon, d2Colon = tubemesh.createOrganCoordinates(xiList, relativeThicknessListColonCoordinates,\n lengthToDiameterRatio,\n wallThicknessToDiameterRatio,\n elementsCountAround,\n elementsCountAlong, elementsCountThroughWall,\n transitElementList)\n\n # Create nodes and elements\n nextNodeIdentifier, nextElementIdentifier, annotationGroups = tubemesh.createNodesAndElements(\n region, xList, d1List, d2List, d3List, xFlat, d1Flat, d2Flat, xColon, d1Colon, d2Colon,\n \"colon coordinates\", elementsCountAround, elementsCountAlong, elementsCountThroughWall,\n annotationGroupsAround, annotationGroupsAlong, annotationGroupsThroughWall,\n firstNodeIdentifier, firstElementIdentifier, useCubicHermiteThroughWall, useCrossDerivatives,\n closedProximalEnd)\n\n return annotationGroups"
]
| [
"0.60452974",
"0.59936213",
"0.58286685",
"0.5799699",
"0.5784664",
"0.57215995",
"0.57152975",
"0.56586725",
"0.56239074",
"0.55475634",
"0.55233556",
"0.55116284",
"0.5497383",
"0.5475909",
"0.54728085",
"0.545801",
"0.5440318",
"0.5348923",
"0.5345914",
"0.5342128",
"0.5298819",
"0.52985984",
"0.5295119",
"0.528384",
"0.5268799",
"0.52635366",
"0.5240063",
"0.52028865",
"0.51941955",
"0.5179158"
]
| 0.68961835 | 0 |
Create satellite solar panel geometry | def create_sat_panels(self):
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Panel length
L = bx
        # Panels: each side carries two hinged segments of length L, tilted by
        # +/- theta from the body's y-faces so the pair forms an accordion fold
theta = self.PANEL_ANGLE*pi/180
px1 = bx - L*sin(theta)
py1 = by + L*cos(theta)
pz1 = bz
px2 = px1 + L*sin(theta)
py2 = py1 + L*cos(theta)
pz2 = pz1
# Vertices
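        # V[0:8] build the +y panel (inner and outer quads), V[8:16] mirror it on
        # the -y side; fold-line points are duplicated so each quad owns its own
        # four vertices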
V = [
(-bx, by, -bz),
(-bx, by, bz),
(-px1, py1, pz1),
(-px1, py1, -pz1),
(-px1, py1, -pz1),
(-px1, py1, pz1),
(-px2, py2, pz2),
(-px2, py2, -pz2),
(-bx, -by, -bz),
(-bx, -by, bz),
(-px1, -py1, pz1),
(-px1, -py1, -pz1),
(-px1, -py1, -pz1),
(-px1, -py1, pz1),
(-px2, -py2, pz2),
(-px2, -py2, -pz2)
]
        # Create faces: one quad per panel segment (two on +y, two on -y)
F = [
(0, 1, 2, 3),
(4, 5, 6, 7),
(8, 9, 10, 11),
(12, 13, 14, 15)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0., 0., 0.8)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(10.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
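        # (the scene convention here appears to place the imaged target at the
        # origin, so the satellite body sits at z = -Alt)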
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_satellite(ax, coo_x, coo_y, coo_z):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n tr = np.transpose(np.vstack((coo_x.cartesian.xyz.value, coo_y.cartesian.xyz.value, coo_z.cartesian.xyz.value)))\n\n alpha_czti = 0.5\n alpha_radiator = 0.5\n alpha_sat = 0.3\n\n color_czti = 'yellow'\n color_radiator = 'black'\n color_sat = 'green'\n\n c_w2 = 0.15 # czti half-width\n c_h = 0.30 # czti height\n c_hr = 0.40 # czti radiator height\n sat_w = 0.6\n\n # For each surface, do the following:\n # verts = []\n # verts.append([tuple(tr.dot(np.array[cx, cy, cz]))])\n # surf = Poly3DCollection(verts)\n # surf.set_alpha()\n # surf.set_color()\n # ax.add_collection3d(surf)\n \n # +x rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # +y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n\n # -y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # -x radiator plate\n verts = []\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_radiator)\n surf.set_color(color_radiator)\n ax.add_collection3d(surf)\n\n # # Bottom CZTI only\n # verts = []\n # verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n # surf = Poly3DCollection([verts])\n # surf.set_alpha(alpha_czti)\n # surf.set_color(color_czti)\n # ax.add_collection3d(surf)\n\n # Satellite top\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite bottom\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n\n ax.add_collection3d(surf)\n\n # Satellite 
back (radiator side)\n verts = []\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite front (opposite radiator side)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n #dpix_mask Satellite right (-y, common to czti)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite left (+y)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n return",
"def ccs4_map(cfg_set_tds,figsize_x=12,figsize_y=12,hillshade=True,radar_loc=True,radar_vis=True):\r\n \r\n ## Load DEM and Swiss borders\r\n shp_path_CH = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp\")\r\n shp_path_Kantone = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp\")\r\n shp_path_count = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/CCS4_merged_proj_clip_G05_countries.shp\")\r\n dem_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/DEM/ccs4.png\")\r\n visi_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/radar/radar_composite_visibility.npy\")\r\n\r\n dem = Image.open(dem_path)\r\n dem = np.array(dem.convert('P'))\r\n\r\n sf_CH = shapefile.Reader(shp_path_CH)\r\n sf_KT = shapefile.Reader(shp_path_Kantone)\r\n sf_ct = shapefile.Reader(shp_path_count)\r\n\r\n ## Setup figure\r\n fig_extent = (255000,965000,-160000,480000)\r\n fig, axes = plt.subplots(1, 1)\r\n fig.set_size_inches(figsize_x, figsize_y)\r\n \r\n ## Plot altitude / hillshading\r\n if hillshade:\r\n ls = colors.LightSource(azdeg=315, altdeg=45)\r\n axes.imshow(ls.hillshade(-dem, vert_exag=0.05),\r\n extent=fig_extent, cmap='gray', alpha=0.5)\r\n else:\r\n axes.imshow(dem*0.6, extent=fig_extent, cmap='gray', alpha=0.5)\r\n \r\n ## Get borders of Cantons\r\n try:\r\n shapes_KT = sf_KT.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for KT_i, shape in enumerate(shapes_KT):\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n axes.plot(x,y,color='darkred',linewidth=0.5,zorder=5)\r\n\r\n ## Get borders of neighbouring countries\r\n try:\r\n shapes_ct = sf_ct.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_ct):\r\n if ct_i in [0,1]:\r\n continue\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n x[x<=255000] = 245000\r\n x[x>=965000] = 975000\r\n y[y<=-159000] = -170000\r\n y[y>=480000] = 490000\r\n if ct_i in [3]:\r\n axes.plot(x[20:170],y[20:170],color='black',linewidth=0.5)\r\n if ct_i in [2]:\r\n ## Delete common border of FR and CH:\r\n x_south = x[y<=86000]; y_south = y[y<=86000]\r\n x_north = x[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #x_north = x[np.logical_and(y>=270577,y<=491000)]\r\n y_north = y[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #y_north = y[np.logical_and(y>=270577,y<=491000)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north,y_north,color='black',linewidth=0.5,zorder=4)\r\n if ct_i in [4]:\r\n ## Delete common border of AT and CH:\r\n x_south = x[np.logical_and(x>=831155,y<235000)]\r\n y_south = y[np.logical_and(x>=831155,y<235000)]\r\n #x_north1 = x[np.logical_and(x>=756622,y>=260466)]\r\n x_north1 = x[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n #y_north1 = y[np.logical_and(x>=756622,y>=260466)]\r\n y_north1 = y[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n y_north2 = y[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n x_north2 = x[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n y_north2 = 
np.concatenate([y_north2[np.argmin(x_north2):],y_north2[:np.argmin(x_north2)]])\r\n x_north2 = np.concatenate([x_north2[np.argmin(x_north2):],x_north2[:np.argmin(x_north2)]])\r\n x_LI = x[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n y_LI = y[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north1,y_north1,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north2,y_north2,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_LI,y_LI,color='black',linewidth=0.5,zorder=4)\r\n else:\r\n continue\r\n #axes.plot(x,y,color='black',linewidth=1,zorder=4)\r\n\r\n ## Get Swiss borders\r\n try:\r\n #shp_records = sf_CH.shapeRecords()\r\n shapes_CH = sf_CH.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_CH): #sf_CH.shapeRecords():\r\n if ct_i!=0: continue\r\n x = np.array([i[0]-2000000 for i in shape.points[:]])\r\n y = np.array([i[1]-1000000 for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n \r\n ## Convert to swiss coordinates\r\n #x,y = lonlat2xy(lon, lat)\r\n axes.plot(x,y,color='darkred',linewidth=1,zorder=3)\r\n\r\n ## Add weather radar locations:\r\n if radar_loc:\r\n weather_radar_y = [237000,142000,100000,135000,190000]\r\n weather_radar_x = [681000,497000,708000,604000,780000]\r\n axes.scatter(weather_radar_x,weather_radar_y,marker=\"D\",#s=2,\r\n color='orange',edgecolor='black',zorder=10)\r\n \r\n ## Add radar visibility:\r\n if radar_vis:\r\n arr_visi = np.load(visi_path)\r\n arr_visi[arr_visi<9000] = 0\r\n arr_visi2 = morph.binary_opening(morph.binary_erosion(arr_visi, structure=np.ones((4,4))), structure=np.ones((4,4)))\r\n arr_visi[arr_visi<9000] = np.nan\r\n axes.imshow(arr_visi, cmap=\"gray\", alpha=0.2, extent=fig_extent)\r\n arr_visi[np.isnan(arr_visi)] = 1\r\n #axes.contour(arr_visi[::-1,:], levels=[2], cmap=\"gray\", linewidths=2,\r\n # linestyle=\"solid\", alpha=0.5, extent=fig_extent)\r\n #arr_visi = arr_visi[::4, ::4]\r\n #ys, xs = np.mgrid[arr_visi.shape[0]:0:-1,\r\n # 0:arr_visi.shape[1]]\r\n #axes.scatter(xs.flatten(), ys.flatten(), s=4,\r\n # c=arr_visi.flatten().reshape(-1, 3), edgecolor='face')\r\n \r\n ## Add further elements:\r\n axes.set_xlim([255000,965000])\r\n axes.set_ylim([-160000,480000])\r\n axes.grid()\r\n axes.set_ylabel(\"CH1903 Northing\")\r\n axes.set_xlabel(\"CH1903 Easting\")\r\n axes.get_xaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n axes.get_yaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n plt.yticks(rotation=90, verticalalignment=\"center\")\r\n return fig, axes, fig_extent",
"def world_map_template():\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.plot(\n laea_x(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(0, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(0, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), 0),\n laea_y(np.linspace(-np.pi, np.pi), 0),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.text(0, 1.47, r\"$|0\\rangle$\", fontsize=20)\n ax.text(0, -1.53, r\"$|1\\rangle$\", fontsize=20)\n ax.text(0.05, 0.05, r\"$|+\\rangle$\", fontsize=20)\n ax.text(2.9, 0, r\"$|-\\rangle$\", fontsize=20)\n ax.text(-3.2, 0, r\"$|-\\rangle$\", fontsize=20)\n\n return fig, ax",
"def plot_root(self, root):\n vessel = Marker()\n #visualizations points and lines..\n vessel.header.frame_id = \"map\"\n vessel.header.stamp = rospy.get_rostime()\n vessel.ns = \"markers\"\n vessel.id = 1\n vessel.type = vessel.ARROW\n vessel.action = vessel.ADD\n \n vessel.scale.x = 10*self.rviz_tuning_plt\n vessel.scale.y = 2*self.rviz_tuning_plt\n vessel.scale.z = 2*self.rviz_tuning_plt\n vessel.color.r = 0.0\n vessel.color.g = 0.0\n vessel.color.b = 1.0\n vessel.color.a = 1.0\n # A value of ros.Duration() means never to auto-delete.\n vessel.lifetime = rospy.Duration()\n # Add x,y,z position to pose\n vessel.pose.position.x = root.x\n vessel.pose.position.y = root.y\n vessel.pose.position.z = 0\n # add quaternion to pose\n quat = self.euler_to_quaternion(0, 0, root.alpha)\n vessel.pose.orientation.x = quat[0]\n vessel.pose.orientation.y = quat[1]\n vessel.pose.orientation.z = quat[2]\n vessel.pose.orientation.w = quat[3]\n self.pub_root.publish(vessel)",
"def create_spacecraft_geometry():\r\n\r\n bounds_lower = [3, 7, 33]\r\n funcs_lower = [0, lambda y: y ** 1.5, 0]\r\n\r\n bounds_upper = None\r\n funcs_upper = 100\r\n\r\n x_max = 10\r\n x_min = 0\r\n resolution = 200\r\n\r\n spacecraft = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return spacecraft",
"def create_pressure_vessel_geometry():\r\n\r\n # configure sigmoid function\r\n bounds_upper = [3, 6]\r\n h = 5\r\n w = 6\r\n\r\n sigmoid_function = lambda x: (1 / (1 + np.exp(-1 * h * x + w))) + 1\r\n\r\n sigmoid_function_reverse = lambda x: 1 / (1 + np.exp(h * x - w - 18)) + 1\r\n\r\n funcs_upper = [sigmoid_function, sigmoid_function_reverse]\r\n\r\n bounds_lower = None\r\n funcs_lower = 0\r\n\r\n x_max = 6\r\n x_min = 0\r\n resolution = 10000\r\n\r\n pressure_vessel = Geometry(x_max, x_min, resolution,\r\n bounds_upper, funcs_upper,\r\n bounds_lower, funcs_lower)\r\n\r\n return pressure_vessel",
"def make_map(data,LatLonBox):\n\n proj = ccrs.LambertConformal(central_longitude=data.StationLongitude,\n central_latitude=data.StationLatitude)\n\n fig = plt.figure(figsize=(17,11))\n ax = plt.subplot(111,projection=proj)\n \n ax.coastlines('50m', 'black', linewidth=2, zorder=2)\n\n reader = shpreader.Reader('/Users/chowdahead/Documents/shapefiles/countyl010g_shp_nt00964/countyl010g.shp')\n counties = list(reader.geometries())\n COUNTIES = cfeature.ShapelyFeature(counties,ccrs.PlateCarree())\n ax.add_feature(COUNTIES, facecolor='none',edgecolor='w')\n # Grab state borders\n state_borders = cfeature.NaturalEarthFeature(\n category='cultural', name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\n ax.add_feature(state_borders, edgecolor='w', linewidth=1, zorder=3)\n \n ocean = cfeature.NaturalEarthFeature('physical', 'ocean', scale='50m',\n edgecolor='face',\n facecolor=cfeature.COLORS['water'])\n land = cfeature.NaturalEarthFeature('physical', 'land', scale='50m',\n edgecolor='face',\n facecolor=\"k\")\n\n ax.add_feature(ocean, zorder=-1)\n ax.add_feature(land, zorder=-1)\n ax.set_facecolor('black')\n \n ax.set_extent(LatLonBox,ccrs.PlateCarree())\n \n return fig,ax,proj",
"def _plot_sr_surface(self, varname):\n\n fig = plt.figure()\n varname = self.layer_lookup[varname]\n data = self.node_data[varname]\n x_node_loc = np.arange(self.mins[0], self.maxs[0], self.min_grid_size) + self.min_grid_size/2\n y_node_loc = np.arange(self.mins[1], self.maxs[1], self.min_grid_size) + self.min_grid_size/2\n lon2d, lat2d = np.meshgrid(x_node_loc, y_node_loc)\n\n # mask NaN values\n data_m = np.ma.array(data, mask=np.isnan(data))\n plt.pcolormesh(lon2d, lat2d, data_m.T, vmin=data_m.min(), vmax=data_m.max())",
"def spatial(self):",
"def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return",
"def generateStationPlot(dir_path, traj_list, color_scheme='light'):\n\n\n # Choose the color scheme\n cs = MapColorScheme()\n \n if color_scheme == 'light':\n cs.light()\n\n else:\n cs.dark()\n\n\n plt.figure(figsize=(19.2, 10.8))\n\n # Init the map\n m = Basemap(projection='cyl', resolution='i')\n\n # Draw the coast boundary and fill the oceans with the given color\n m.drawmapboundary(fill_color=cs.map_background)\n\n # Fill continents, set lake color same as ocean color\n m.fillcontinents(color=cs.continents, lake_color=cs.lakes, zorder=1)\n\n # Draw country borders\n m.drawcountries(color=cs.countries)\n m.drawstates(color=cs.states, linestyle='--')\n\n\n\n ### PLOT WORLD MAP ###\n\n # Group stations into countries\n country_dict = {}\n for traj in traj_list:\n\n for obs in traj.observations:\n\n # Extract country code\n country_code = obs.station_id[:2]\n\n if country_code not in country_dict:\n country_dict[country_code] = {}\n \n\n if obs.station_id not in country_dict[country_code]:\n country_dict[country_code][obs.station_id] = [obs.lat, obs.lon]\n\n\n\n # Plot stations in all countries\n for country_code in country_dict:\n\n station_dict = country_dict[country_code]\n\n # Extract lat/lon\n lat = np.degrees([station_dict[station_id][0] for station_id in station_dict])\n lon = np.degrees([station_dict[station_id][1] for station_id in station_dict])\n\n # Convert lat/lon to x/y\n x, y = m(lon, lat)\n\n plt.scatter(x, y, s=0.75, zorder=5, label=\"{:s}: {:d}\".format(country_code, len(lat)))\n\n\n plt.legend(loc='lower left')\n\n plt.tight_layout()\n\n plt.savefig(os.path.join(dir_path, \"world_map.png\"), dpi=100)\n\n plt.close()\n\n ### ###",
"def mapSky(self):\n import aplpy\n\n # Plot with aplpy\n self.gc = aplpy.FITSFigure(self.image, figure=self.f, \n dimensions=[0,1], slices=[0,0], subplot=[0.1, 0.9, 0.9, 0.9])\n \n # Coordinate Grid\n if self.grid:\n self.gc.add_grid()\n self.gc.grid.set_color(self.color)\n self.gc.grid.set_alpha(0.3)\n self.gc.grid.set_linewidth(0.2)\n\n self._colorBar()\n self._plotDisplay()",
"def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text",
"def full_sky_car_template(ncomp, res):\n\n if ncomp == 3:\n pre = (3,)\n else:\n pre = ()\n\n res = res * np.pi / (180 * 60)\n temp = so_map()\n shape, wcs = enmap.fullsky_geometry(res=res, dims=pre)\n temp.data = enmap.zeros(shape, wcs=wcs, dtype=None)\n temp.pixel = \"CAR\"\n temp.nside = None\n temp.ncomp = ncomp\n temp.geometry = temp.data.geometry[1:]\n temp.coordinate = \"equ\"\n return temp",
"def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)",
"def create_figure():\n data = requests.get('https://msds603-swolemate-s3.s3.us-west-2.amazonaws.com/shiqi_xycoords.json').json()\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n lwrist = [v for record in data for k, v in record.items() if k=='left_wrist']\n x = [i[0] for i in lwrist]\n y = [i[1] for i in lwrist]\n axis.scatter(x,y)\n axis.set_xlabel('X')\n axis.set_ylabel('Y')\n axis.set_title('Left Wrist Position')\n return fig",
"def __init__(self, **kwargs):\n\n tmp = defs.copy()\n tmp.update(kwargs)\n\n for kw in tmp:\n setattr(self, kw, tmp[kw])\n \n if self.left is None:\n self.left = pl.rcParams['figure.subplot.left']\n if self.right is None:\n self.right = pl.rcParams['figure.subplot.right']\n if self.bottom is None:\n self.bottom = pl.rcParams['figure.subplot.bottom']\n if self.top is None:\n self.top = pl.rcParams['figure.subplot.top']\n \n self.l = self.left\n self.r = self.right\n self.b = self.bottom\n self.t = self.top \n \n self.square = self.dims[0] == self.dims[1]\n \n if (self.diagonal is not None) and not self.square:\n raise ValueError('Must have square matrix to use diagonal=True')\n\n self.dims = tuple(self.dims)\n self.J, self.K = self.dims # J = nrows, K = ncols\n self.nrows = self.J\n self.ncols = self.K\n \n if type(self.padding) is float:\n self.padding = tuple([self.padding]* 2)\n \n # Size of an individual panel (in inches)\n self.pane_size = np.array(self.figsize) * np.array([self.r-self.l, self.t-self.b])\n self.pane_size *= np.array(self.panel_size)\n\n # Now, figure out the size of the entire figure (in inches)\n self.panel_size = np.zeros(2)\n \n # After these two lines, self.panel_size is equal to the size of the\n # panel-filled area of the window (in inches)\n self.panel_size[0] = self.pane_size[0] * self.K + self.padding[0] * (self.K - 1)\n self.panel_size[1] = self.pane_size[1] * self.J + self.padding[1] * (self.J - 1) \n\n # Add empty area above/below and left/right of panel-filled area\n self.panel_size[0] += self.figsize[0] * (self.left + (1. - self.right))\n self.panel_size[1] += self.figsize[1] * (self.bottom + (1. - self.top))\n\n self.panel_size_rel = self.pane_size / self.panel_size\n\n self.share_x = self.padding[1] <= 0.2\n self.share_y = self.padding[0] <= 0.2 \n self.share_all = self.share_x and self.share_y\n\n self.dx = self.shift_x\n self.dy = self.shift_y\n\n # Create figure\n if type(self.fig) is not int:\n new_fig = False\n l, r = self.fig.subplotpars.left, self.fig.subplotpars.right\n b, t = self.fig.subplotpars.bottom, self.fig.subplotpars.top\n else:\n self.fig = pl.figure(self.fig, self.panel_size)\n new_fig = True\n\n # Adjust padding\n if self.preserve_margins:\n l = self.left * self.figsize[0] / self.panel_size[0]\n r = (self.left * self.figsize[0] + self.K * self.pane_size[0]) \\\n / self.panel_size[0]\n b = self.bottom * self.figsize[1] / self.panel_size[1]\n t = (self.bottom * self.figsize[1] + self.J * self.pane_size[1]) \\\n / self.panel_size[1]\n else:\n l, r, b, t = self.left, self.right, self.bottom, self.top\n \n self.fig.subplots_adjust(left=l, right=r, bottom=b, top=t, \n wspace=self.padding[0], hspace=self.padding[1])\n \n self.l, self.r, self.b, self.t = l, r, b, t\n\n # Important attributes for identifying individual panels\n self.N = int(np.prod(self.dims))\n self.elements = list(np.reshape(np.arange(self.N), self.dims))\n self.elements.reverse()\n self.elements = np.array(self.elements)\n\n # Dimensions of everything (in fractional units)\n #self.window = {'left': l, 'right': r, 'top': t, \n # 'bottom': b, 'pane': ((r-l) / float(dims[0]), (t-b) / float(dims[1]))}\n\n self.xaxes = self.elements[-1]\n self.yaxes = list(zip(*self.elements))[0] \n self.lowerleft = self.elements[-1][0]\n self.lowerright = self.elements[-1][-1]\n self.upperleft = self.elements[0][0]\n self.upperright = self.elements[0][-1]\n \n if self.square:\n self.diag = np.diag(self.elements) \n self.interior = list(self.elements.ravel())\n for element in self.diag:\n 
self.interior.remove(element)\n else:\n self.diag = None \n \n self.left = []\n self.right = []\n self.bottom = []\n self.top = []\n for i in range(self.N):\n k, j = self.axis_position(i) # col, row\n \n if j == 0:\n self.bottom.append(i)\n if j == self.nrows - 1:\n self.top.append(i) \n if k == 0:\n self.left.append(i)\n if k == self.ncols - 1:\n self.right.append(i) \n\n self.interior = []\n for i in range(self.N):\n if i in self.left:\n continue\n if i in self.bottom:\n continue\n \n self.interior.append(i)\n\n # Create subplots\n e_fl = self.elements.flatten()\n self.grid = [None for i in range(self.N)]\n for i in range(self.N): \n j, k = self.axis_position(i)\n \n if self.diagonal == 'lower':\n if k >= (self.dims[1] - j) and i not in self.diag:\n continue\n if self.diagonal == 'upper':\n if k < (self.dims[1] - j) and i not in self.diag:\n continue \n \n #if self.diagonal == 'lower' and j == k and (j, k) != (0, 0):\n # continue\n #if self.diagonal == 'upper' and j == k and (j, k) != (self.J-1, self.K-1):\n # continue\n \n if self.square:\n if i in self.diag and not self.keep_diagonal:\n continue\n \n if new_fig:\n self.grid[i] = AxisConstructor(self.fig, self.J, self.K, e_fl[i]+1)\n else:\n\n # col, row = j, k\n\n lef = l + j * self.panel_size_rel[0] \\\n + self.padding[0] + self.dx\n bot = b + k * self.panel_size_rel[1] \\\n + self.padding[1] + self.dy\n\n rect = [lef, bot, self.panel_size_rel[0], self.panel_size_rel[1]]\n\n self.grid[i] = self.fig.add_axes(rect)",
"def drawCoordinatePlane_region():\r\n turtle2 = t.Screen()\r\n turtle2.title(\"Life Expectancy versus Region\")\r\n t2.speed(0)\r\n t3.speed(0)\r\n setTurtle(t0)\r\n setTurtle(t1)\r\n setTurtle(t2)\r\n setTurtle(t3)\r\n drawAxes(t0)\r\n t1.left(90)\r\n drawAxes(t1)\r\n t0.pu()\r\n t0.fd(-80)\r\n t0.lt(90)\r\n drawlabels(t0, t1)\r\n drawPoints(t0, t1)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t1.lt(90)",
"def viewer(\n self, units='nm', \n draw_edges=True, draw_vertices=True,\n color_by='radius'\n ):\n try:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D \n from matplotlib import cm\n except ImportError:\n print(\"Skeleton.viewer requires matplotlib. Try: pip install matplotlib --upgrade\")\n return\n\n RADII_KEYWORDS = ('radius', 'radii', 'r')\n COMPONENT_KEYWORDS = ('component', 'components', 'c')\n\n fig = plt.figure(figsize=(10,10))\n ax = Axes3D(fig)\n ax.set_xlabel(units)\n ax.set_ylabel(units)\n ax.set_zlabel(units)\n\n # Set plot axes equal. Matplotlib doesn't have an easier way to\n # do this for 3d plots.\n X = self.vertices[:,0]\n Y = self.vertices[:,1]\n Z = self.vertices[:,2]\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n ### END EQUALIZATION CODE ###\n\n component_colors = ['k', 'deeppink', 'dodgerblue', 'mediumaquamarine', 'gold' ]\n\n def draw_component(i, skel):\n component_color = component_colors[ i % len(component_colors) ]\n\n if draw_vertices:\n xs = skel.vertices[:,0]\n ys = skel.vertices[:,1]\n zs = skel.vertices[:,2]\n\n if color_by in RADII_KEYWORDS:\n colmap = cm.ScalarMappable(cmap=cm.get_cmap('rainbow'))\n colmap.set_array(skel.radii)\n\n normed_radii = skel.radii / np.max(skel.radii)\n yg = ax.scatter(xs, ys, zs, c=cm.rainbow(normed_radii), marker='o')\n cbar = fig.colorbar(colmap)\n cbar.set_label('radius (' + units + ')', rotation=270)\n elif color_by in COMPONENT_KEYWORDS:\n yg = ax.scatter(xs, ys, zs, color=component_color, marker='.')\n else:\n yg = ax.scatter(xs, ys, zs, color='k', marker='.')\n\n if draw_edges:\n for e1, e2 in skel.edges:\n pt1, pt2 = skel.vertices[e1], skel.vertices[e2]\n ax.plot( \n [ pt1[0], pt2[0] ],\n [ pt1[1], pt2[1] ],\n zs=[ pt1[2], pt2[2] ],\n color=(component_color if not draw_vertices else 'silver'),\n linewidth=1,\n )\n\n if color_by in COMPONENT_KEYWORDS:\n for i, skel in enumerate(self.components()):\n draw_component(i, skel)\n else:\n draw_component(0, self)\n\n plt.show()",
"def create_ring(self):\n\t\tself.north_coords = numpy.add(self.center, self.north)\n\t\tself.northeast_coords = numpy.add(self.center, self.northeast)\n\t\tself.east_coords = numpy.add(self.center, self.east)\n\t\tself.southeast_coords = numpy.add(self.center, self.southeast)\n\t\tself.south_coords = numpy.add(self.center, self.south)\n\t\tself.southwest_coords = numpy.add(self.center, self.southwest)\n\t\tself.west_coords = numpy.add(self.center, self.west)\n\t\tself.northwest_coords = numpy.add(self.center, self.northwest)",
"def make_planet(npix0,osf,xc,yc,rplanet,map0):\n npix=int(np.floor(npix0*osf))\n make_planet_c(npix,npix,xc*osf,yc*osf,rplanet*osf,map0)\n planet=map0.copy().reshape((npix0,osf,npix0,osf))\n planet=planet.mean(axis=3).mean(axis=1)\n return planet",
"def create_meshregion_component(\n self, scale_factor=1.0, name=\"Component_Region\", restore_padding_values=[50, 50, 50, 50, 50, 50]\n ):\n self.modeler.edit_region_dimensions([0, 0, 0, 0, 0, 0])\n\n verticesID = self.modeler.oeditor.GetVertexIDsFromObject(\"Region\")\n\n x_values = []\n y_values = []\n z_values = []\n\n for id in verticesID:\n tmp = self.modeler.oeditor.GetVertexPosition(id)\n x_values.append(tmp[0])\n y_values.append(tmp[1])\n z_values.append(tmp[2])\n\n scale_factor = scale_factor - 1\n delta_x = (float(max(x_values)) - float(min(x_values))) * scale_factor\n x_max = float(max(x_values)) + delta_x / 2.0\n x_min = float(min(x_values)) - delta_x / 2.0\n\n delta_y = (float(max(y_values)) - float(min(y_values))) * scale_factor\n y_max = float(max(y_values)) + delta_y / 2.0\n y_min = float(min(y_values)) - delta_y / 2.0\n\n delta_z = (float(max(z_values)) - float(min(z_values))) * scale_factor\n z_max = float(max(z_values)) + delta_z / 2.0\n z_min = float(min(z_values)) - delta_z / 2.0\n\n dis_x = str(float(x_max) - float(x_min))\n dis_y = str(float(y_max) - float(y_min))\n dis_z = str(float(z_max) - float(z_min))\n\n min_position = self.modeler.Position(str(x_min) + \"mm\", str(y_min) + \"mm\", str(z_min) + \"mm\")\n mesh_box = self.modeler.primitives.create_box(min_position, [dis_x + \"mm\", dis_y + \"mm\", dis_z + \"mm\"], name)\n\n self.modeler.primitives[name].model = False\n\n self.modeler.edit_region_dimensions(restore_padding_values)\n return dis_x, dis_y, dis_z",
"def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)",
"def WindingDesign(main):\n oEditor = main['ANSYS']['oEditor']\n\n # Slots number\n Slots = main['ANSYS']['FixedVariables']['Slots']\n\n # SlotType\n SlotType = main['ANSYS']['FixedVariables']['SlotType']\n\n # Geimetric parameters\n g = main['ANSYS']['DesignProperties']['Stator']['g']\n\n Hs0 = main['ANSYS']['DesignProperties']['Slot']['Hs0']\n Hs1 = main['ANSYS']['DesignProperties']['Slot']['Hs1']\n Hs2 = main['ANSYS']['DesignProperties']['Slot']['Hs2']\n Bs1 = main['ANSYS']['DesignProperties']['Slot']['Bs1']\n Bs2 = main['ANSYS']['DesignProperties']['Slot']['Bs2']\n\n DiaGap = main['ANSYS']['DesignProperties']['Rotor']['DiaGap']\n\n # Coils Arrange ABC\n PhasesABC = main['ANSYS']['Winding']['ABC']\n\n # Color used for phases\n Color = main['ANSYS']['Winding']['Color']\n\n oEditor.CreateUserDefinedPart(\n [\n \"NAME:UserDefinedPrimitiveParameters\",\n \"DllName:=\"\t\t, \"RMxprt/LapCoil.dll\",\n \"Version:=\"\t\t, \"16.0\",\n \"NoOfParameters:=\"\t, 22,\n \"Library:=\"\t\t, \"syslib\",\n [\n \"NAME:ParamVector\",\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaGap\",\n \"Value:=\"\t\t, \"DiaGap+g*2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"DiaYoke\",\n \"Value:=\"\t\t, \"DiaYoke\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Length\",\n \"Value:=\"\t\t, \"0mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Skew\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Slots\",\n \"Value:=\"\t\t, str(int(Slots))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SlotType\",\n \"Value:=\"\t\t, str(int(SlotType))\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs0\",\n \"Value:=\"\t\t, \"Hs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs1\",\n \"Value:=\"\t\t, \"Hs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Hs2\",\n \"Value:=\"\t\t, \"Hs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs0\",\n \"Value:=\"\t\t, \"Bs0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs1\",\n \"Value:=\"\t\t, \"Bs1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Bs2\",\n \"Value:=\"\t\t, \"Bs2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Rs\",\n \"Value:=\"\t\t, \"Rs\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"FilletType\",\n \"Value:=\"\t\t, \"0\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"Layers\",\n \"Value:=\"\t\t, \"2\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"CoilPitch\",\n \"Value:=\"\t\t, \"1\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"EndExt\",\n \"Value:=\"\t\t, \"5mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SpanExt\",\n \"Value:=\"\t\t, \"25mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"BendAngle\",\n \"Value:=\"\t\t, \"0deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"SegAngle\",\n \"Value:=\"\t\t, \"10deg\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"LenRegion\",\n \"Value:=\"\t\t, \"200mm\"\n ],\n [\n \"NAME:Pair\",\n \"Name:=\"\t\t, \"InfoCoil\",\n \"Value:=\"\t\t, \"0\"\n ]\n ]\n ],\n [\n \"NAME:Attributes\",\n \"Name:=\"\t\t, \"LapCoil1\",\n \"Flags:=\"\t\t, \"\",\n \"Color:=\"\t\t, \"(143 175 143)\",\n \"Transparency:=\"\t, 0,\n \"PartCoordinateSystem:=\", \"Global\",\n \"UDMId:=\"\t\t, \"\",\n \"MaterialValue:=\"\t, \"\\\"copper\\\"\",\n \"SurfaceMaterialValue:=\", \"\\\"\\\"\",\n \"SolveInside:=\"\t\t, True,\n \"ShellElement:=\"\t, False,\n \"ShellElementThickness:=\", \"0mm\",\n \"IsMaterialEditable:=\"\t, True,\n \"UseMaterialAppearance:=\", False,\n \"IsLightweight:=\"\t, False\n ]\n )\n\n # Body Separation\n oEditor.SeparateBody(\n [\n \"NAME:Selections\",\n \"Selections:=\"\t\t, \"LapCoil1\",\n \"NewPartsModelFlag:=\"\t, 
\"Model\"\n ],\n [\n \"CreateGroupsForNewObjects:=\", False\n ]\n )\n\n # Average Slot Width\n AverWidth = (Bs2 + Bs1)/2\n\n # Average Radius\n AverRadius = DiaGap/2 + g + Hs0 + Hs1 + Hs2*0.75\n\n # Angle to shift and find the kth tooth\n ShiftSlot = 1/Slots*np.pi\n\n # Angle to fond the corrent layer\n ShiftLayer = np.arctan(AverWidth/4/AverRadius)\n\n # List to save the coils sides names\n WindingNames = [[], [], []]\n\n # Phases name to employed\n PhaseNames = ['A', 'B', 'C']\n\n for phase, row in enumerate(PhasesABC):\n\n PhaseName = [[], []]\n\n for coil, slot in enumerate(row):\n\n SlotAngle = np.abs(slot)/Slots*2*np.pi - ShiftSlot\n\n if coil % 2 == 1:\n SlotAngle = SlotAngle - ShiftLayer\n\n else:\n SlotAngle = SlotAngle + ShiftLayer\n\n x = np.cos(SlotAngle)*AverRadius\n y = np.sin(SlotAngle)*AverRadius\n\n Name0 = oEditor.GetBodyNamesByPosition(\n [\n \"NAME:Parameters\",\n \"XPosition:=\", str(x)+\"mm\",\n \"YPosition:=\", str(y)+\"mm\",\n \"ZPosition:=\", \"0mm\"\n ]\n )\n\n C = Color[phase]\n\n if np.sign(slot) == 1:\n\n CoilSideName = PhaseNames[phase]+\"In\"+str(np.abs(coil))\n\n PhaseName[0] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2]\n ],\n\n ]\n ]\n ]\n )\n else:\n\n CoilSideName = PhaseNames[phase]+\"Out\"+str(np.abs(coil))\n\n PhaseName[1] += [CoilSideName]\n\n oEditor.ChangeProperty(\n [\n \"NAME:AllTabs\",\n [\n \"NAME:Geometry3DAttributeTab\",\n [\n \"NAME:PropServers\",\n Name0[0]\n ],\n [\n \"NAME:ChangedProps\",\n [\n \"NAME:Name\",\n \"Value:=\"\t\t,\n CoilSideName\n ],\n [\n \"NAME:Color\",\n \"R:=\"\t\t\t, C[0],\n \"G:=\"\t\t\t, C[1],\n \"B:=\"\t\t\t, C[2],\n ],\n\n ]\n ]\n ]\n )\n\n WindingNames[phase] += PhaseName\n\n main['ANSYS']['Winding']['CoilNames'] = WindingNames\n\n return main",
"def create_solids(self):\n\n plasma = self.create_plasma()\n pf_coils = self.create_pf_coils()\n tf_coil = self.create_tf_coils()\n vessel = self.create_vessel_components()\n\n shapes_and_components = plasma + pf_coils + vessel[:-1] + tf_coil\n self.shapes_and_components = shapes_and_components\n\n return shapes_and_components",
"def plot_skyplot(\n self,\n figure_name: str=\"plot_skyplot_{system}.{FIGURE_FORMAT}\",\n ) -> List[pathlib.PosixPath]:\n figure_paths = list()\n \n # Convert azimuth to range 0-360 degree\n azimuth = self.dset.site_pos.azimuth\n idx = azimuth < 0\n azimuth[idx] = 2 * np.pi + azimuth[idx]\n \n # Convert zenith distance from radian to degree\n zenith_distance = np.rad2deg(self.dset.site_pos.zenith_distance)\n \n # Generate x- and y-axis data per system\n for sys in sorted(self.dset.unique(\"system\")):\n x_arrays = []\n y_arrays = []\n labels = []\n \n figure_path = self.figure_dir / figure_name.replace(\"{system}\", sys).replace(\"{FIGURE_FORMAT}\", FIGURE_FORMAT)\n figure_paths.append(figure_path)\n \n for sat in sorted(self.dset.unique(\"satellite\")):\n if not sat.startswith(sys):\n continue\n idx = self.dset.filter(satellite= sat)\n x_arrays.append(azimuth[idx])\n y_arrays.append(zenith_distance[idx])\n labels.append(sat)\n \n # Plot with polar projection\n # TODO: y-axis labels are overwritten after second array plot. Why? What to do?\n plot(\n x_arrays=x_arrays,\n y_arrays=y_arrays,\n xlabel=\"\",\n ylabel=\"\",\n y_unit=\"\",\n labels=labels,\n figure_path=figure_path,\n opt_args={\n \"colormap\": \"hsv\",\n \"figsize\": (7, 7.5),\n \"legend\": True,\n \"legend_ncol\": 6,\n \"legend_location\": \"bottom\",\n \"plot_to\": \"file\",\n \"plot_type\": \"scatter\",\n \"projection\": \"polar\",\n \"title\": f\"Skyplot for {enums.gnss_id_to_name[sys]}\\n Azimuth [deg] / Elevation[deg]\",\n \"xlim\": [0, 2 * np.pi],\n \"ylim\": [0, 90],\n \"yticks\": (range(0, 90, 30)), # sets 3 concentric circles\n \"yticklabels\": (map(str, range(90, 0, -30))), # reverse labels from zenith distance to elevation\n },\n )\n \n return figure_paths",
"def plot_sed(self,period=6.,projection='lambert',geopolygons=None, showfig=True, vmin=0, vmax=None, hillshade=False):\n\t\tif hillshade:\n\t\t\talpha = 0.5\n\t\telse:\n\t\t\talpha =1.\n\t\tm = self._get_basemap(projection=projection, geopolygons=geopolygons,hillshade=hillshade)\n\t\tgroup = self['%g_sec'%( period )]\n\t\tx, y = m(group['lonArr'].value, group['latArr'].value)\n\t\tmy_cmap = pycpt.load.gmtColormap('./cv.cpt')\n\t\tsed_Arr = group['sed_Arr'].value\n\t\tsed_Arr_msk = group['sed_Arr_msk'].value\n\t\tif vmin == None:\n\t\t\tvmin = np.nanmin(sed_Arr[~sed_Arr_msk])\n\t\t\tvmin = np.floor(vmin/5.)*5.\n\t\tif vmax == None:\n\t\t\tvmax = np.nanmax(sed_Arr[~sed_Arr_msk])\n\t\t\tvmax = np.ceil(vmax/5.)*5.\n\t\tim = m.pcolormesh(x, y, np.ma.masked_array(sed_Arr,mask=sed_Arr_msk), cmap=my_cmap, shading='gouraud', vmin=vmin, vmax=vmax, alpha=alpha)\n\t\tcb = m.colorbar(im, \"bottom\", size=\"3%\", pad='2%', format='%d')\n\t\tcb.set_label('Sediment thickness (m)', fontsize=12, rotation=0)\n\t\tcb.set_alpha(1)\n\t\tcb.draw_all()\n\t\tax = plt.gca() # only plot the oceanic part for JdF\n\t\t# ax.set_xlim(right=x_max)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\treturn",
"def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle",
"def __init__(self):\n self.lattices = []\n self.meshfns = []",
"def system_fleet_dimensioning(self):"
]
| [
"0.6237249",
"0.6226212",
"0.5892257",
"0.5789716",
"0.57470626",
"0.5711296",
"0.56723964",
"0.5627063",
"0.5626375",
"0.55648106",
"0.55581516",
"0.5531766",
"0.5512465",
"0.55034286",
"0.5503299",
"0.54912096",
"0.54896516",
"0.54868186",
"0.54823357",
"0.54696953",
"0.54648453",
"0.5459745",
"0.54573625",
"0.5452147",
"0.5451946",
"0.54419714",
"0.54360056",
"0.5394691",
"0.5391009",
"0.53446925"
]
| 0.67812073 | 0 |
Create FOV actor for camera | def create_cam_fov(self, name):
# Vertices of FOV
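        # V[0] is the apex at the satellite altitude (z = -Alt); V[1:5] are the four
        # corner intercept points from CAM_PROPS, so the faces below form an open
        # pyramid with no base quad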
V = [
(0, 0, -self.SAT_PROPS["Alt"]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 0]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 1]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 2]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 3])
]
# Faces of FOV
F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]
# Create building blocks of polydata
cam = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
        for i in range(len(V)):
            points.InsertPoint(i, V[i])
        for i in range(len(F)):
            polys.InsertNextCell(self.mkVtkIdList(F[i]))
        for i in range(len(V)):
            scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
cam.SetPoints(points)
del points
cam.SetPolys(polys)
del polys
cam.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(cam)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 1, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetOpacity(0.1)
return actor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_camera_fov(args_, client_, new_fov):\n\n args_.camera_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n args_.camera_depth_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n\n # destroy the original actor and make a new camera object\n args_.rgb_camera.camera_actor.stop()\n args_.depth_camera.camera_actor.stop()\n commands_ = [\n # destroy the previous actor first\n carla.command.DestroyActor(args_.depth_camera.camera_actor.id),\n carla.command.DestroyActor(args_.rgb_camera.camera_actor.id),\n # spawn the new actor\n carla.command.SpawnActor(\n args_.camera_bp, carla.Transform(), args_.spectator),\n carla.command.SpawnActor(\n args_.camera_depth_bp, carla.Transform(), args_.spectator),\n ]\n response_ = client_.apply_batch_sync(commands_)\n camera_actor_ids_ = [r.actor_id for r in response_[-2:]]\n camera_, camera_depth_ = world.get_actors(\n camera_actor_ids_)\n\n args_.rgb_camera = Camera(camera_, width=args_.width,\n height=args_.height,\n fov=new_fov,\n camera_type=\"rgb\")\n\n args_.depth_camera = Camera(\n camera_depth_, camera_type=\"depth\")\n\n args_.prev_camera_fov = new_fov",
"def setFov(self,fov):\n self.light.node().getLens().setFov(fov)",
"def fov(self, fov):\n self.ptr.fov(fov)",
"def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"",
"def fov(self, fov: float):\n assert type(fov) in (int, float)\n self._fov[self.projection_mode.value] = fov\n self._reset_matrix()",
"def test_f_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n vfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.fx_from_hfov(hfov, width)\n expected = 606.2177826491071\n self.assertEqual(actual, expected)\n\n actual = tesse_ros_bridge.utils.fy_from_vfov(vfov, height)\n expected = 415.69219381653056\n self.assertEqual(actual, expected)",
"def fov(self) -> float:\n return self._fov[self.projection_mode.value]",
"def imageFromFOV(self, view_x=None, view_y=None): # pragma: no cover\n # to be overloaded by the child class.\n return 0",
"def create_camera(self, ego_actor):\r\n transform = self.get_camera_transform()\r\n if not transform:\r\n transform = carla.Transform()\r\n\r\n spawn_object_request = SpawnObjectRequest()\r\n spawn_object_request.type = \"sensor.camera.rgb\"\r\n spawn_object_request.id = \"spectator_view\"\r\n spawn_object_request.attach_to = ego_actor.id\r\n spawn_object_request.transform = trans.carla_transform_to_ros_pose(transform)\r\n spawn_object_request.random_pose = False\r\n spawn_object_request.attributes.extend([\r\n KeyValue(\"image_size_x\", str(self.camera_resolution_x)),\r\n KeyValue(\"image_size_y\", str(self.camera_resolution_y)),\r\n KeyValue(\"fov\", str(self.camera_fov))\r\n ])\r\n\r\n response = self.spawn_object_service(spawn_object_request)\r\n if response.id == -1:\r\n raise Exception(response.error_string)\r\n\r\n self.camera_actor = self.world.get_actor(response.id)",
"def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)",
"def getFov(self):\n return self.light.node().getLens().getFov()",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)",
"def __init__(self, env, transform=None,\n fx=529, fy=525, cx=328, cy=267, near=0.01, far=10.0, \n width=640, height=480):\n \n self.env = env;\n self.lock = Lock()\n self.camera = openravepy.RaveCreateSensor(env, 'offscreen_render_camera')\n self.set_intrinsic(fx, fy, cx, cy, near, far, width, height)\n self.camera.Configure(openravepy.Sensor.ConfigureCommand.PowerOn);\n if (transform is None):\n self.set_transform(numpy.eye(4))\n else:\n self.set_transform(transform)",
"def __init__(self, position, focal_point, viewup):\n self._position = position\n self._focal_point = focal_point\n self._viewup = viewup",
"def test_register_fov(self):\n task = MesoscopeFOV(self.session_path, device_collection='raw_imaging_data', one=self.one)\n mlapdv = {'topLeft': [2317.2, -1599.8, -535.5], 'topRight': [2862.7, -1625.2, -748.7],\n 'bottomLeft': [2317.3, -2181.4, -466.3], 'bottomRight': [2862.7, -2206.9, -679.4],\n 'center': [2596.1, -1900.5, -588.6]}\n meta = {'FOV': [{'MLAPDV': mlapdv, 'nXnYnZ': [512, 512, 1], 'roiUUID': 0}]}\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n task.register_fov(meta, 'estimate')\n calls = mock_rest.call_args_list\n self.assertEqual(3, len(calls))\n\n args, kwargs = calls[1]\n self.assertEqual(('fields-of-view', 'create'), args)\n expected = {'data': {'session': None, 'imaging_type': 'mesoscope', 'name': 'FOV_00', 'stack': None}}\n self.assertEqual(expected, kwargs)\n\n args, kwargs = calls[2]\n self.assertEqual(('fov-location', 'create'), args)\n expected = ['field_of_view', 'default_provenance', 'coordinate_system', 'n_xyz', 'provenance', 'x', 'y', 'z',\n 'brain_region']\n self.assertCountEqual(expected, kwargs.get('data', {}).keys())\n self.assertEqual(5, len(kwargs['data']['brain_region']))\n self.assertEqual([512, 512, 1], kwargs['data']['n_xyz'])\n self.assertIs(kwargs['data']['field_of_view'], mock_rest().get('id'))\n self.assertEqual('E', kwargs['data']['provenance'])\n self.assertEqual([2317.2, 2862.7, 2317.3, 2862.7], kwargs['data']['x'])\n\n # Check dry mode with suffix input = None\n for file in self.session_path.joinpath('alf', 'FOV_00').glob('mpciMeanImage.*'):\n file.replace(file.with_name(file.name.replace('_estimate', '')))\n self.one.mode = 'local'\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n out = task.register_fov(meta, None)\n mock_rest.assert_not_called()\n self.assertEqual(1, len(out))\n self.assertEqual('FOV_00', out[0].get('name'))\n locations = out[0]['location']\n self.assertEqual(1, len(locations))\n self.assertEqual('L', locations[0].get('provenance', 'L'))",
"def focallengthFromFOV(self, view_x=None, view_y=None): # pragma: no cover\n # to be overloaded by the child class.\n return 0",
"def test_vfov_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.vfov_from_hfov(hfov, width, height)\n expected = 43.19696059328124\n self.assertEqual(actual, expected)",
"def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)",
"def __init__(self, eye=vec([0, 0, 0]), target=vec([0, 0, -1]), up=vec([0, 1, 0]),\n vfov=90.0, aspect=1.0):\n self.eye = eye\n self.aspect = aspect\n # TODO A5 copy implementation from A4\n self.target = target\n self.vfov = np.radians(vfov)\n self.w = normalize(eye - target)\n self.u = normalize(np.cross(up, self.w))\n self.v = np.cross(self.w, self.u)",
"def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def setupCamera(self):\n\t\tself.eye = self.vr.newEye(\"test_cam\")\n\t\tself.eye.reposition(0.0, 1.0, 0.5, 0.0, 0.0, 0.0)\n\t\tself.eye.setFOV(self.config.camFOV)\n\t\n\t\tself.video.clear(\"black\")\n\t\tself.video.show(self.eye, 0, 0)",
"def __init__(self,fovraws,ralohi=(),declohi=()\n ,obs_pos=None,obs_vel=None,obs_year=None\n ):\n ### Get count of items in FOV sequence; ensure it is 2 or more\n ### and ralohi and declohi are empty, or that fovraws is empty\n ### and ralohi and declohi have 2 values each\n (self.fovraws\n ,self.ralohi\n ,self.declohi\n ,self.obs_pos\n ,self.obs_vel\n ,self.obs_year\n ,)= fovraws,list(ralohi),list(declohi),obs_pos,obs_vel,obs_year\n self.L = len(fovraws)\n assert (1<self.L and not (self.ralohi+self.declohi)\n ) or (0==self.L and 2==len(self.ralohi) and 2==len(self.declohi)\n ), 'Invalid vertices in FOV'\n\n ################################\n ### Initialize: FOV RA,Dec pairs; FOV type (assume polygon); FOV\n ### vector triples; list of RA,Dec boxes\n self.radecdegs = list()\n self.fovtype = 1<self.L and FOV.POLYGONTYPE or FOV.RADECBOXTYPE\n self.uvfovxyzs,fovsum = list(),sp.vpack(0.,0.,0.)\n self.radec_boxes = list()\n rdba = self.radec_boxes.append ### Shorthand to append box to list\n\n ################################\n ### Parse list of vertices:\n ### - [list,float] => Circle (cone)\n ### - [list,list] => RA,Dec box\n ### - [list,list,list,...] => Polygon\n for vertex in fovraws:\n\n ### For second of two vertices ...\n if 1==len(self.radecdegs) and 2==self.L:\n ### Two-vertex items are either a conic FOV, or an [RA,Dec] box\n try:\n ### If second item in list is a float, then it's a half-angle\n ### of the cone\n self.hangdeg = float(vertex)\n assert self.hangdeg < 90.0,'Cone half-angle is not less than 90degrees'\n assert self.hangdeg > 0.0,'Cone half-angle is not greater than 0degrees'\n self.hangrad = self.hangdeg * rpd\n self.min_cosine = math.cos(self.hangrad)\n self.uv_cone_axis = self.uvfovxyzs[0]\n self.fovtype = FOV.CIRCLETYPE\n break\n except AssertionError as e:\n raise\n except:\n ### If the above fails, then it's the second corner of the box\n self.fovtype = FOV.RADECBOXTYPE\n\n ### Parse one vertex\n ra,dec,uvxyz = parse_inertial(vertex)\n\n ### Append RA,Dec and unit vector XYZ onto their resepective lists\n self.radecdegs.append((ra,dec,))\n self.uvfovxyzs.append(uvxyz)\n fovsum = sp.vadd(fovsum,uvxyz)\n\n ################################\n ### Calculate RA,DEC limits as list of [ralo,rahi,declo,dechi] boxes\n ### - .radec_boxes is a list; rdba is .radec_boxes.append\n ### - List will have multiple RA,Dec boxes if FOV crosses the Prime\n ### Meridian (PM) an even number of times.\n\n if self.fovtype == FOV.RADECBOXTYPE:\n ### RA,DEC box FOV: calculate limits; handle PM crossing\n if 2==self.L:\n ras,decs = zip(*self.radecdegs)\n ralo,rahi = sorted(ras)\n declo,dechi = sorted(decs)\n if 180 > (rahi-ralo):\n rdba([ralo,rahi,declo,dechi])\n else:\n rdba([0.0,ralo,declo,dechi])\n rdba([rahi,360.0,declo,dechi])\n else:\n if self.ralohi[1] > self.ralohi[0]:\n rdba(self.ralohi+self.declohi)\n else:\n rdba([self.ralohi[0],360.0]+self.declohi)\n rdba([0.0,self.ralohi[1]]+self.declohi)\n\n elif self.fovtype == FOV.CIRCLETYPE:\n ### Circular FOV: DEC limits determine RA limits; handle PM Xing\n ra,dec = self.radecdegs[0]\n fovdeclo = dec - self.hangdeg\n fovdechi = dec + self.hangdeg\n\n if fovdeclo < -90.0 or fovdechi > 90.0:\n ### A pole is in the FOV; use full RA range\n fovralo,fovrahi = 0.0,360.0\n fovdeclo,fovdechi = max([fovdeclo,-90.0]),min([fovdechi,+90.0])\n\n elif fovdeclo == -90.0 or fovdechi == 90.0:\n ### A pole is on the FOV circumference; RA range is 180 degrees\n fovralo,fovrahi = ra-90.0,ra+90.0\n\n else:\n ### The FOV excludes the poles; calculate the RA 
range, using\n ### the formula validated in script validate_delta_ra_formula.py\n tanhang,tandec = math.tan(self.hangrad),math.tan(dec*rpd)\n sinhang,cosdec = math.sin(self.hangrad),math.cos(dec*rpd)\n coshang = math.cos(self.hangrad)\n T = sinhang / math.sqrt(1.0 - ((tanhang*tandec)**2))\n deltara = dpr * math.atan(T / (cosdec * coshang))\n fovralo,fovrahi = ra-deltara,ra+deltara\n\n ### Ensure RA limits are within range [0:360] (N.B. inclusive)\n if fovralo < 0.0: fovralo += 360.0\n if fovrahi > 360.0: fovrahi -= 360.0\n\n if fovralo <= fovrahi:\n ### RA lo <= RA hi: no PM crosssing\n rdba([fovralo,fovrahi,fovdeclo,fovdechi])\n else:\n ### RA hi < RA hi: there is a PM crosssing\n rdba([0.0,fovrahi,fovdeclo,fovdechi])\n rdba([fovralo,360.,fovdeclo,fovdechi])\n\n else:\n assert self.fovtype == FOV.POLYGONTYPE\n ### Polygonal FOV: build frame where all vertices will be\n ### projected onto the plane Z=1\n\n ### .uvavg: unit vector = mean of all vertices, will be +Z\n self.uvavg = sp.vhat(fovsum)\n\n ### Create rotation matrix to FOV frame: +Z is mean of vertices'\n ### directions (.uvavg); +X will be a direction that is not\n ### parallel to any side of the polygon\n ### - Start with temporary matrix with +Z as defined above; +X\n ### toward vertex at largest angle from .uvavg\n vother = min([(sp.vdot(self.uvavg,v),list(v),) for v in self.uvfovxyzs])[1]\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n ### - Rotate all vectors to that frame; scale Z components to 1.0\n vtmps = list()\n for v in self.uvfovxyzs:\n ### - Ensure all vertices are in the same hemisphere\n assert 0.0 < sp.vdot(self.uvavg,v),'All vertices are not in the same hemisphere'\n vtmp = sp.mxv(tmpmtx,v)\n vtmps.append(sp.vscl(1.0/vtmp[2],vtmp))\n\n ### Find largest azimuth gap between any two sides: that azimuth\n ### will be direction of +X in the final rotation matrix\n ### - Get azimuths of all sides of polygon, in range [-PI:PI]\n azimuths,vlast = list(),vtmps[-1]\n for v in self.uvfovxyzs:\n azimuths.append(numpy.arctan((v[1]-vlast[1])/(v[0]-vlast[0])))\n vlast = v\n ### - Sort angles and add [least angle plus PI] to end of list\n azimuths.sort()\n azimuths.append(azimuths[0]+sp.pi())\n ### - Find largest delta-azimuth and its index\n dazimuths = [hi-lo for hi,lo in zip(azimuths[1:],azimuths[:-1])]\n maxdaz = max(dazimuths)\n imaxdaz = dazimuths.index(maxdaz)\n ### - Calculate azimuth from to mean of that delta-azimuth,\n meanaz = azimuths[imaxdaz] + (maxdaz / 2.0)\n\n ### Final matrix: add rotation of tmpmtx around +Z by that angle\n self.mtxtofov = sp.mxm(sp.rotate(meanaz,3),tmpmtx)\n\n ### Apply final rotation matrix, store results in .uvlclxyzs\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n self.uvlclxyzs = [self.rotate_to_local(v) for v in self.uvfovxyzs]\n\n ### Calculate upper and lower RA and Dec limits, with PM crossings\n los,his = list(),list()\n ### - Create [[RA,Dec],[X,Y,Z]] pairs list; ensure last is off PM\n pairs = list(zip(self.radecdegs,self.uvfovxyzs))\n pop_count = 0\n while pairs[-1][0][0] == 0.0:\n pop_count += 1\n assert pop_count < self.L,'All vertices are on the Prime Meridian'\n pairs.append(pairs.pop(0))\n\n ### Count PM crossings\n self.crossing_count = 0\n lastra = pairs[-1][0][0]\n zero_count = 0\n for (ra,dec,),xyz in pairs:\n if ra == 0.0:\n zero_count += 1\n if lastra > 180.0: ra = 360.0\n if 180 < abs(ra-lastra): self.crossing_count += 1\n lastra = ra\n\n if 0==self.crossing_count or 1==(1&self.crossing_count):\n ### If there are either no, or an odd number, of PM crossings,\n ### then 
use the pairs as-is for a single FOV\n subfovs = [pairs]\n if self.crossing_count:\n ### - For odd crossing count, one pole or the other must be\n ### in the FOV; init full RA range, that pole for Dec ranges\n ralo,rahi = 0.0,360.0\n if sp.vdot(self.uvavg,[0,0,1]) > 0.0: declo = dechi = +90.0\n else : declo = dechi = -90.0\n else:\n ### - For zero crossing count, initialize inverted ranges\n ralo,rahi = 360.0,0.0\n declo,dechi = +90.0,-90.0\n subranges = [[ralo,rahi,declo,dechi]]\n\n else:\n ### If there are an even, non-zero number of PM crossings, break\n ### them into two sub-FOVs, one on either side of the PM\n\n eastfov,westfov = list(),list()\n\n if zero_count:\n ### If there are any zero RA values, rotate the pairs to\n ### ensure a zero-RA pair is the first, so it and the non-zero\n ### last pair will be assigned to the correct side of the PM\n while pairs[0][0][0]!=0.0: pairs.append(pairs.pop(0))\n else:\n ### If there are no zero RA values, rotate the pairs to ensure\n ### a crossing occurs between the last and first pair, so the\n ### corresponding zero crossing will be assigned to the\n ### correct side of the PM\n while abs(pairs[0][0][0]-pairs[-1][0][0])<180:\n pairs.append(pairs.pop(0))\n\n ### Write vertices into the two sub-FOVs\n\n ### - Set last-vertex values for first item in pairs\n (lastra,lastdec,),lastxyz = pairs[-1]\n\n for pair in pairs:\n ### - Loop over vertex pairs ((RA,DEC,),Cartesian_Vector)\n (ra,dec,),xyz = pair\n\n if ra == 0.0:\n\n ### - When RA=0, the previous RA determines if it's 0 ar 360\n if lastra >= 180.0:\n ra = 360.0\n westfov.append([(ra,dec,),xyz])\n iswest = True\n else:\n eastfov.append(pair)\n iswest = False\n\n elif abs(lastra-ra) >= 180.0:\n\n ### - When the change in RA>=180, the PM is being crossed\n\n ### - Find the mid-vector where the PM is crossed\n k1 = -xyz[1] / (lastxyz[1]-xyz[1])\n midxyz = sp.vhat(sp.vlcom(1.0-k1,xyz,k1,lastxyz))\n middec = dpr * sp.recrad(midxyz)[2]\n\n ### - Add that mid-vector, with RA=360, to the west FOV\n westfov.append([(360.0,middec,),midxyz])\n\n ### - Determine if vector is west\n iswest = ra >= 180.0\n\n ### - Add that mid-vector, with RA=0, to the east FOV ...\n if (ra > 0.0) and (not iswest):\n ### - ... 
only if the ra is not already 0, as it will be\n ### added in the next step\n eastfov.append([(0.0,middec,),midxyz])\n\n ### Add the vector to either east or west FOV\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n else:\n\n ### PM was not crossed, add vector to same FOV, as last time\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n ### - Set last-vertex values for next item in pairs\n (lastra,lastdec,),lastxyz = (ra,dec,),xyz\n\n ### - Create subfovs list of east and west FOVs; set subranges\n subfovs = [eastfov,westfov]\n subranges = [[360.0,0.0,90.0,-90.0],[360.0,0.0,90.0,-90.0]]\n\n ### To here, we have list of FOV(s) and list of range(s); use them\n ### to determine RA,DEC box(es) to use for database query\n\n while subfovs:\n\n ### Get sub-FOV, sub-range; set last vertex's XYZ\n subfov,(ralo,rahi,declo,dechi,) = subfovs.pop(),subranges.pop()\n lastxyz = subfov[-1][-1]\n\n for pair in subfov:\n ### Each element of subfov comprises (RA,Dec) and vertex XYZ\n ### - xyz is a unit vector\n (ra,dec,),xyz = pair\n\n ### - Adjust RA limits as needed from RA of vertex\n if ra > rahi: rahi = ra\n elif ra < ralo: ralo = ra\n\n ### - Set Dec extrema from DEC of vertex\n maxdec = mindec = dec\n\n ### - Calculate Dec extrema from lastxyz to xyz\n ### -- Normal to plane of lastxyz and syz\n sidenormal = sp.vcrss(lastxyz,xyz)\n ### -- Z-rates along great circle at lastxyz and at xyz\n lastdz = sp.vcrss(sidenormal,lastxyz)[2]\n dz = sp.vcrss(sidenormal,xyz)[2]\n if 0.0 > (lastdz*dz):\n ### -- If sign of Z-rates differs, there should be an\n ### extreme value between lastxyz and xyz\n ### --- Get vector perpendicular to side normal on equator\n ### --- Use that to calculate the unit vector at Dec extreme\n equinox = sp.vcrss([0,0,1],sidenormal)\n vtoextremez = sp.ucrss(sidenormal,equinox)\n ### --- Cosine of angle between lastxyz and xyz\n mindot = sp.vdot(lastxyz,xyz)\n for none in [None,None]:\n ### --- Two cases: vtoextremez and -vtoextremez\n ### - Angles from vtoextremez to lastxyz and to xyz\n ### must be less than angle between lastxyz and xyz\n ### so cosines of those angles must be greater\n lastxyzdot = sp.vdot(lastxyz,vtoextremez)\n xyzdot = sp.vdot(xyz,vtoextremez)\n if lastxyzdot>mindot and xyzdot>mindot:\n ### --- Adjust maxdec and mindec as needed\n try : extremedec = dpr * math.asin(vtoextremez[2])\n except: extremedec = dpr * sp.recrad(vtoextremez)[2]\n if extremedec > maxdec: maxdec = extremedec\n elif extremedec < mindec: mindec = extremedec\n break\n ### --- Invert vtoextremez for next pass\n vtoextremez = sp.vminus(vtoextremez)\n\n ### - Adjust Dec limits as needed from Dec extrema of side\n if maxdec > dechi: dechi = maxdec\n if mindec < declo: declo = mindec\n lastxyz = xyz\n\n ### Append calculated RA,Dec box(es)\n rdba((ralo,rahi,declo,dechi,))\n\n ### Put None in .localxyzs, in .v_for_stellar_aberr, and in\n ### .v_for_parallax; if no stellar aberration or parallax is\n ### explicitly applied to define it later, then .localxyzs will be\n ### calculated on the fly\n self.localxyzs = None\n self.v_for_stellar_aberr = None\n self.v_for_parallax = None",
"def __init__(self, camera_func, width, height):\n self.camera_func = camera_func\n self.state = pygame.Rect(0, 0, width, height)",
"def calc_focal_values(w, h, fov):\n cx = w / 2\n cy = h / 2\n f = w / (2 * (np.tan(fov * np.pi / 360)))\n return (cx, cy), f",
"def test04_fov_axis(variants_vec_spectral, origin, direction, fov):\n\n def check_fov(camera, sample):\n ray, _ = camera.sample_ray(0, 0, sample, 0)\n assert dr.allclose(dr.acos(dr.dot(ray.d, direction)) * 180 / dr.pi, fov / 2)\n\n # In the configuration, aspect==1.5, so 'larger' should give the 'x'-axis\n for fov_axis in ['x', 'larger']:\n camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)\n for sample in [[0.0, 0.5], [1.0, 0.5]]:\n check_fov(camera, sample)\n\n # In the configuration, aspect==1.5, so 'smaller' should give the 'y'-axis\n for fov_axis in ['y', 'smaller']:\n camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)\n for sample in [[0.5, 0.0], [0.5, 1.0]]:\n check_fov(camera, sample)\n\n # Check the 4 corners for the `diagonal` case\n camera = create_camera(origin, direction, fov=fov, fov_axis='diagonal')\n for sample in [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]:\n check_fov(camera, sample)",
"def fov(self, rounded=False):\r\n fov = self.nativePointer().fov\r\n if rounded:\r\n return int(round(fov))\r\n return fov",
"def __init__(self, record_video=True, video_name='video.avi', lower_color=(20, 80, 20), upper_color=(30, 255, 255)):\n self.video = cv2.VideoCapture(0)\n\n # We need to check if camera \n # is opened previously or not \n if not self.video.isOpened():\n print(\"Error reading video file\")\n\n # We need to set resolutions.\n # so, convert them from float to integer. \n self.frame_width = int(self.video.get(3))\n self.frame_height = int(self.video.get(4))\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.size = (self.frame_width, self.frame_height)\n # Below VideoWriter object will create \n # a frame of above defined The output \n # is stored in file with the name stored in self.video_name.\n self.record_video = record_video\n if self.record_video:\n self.video_result = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'MJPG'), self.fps, self.size)\n\n # define the lower and upper boundaries of the colored\n # ball in the HSV color space\n self.lower_color = lower_color\n self.upper_color = upper_color\n self.x = 0\n self.y = 0\n self.is_ball_visible = False\n self.radius = 10"
]
| [
"0.67196584",
"0.6497814",
"0.64449346",
"0.6268035",
"0.6130942",
"0.6108091",
"0.6069316",
"0.59878516",
"0.5951577",
"0.5824285",
"0.58094376",
"0.5759047",
"0.5749021",
"0.5749021",
"0.5749021",
"0.5739862",
"0.5734905",
"0.5704752",
"0.56909734",
"0.565619",
"0.5634168",
"0.5622622",
"0.561765",
"0.56150603",
"0.5572987",
"0.55506253",
"0.5541735",
"0.553202",
"0.55120164",
"0.55024534"
]
| 0.7520698 | 0 |
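The record above builds the camera FOV pyramid by hand-assembling a `vtkPolyData` (points, a cell array of triangular faces, a mapper, an actor). Below is a minimal, self-contained sketch of that same pipeline rendered in a basic window; the apex/base coordinates, colours and renderer setup are illustrative assumptions, not values taken from the original class.

```python
import vtk

# Illustrative geometry: apex above a unit-square footprint (made-up values,
# standing in for the satellite altitude and the four ground intercepts).
apex = (0.0, 0.0, 2.0)
base = [(-1.0, -1.0, 0.0), (1.0, -1.0, 0.0), (1.0, 1.0, 0.0), (-1.0, 1.0, 0.0)]
verts = [apex] + base
faces = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]  # four side triangles, as above

poly = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
for i, v in enumerate(verts):
    points.InsertPoint(i, v)
for f in faces:
    ids = vtk.vtkIdList()
    for j in f:
        ids.InsertNextId(j)
    polys.InsertNextCell(ids)
poly.SetPoints(points)
poly.SetPolys(polys)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(poly)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 1.0, 0.5)
actor.GetProperty().SetOpacity(0.1)

renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
window.Render()
interactor.Start()
```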
Create text actor for view labels | def create_text(self, settings, viewport):
viewport = np.array(viewport)
viewport[[0, 2]] = self.WIN_H_SCALE*viewport[[0, 2]]
viewport[[1, 3]] = self.WIN_V_SCALE*viewport[[1, 3]]
viewport = list(viewport)
# Set defaults if not specified
defaults = {
"Size": 20,
"Anchor": "SW",
"X offset": 0.02,
"Y offset": 0.02,
"Font": "Montserrat",
"Colour": self.COLOUR_FONT
}
for key in defaults:
try:
settings[key]
except KeyError:
settings[key] = defaults[key]
# Position
margin = (
self.TEXT_SCALE*settings["X offset"]*(self.ANCHOR[settings["Anchor"]][0] - 1),
self.TEXT_SCALE*settings["Y offset"]*(self.ANCHOR[settings["Anchor"]][1] - 1)
)
posx = int((viewport[0] + 0.5*self.ANCHOR[settings["Anchor"]][0]*(viewport[2] - viewport[0]) - margin[0])*self.SCREEN_SIZE[0])
posy = int((viewport[1] + 0.5*self.ANCHOR[settings["Anchor"]][1]*(viewport[3] - viewport[1]) - margin[1])*self.SCREEN_SIZE[1])
# Properties
props = vtk.vtkTextProperty()
props.SetFontFamily(vtk.VTK_FONT_FILE)
if settings["Font"] == "Montserrat-SemiBold":
props.SetFontFile("./fonts/Montserrat-SemiBold.ttf")
elif settings["Font"] == "Consolas":
props.SetFontFile("./fonts/consola.ttf")
elif settings["Font"] is "7Segment":

props.SetFontFile("./fonts/digital-7 (mono).ttf")
else:
props.SetFontFile("./fonts/Montserrat.ttf")
props.SetFontSize(int(self.TEXT_SCALE*settings["Size"]))
props.SetColor(settings["Colour"])
props.SetJustification(self.ANCHOR[settings["Anchor"]][0])
props.SetVerticalJustification(self.ANCHOR[settings["Anchor"]][1])
# Create actor
actor = vtk.vtkTextActor()
actor.SetInput(settings["String"])
actor.SetDisplayPosition(posx, posy)
actor.SetTextProperty(props)
return actor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)",
"def create_label(self, on, text: str):\n return tk.Label(on, font=self.FONT, bg=self.BG_COLOR, text=text)",
"def setupScreenText(self) :\n\t\t# Create object to show avatar's position on the screen.\n\t\t# Update actual text using setText method on object.\n\t\tself.avPos = showText(0.92)\n\n \t\t# Create object to show a list of visible avatars\n \t\tself.showNumVisible = showText(0.85)\n \t\tself.visList = []\n\n\t\t# Create object for displaying keyboard shortcuts\n\t\tself.helpText = showText(0.78)\n\t\tself.helpText.setText(\"h: for help\")",
"def draw_text(label_text, label_position, scene):\n\n # Distance of camera from focus point to determine text size\n distance_from_center = mag(scene.center - scene.camera.pos)\n\n # Far away = smaller text, closer = larger text (up to a min (20) and max (40))\n # Typically 5->20 units away\n # (eqn and limits modified to suit display better) = -1.3333 * distance_from_center + 46.6667\n label_height = -1.3333 * distance_from_center + 36.6667 # Calculate label height\n label_height = max(min(label_height, 35), 10) # Limit to 10->35\n label_xoffset = 0\n label_yoffset = 0\n label_space = 0\n label_font = 'serif'\n label_text_colour = color.black\n label_line_color = color.white\n label_bg_opacity = 0\n label_linewidth = 0.1\n\n the_label = label(\n canvas=scene,\n pos=label_position,\n text=label_text,\n height=label_height,\n xoffset=label_xoffset,\n yoffset=label_yoffset,\n space=label_space,\n font=label_font,\n color=label_text_colour,\n linecolor=label_line_color,\n opacity=label_bg_opacity,\n linewidth=label_linewidth\n )\n\n return the_label",
"def Label(self) -> str:",
"def make_text(location, msg, pad=0):\r\n text = tkinter.Label(location, text='\\n{}\\n'.format(msg),\r\n wraplength=600, justify='left', anchor='w',\r\n padx=pad, pady=pad, font=DEFAULT_FONT)\r\n\r\n return text",
"def __init__(self, text, font, pos, color=(255, 255, 255)):\r\n self.pos = pos\r\n self.label = font.render(text, 1, color)",
"def name_id_text(self):\n text = Marker()\n text.header = self._header\n text.type = Marker.TEXT_VIEW_FACING\n text.action = Marker.ADD\n text.scale.z = 0.05\n text.color = self.GREEN\n text.pose = deepcopy(self.POSE)\n text.pose.position.x = self._p1.x\n text.pose.position.y = (self._p1.y + self._p5.y) / 2\n text.pose.position.z = self._p1.z\n text.text = \"{} #{}\".format(self._object.object_name, self._track_id)\n return text",
"def create_labels(self):\n for name in self.name_to_phone:\n temp_labels = Label(text=name)\n self.root.ids.main.add_widget(temp_labels)",
"def DrawLabel(self, screen):\r\n screen.blit(self.label, self.pos)",
"def addLabel(*args):",
"def drawLabels(self):\r\n if self.sensors == None or self.sensors == []:\r\n return\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[0]])\r\n self.c.create_text(30,20,text=self.sensors[self.sensor_ids[0]],fill=col,anchor=tk.NW)\r\n if len(self.sensor_ids) == 2:\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[1]])\r\n self.c.create_text(30,40,text=self.sensors[self.sensor_ids[1]],fill=col,anchor=tk.NW)",
"def __init__(self, text, pos=(0, 0), anchor='ll',\n size=16,\n sizeUnits=POINT_UNITS,\n color=(1, 0, 1, 1),\n dpi=None,\n yPointsUp=None,\n font_file=None,\n angle=0.,\n glyphs=GlyphTypes.BASE,\n filterControl=True):\n self.logger = namedLogger(__name__, self.__class__)\n if dpi:\n if Label.DPI is None:\n Label.DPI = dpi\n else:\n self.DPI = dpi\n if yPointsUp:\n if Label.Y_POINTS_UP is None:\n Label.Y_POINTS_UP = yPointsUp\n else:\n self.Y_POINTS_UP = yPointsUp\n self.color = color\n self.glyphs = glyphs\n self.filterControl = filterControl\n\n # Model matrix\n self.transform = Transform()\n\n if font_file is None:\n font_file = Path(__file__).parent.joinpath(Label.DEFAULT_FONT)\n self.font = Font(font_file)\n self._lineWidth = 0\n self._labelWidth = 0\n self._labelHeight = self.font.table['linespace']\n self.setSize(size, sizeUnits)\n\n self._baseInd = np.array([0, 1, 2, 2, 3, 0], np.uint32)\n self.allVertices = None\n self.allIndices = None\n self.extracted = {}\n # Offet, kerning, next_char_shift\n self._string_metric = []\n\n # Set text\n if self.filterControl:\n text = self._filterControl(text)\n self.shader = Shader.fromString(*self._getShaderCode())\n self._setText(text)\n self._setMesh()\n self.model = Transform()\n self.setPos(*pos, anchor)\n self.setRotation(angle)",
"def text_plot(self):\n if self.stext is not None:\n # Create text object :\n self.stextmesh = visu.Text(text=self.stext, color=self.stextcolor,\n font_size=self.stextsize, pos=self.xyz,\n bold=True, name='SourcesText')\n\n # Set text texture :\n self.stextmesh.set_gl_state('translucent', depth_test=True)\n\n # Apply a transformation to text elements to not cover sources :\n self.stextmesh.transform = vist.STTransform(\n translate=self.stextshift)\n else:\n self.stextmesh = visu.Text(name='NoneText')",
"def add_text_label(self, name, value=None, label=None, location=(None,0)):\n widget=widget_label.LVTextLabel(self,value=value)\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n return self.add_simple_widget(name,widget,label=label,add_indicator=False,location=location)",
"def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)",
"def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)",
"def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)",
"def tagview(tab,label,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n col=classifc[label]\r\n labnow=classif[label]\r\n# print (labnow, text)\r\n if label == 'back_ground':\r\n deltay=30\r\n else:\r\n# deltay=25*((labnow-1)%5)\r\n deltay=40+10*(labnow-1)\r\n\r\n viseg=cv2.putText(tab,label,(x, y+deltay), font,0.3,col,1)\r\n return viseg",
"def create_item(window, text, x, y, parent, color=(100,193,212), command=None, bevel=True):\r\n font = pygame.font.Font(None, 20)\r\n text_width, text_height = font.size(text)\r\n text_surface = font.render(text,True,(0,0,0))\r\n rect = pygame.Rect(x-int(text_width/2.0),\r\n y-int(text_height/2.0),\r\n text_width,\r\n text_height)\r\n if command:\r\n return Button(text_surface=text_surface,\r\n color=color,\r\n bounds_relative_to_parent=rect,\r\n parent_bounds=parent.bounds,\r\n command=command,\r\n window=window,\r\n bevel=bevel)\r\n else:\r\n return Label(text_surface=text_surface,\r\n color=color,\r\n bounds_relative_to_parent=rect,\r\n parent_bounds=parent.bounds,\r\n window=window)",
"def create_labels(self):\n for name in self.names:\n new_label = Label(text=name)\n self.root.ids.names_box.add_widget(new_label)",
"def draw_labels(self):\n x = PygameUI.Label('Inactive Events')\n x.frame = pygame.Rect(4, 4, 150, 30)\n self.scene.add_child(x)\n \n x = PygameUI.Label('Active Events')\n x.frame = pygame.Rect(Menu.scene.frame.w-150, 4, 150, 30)\n self.scene.add_child(x)",
"def create_label(self, org, name):\n pass",
"def draw_label(label_text, label_position, scene):\n\n # Custom settings for the label\n label_height = 10\n label_xoffset = 0\n label_yoffset = 50\n label_space = 20\n label_font = 'serif'\n label_text_colour = color.black\n label_line_color = color.black\n\n the_label = label(\n canvas=scene,\n pos=label_position,\n text=label_text,\n height=label_height,\n xoffset=label_xoffset,\n yoffset=label_yoffset,\n space=label_space,\n font=label_font,\n color=label_text_colour,\n linecolor=label_line_color\n )\n\n return the_label",
"def create_labels(self):\n for name in self.names:\n temp_button = Label(text=name)\n self.root.ids.label_box.add_widget(temp_button)",
"def create_title(text, y=PADDING, screen=None):\n if screen is None:\n screen = lv.scr_act()\n lbl = lv.label(screen)\n lbl.set_style(0, styles[\"title\"])\n lbl.set_text(text)\n lbl.set_long_mode(lv.label.LONG.BREAK)\n lbl.set_width(HOR_RES-2*PADDING)\n lbl.set_x(PADDING)\n lbl.set_align(lv.label.ALIGN.CENTER)\n lbl.set_y(y)\n return lbl",
"def draw_label(self):\n x, y, z, phi, theta, psi = self.airplane.eta\n u, v, w, p, q, r = self.airplane.nu\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot = self.airplane.nu_dot\n alpha = np.arctan(w/u)\n V_a = np.sqrt(u**2+v**2+w**2)\n beta = np.arcsin(v/V_a)\n\n self.labels[0].text = 'Roll [deg]: %.2f' % (phi*180/np.pi,)\n self.labels[0].draw()\n self.labels[1].text = 'Pitch [deg]: %.2f' % (theta*180/np.pi,)\n self.labels[1].draw()\n self.labels[3].text = 'Pos: (%.2f, %.2f, %.2f)' % (x, y, z)\n self.labels[3].draw()\n self.labels[4].text = 'Speed: %.2f (%.2f, %.2f, %.2f)' % (V_a, u, v, w)\n self.labels[4].draw()\n self.labels[5].text = 'Acceleration: (%.2f, %.2f, %.2f)' % (u_dot, v_dot, w_dot)\n self.labels[5].draw()\n self.labels[6].text = 'Angle of attack: %.2f' % (alpha,)\n self.labels[6].draw()\n self.labels[7].text = 'Sideslip angle: %.2f' % (beta,)\n self.labels[7].draw()\n\n self.labels[9].text = 'Drag: %.2f' % (self.airplane.f_drag,)\n self.labels[9].draw()\n self.labels[10].text = 'Lift: %.2f' % (self.airplane.f_lift,)\n self.labels[10].draw()\n self.labels[11].text = 'Thruster: %.2f' % (self.airplane.f_thruster,)\n self.labels[11].draw()\n self.labels[12].text = 'Elevators: %.2f' % (self.airplane.elevator,)\n self.labels[12].draw()\n self.labels[13].text = 'Ailerons: %.2f' % (self.airplane.aileron,)\n self.labels[13].draw()\n self.labels[14].text = 'Rudder angle: %.2f' % (self.airplane.rudder_angle,)\n self.labels[14].draw()\n self.labels[15].text = 'Flaps: %.2f' % (self.airplane.flaps,)\n self.labels[15].draw()\n\n if (alpha > CRITICAL_STALL_ANGLE):\n self.stall_warning.text = 'Stall!'\n self.stall_warning.draw()",
"def renderLabel(self):\n self.render = self.font.render(self.text, True, self.color)\n self.rect = self.render.get_rect()",
"def __init__(self, font, color, text=\"\", top=0, left=0, bottom=None, right=None):\n self.text = text\n self.font = font\n self.color = color\n self.top = top\n self.left = left\n self.bottom = bottom\n self.right = right\n self.renderLabel()",
"def __init__(self, text, separator_line_thickness, label_type, dpi=(600, 600)):\n \n def get_text_on_label(text, label_type):\n \"\"\"Format how the text will look on the label.\n \n text - Text to be placed on the label.\n label_type - One of the types specifying the label layout.\n \"\"\"\n text_on_label = \"\".join([c for c in text if c in string.ascii_letters + string.digits])\n if label_type == 0:\n text_on_label = \"\"\n elif label_type == 1 or label_type == 2 or label_type == 4:\n text_on_label = \"\\n\".join([text_on_label[:4],\n text_on_label[4:8],\n text_on_label[8:12],\n text_on_label[12:]])\n elif label_type == 3:\n text_on_label = \"\\n\".join([\"-\".join([text_on_label[:4],\n text_on_label[4:8]]),\n \"-\".join([text_on_label[8:12],\n text_on_label[12:]])])\n else:\n text_on_label = \"\"\n return text_on_label\n \n self.label_image = None\n self.text_on_label = get_text_on_label(text, label_type)\n self.label_type = label_type\n self.separator_line_thickness = separator_line_thickness\n self.dpi = dpi"
]
| [
"0.6539994",
"0.6291919",
"0.61044025",
"0.6095112",
"0.6014266",
"0.59630674",
"0.5954388",
"0.594855",
"0.59343463",
"0.59293854",
"0.5919707",
"0.59102863",
"0.5881727",
"0.5864653",
"0.5849347",
"0.584334",
"0.5827427",
"0.5817509",
"0.58104134",
"0.5785886",
"0.57827777",
"0.5771549",
"0.5764385",
"0.57572705",
"0.57512814",
"0.57495755",
"0.57451814",
"0.5710885",
"0.5703677",
"0.56910765"
]
| 0.649773 | 1 |
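`create_text` in the record above relies on a class-level `ANCHOR` table and a few scaling constants that are not shown in the record itself. The sketch below is a hedged reconstruction of how such a table could look: each entry's first element works both as a half-width multiplier and as VTK's horizontal justification code (0=left, 1=centered, 2=right), and the second element does the same vertically (0=bottom, 1=centered, 2=top). The extra window/viewport rescaling (`WIN_H_SCALE`, `WIN_V_SCALE`) is left out here.

```python
# Hypothetical ANCHOR table, consistent with how create_text uses it.
ANCHOR = {
    "SW": (0, 0), "S": (1, 0), "SE": (2, 0),
    "W":  (0, 1), "C": (1, 1), "E":  (2, 1),
    "NW": (0, 2), "N": (1, 2), "NE": (2, 2),
}

def display_position(anchor, viewport, offsets, screen_size, text_scale=1.0):
    """Reproduce the pixel position computed in create_text (illustrative only)."""
    ax, ay = ANCHOR[anchor]
    # Negative margin at the low edge, positive at the high edge, zero when centered,
    # so the label is always pushed *into* the viewport by the requested offset.
    margin_x = text_scale * offsets[0] * (ax - 1)
    margin_y = text_scale * offsets[1] * (ay - 1)
    posx = (viewport[0] + 0.5 * ax * (viewport[2] - viewport[0]) - margin_x) * screen_size[0]
    posy = (viewport[1] + 0.5 * ay * (viewport[3] - viewport[1]) - margin_y) * screen_size[1]
    return int(posx), int(posy)

# A label 2% in from the south-west corner of a full-screen viewport:
print(display_position("SW", (0.0, 0.0, 1.0, 1.0), (0.02, 0.02), (1920, 1080)))  # -> (38, 21)
```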
Makes a vtkIdList from a Python iterable | def mkVtkIdList(self, it):
vil = vtk.vtkIdList()
for i in it:
vil.InsertNextId(int(i))
return vil | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_arr2vtkIdList(self):\n a = [1, 2, 3, 4, 5]\n p = array_handler.array2vtkIdList(a)\n for i, j in enumerate(a): \n self.assertEqual(p.GetId(i), j)\n p = vtk.vtkIdList()\n ident = id(p)\n p = array_handler.array2vtkIdList(numpy.array(a), p)\n for i, j in enumerate(a): \n self.assertEqual(p.GetId(i), j)\n self.assertEqual(id(p), ident)\n \n self.assertRaises(AssertionError, array_handler.array2vtkIdList,\n [[1,2,3]])",
"def at(self, *args):\n return _libsbml.IdList_at(self, *args)",
"def getIDs():",
"def test_id_array(self):\n arr = vtk.vtkIdTypeArray()\n arr.SetNumberOfTuples(10)\n for i in range(10):\n arr.SetValue(i, i)\n\n np = array_handler.vtk2array(arr)\n self.assertEqual(numpy.all(np == range(10)), True)",
"def genIdList(numId, idSize):\n\tiDs = []\n\tfor i in range(numId):\n\t\tiDs.append(genID(idSize))\n\treturn iDs",
"def VtInt(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_I2, list)",
"def ids(self):\n return self.obj_to_id.values()",
"def object_ids(self):\n return self._extract_set('id')",
"def get_ids(self):\n return [item.id for item in self.items]",
"def get_ids_as_slice_or_list(self):\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def iter_triple_ids(self) -> Iterable[List[int]]:\n raise NotImplementedError",
"def get_ids(self) -> List[str]:",
"def get_id_iterable(cls):\r\n return range(cls.MAXIMUM_REPLICABLES)",
"def _build_ID_sets(self):\n raise NotImplementedError",
"def identities(self, generator=False, **kwargs):\n g = self._iter(body=self._identities_iter(), **kwargs)\n if generator:\n return g\n\n return list(g)",
"def list_ids(self, start: int = None, end: int = None) -> List:\n return [i.unique_id for i in self.data[start:end]]",
"def IDs(self, default=[{}]):\n tmp = self.data.get('ids', default)\n return [HEP.IDObject(i) for i in tmp]",
"def polygon_ids(self):\n return self.get_ids()",
"def __init__(self, *args):\n this = _libsbml.new_IdList(*args)\n try: self.this.append(this)\n except: self.this = this",
"def convert_to_ids(self, terms):\n vec = [self.get_id(label) for label in terms]\n return vec",
"def ids(self):\n return list(self._id_generator())",
"def ids(self):\n return list(self._id_generator())",
"def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))",
"def ints(xs: Iterable) -> list[int]:\n return lmap(int, xs)",
"def genNumIdList(numId, idSize):\n\tiDs = []\n\tfor i in range(numId):\n\t\tiDs.append(genNumID(idSize))\n\treturn iDs",
"def _text_to_ids(self, *Xs, max_length=None):\n return Xs",
"def _build_iterable(self):",
"def vertex_ids(self):\n return self.get_ids()",
"def mk_lst_atnum(self):\n\t\telem_rnge=[]\n\t\tfor i in self.atom_num_lst:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements\n\t\tprint elements",
"def posIdc(vec):\n for idx in vec:\n if idx == 0:\n continue\n if idx > 0:\n return tuple(vec)\n else:\n return tuple(-np.array(vec))"
]
| [
"0.6913012",
"0.651734",
"0.6138567",
"0.6030419",
"0.572271",
"0.57053196",
"0.56407267",
"0.5620092",
"0.55757225",
"0.5573427",
"0.55544776",
"0.5554155",
"0.5551495",
"0.55412483",
"0.5510335",
"0.55083036",
"0.5506482",
"0.5440368",
"0.5425658",
"0.5404241",
"0.5389161",
"0.5389161",
"0.5365418",
"0.5354198",
"0.5352772",
"0.5336644",
"0.52868056",
"0.52821046",
"0.527655",
"0.5253648"
]
| 0.7700176 | 0 |
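A short usage sketch for the helper in the record above, written here as a free function so it runs standalone: the returned `vtkIdList` can be fed straight into `vtkCellArray.InsertNextCell`, which is exactly how the FOV-building code consumes it.

```python
import vtk

def mk_vtk_id_list(it):
    """Free-function twin of the method above."""
    vil = vtk.vtkIdList()
    for i in it:
        vil.InsertNextId(int(i))
    return vil

cells = vtk.vtkCellArray()
cells.InsertNextCell(mk_vtk_id_list((0, 1, 2)))  # one triangle referencing point ids 0, 1, 2
cells.InsertNextCell(mk_vtk_id_list([2, 3, 0]))  # ids may come from any iterable of numbers
print(cells.GetNumberOfCells())                  # -> 2
```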
Attaches another entity at a position offset from the attachment site. | def attach_offset(self, entity, offset, attach_site=None):
frame = self.attach(entity, attach_site=attach_site)
frame.pos = offset
return frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)",
"def offset(self, offset):\n self._offset += offset",
"def attach(self, construct, start_domain, start_index, end_domain, end_index, check_attachment_sites=False):\n if isinstance(start_domain, six.string_types):\n start_domain = self[start_domain]\n if start_domain not in self._domains:\n raise ValueError(\"no '{}' domain in '{}'.\".format(start_domain, self.name))\n\n if isinstance(end_domain, six.string_types):\n end_domain = self[end_domain]\n if end_domain not in self._domains:\n raise ValueError(\"no '{}' domain in '{}'.\".format(end_domain, self.name))\n\n if start_index == '...':\n start_index = 0\n if start_index < 0:\n start_index += len(start_domain)\n if check_attachment_sites and start_index not in start_domain.attachment_sites:\n raise ValueError(\"Position {} of domain {} is not an attachment site.\".format(start_index, start_domain.name))\n\n if end_index == '...':\n end_index = len(end_domain)\n if end_index < 0:\n end_index += len(end_domain)\n if check_attachment_sites and end_index not in end_domain.attachment_sites:\n raise ValueError(\"Position {} of domain {} is not an attachment site.\".format(end_index, end_domain.name))\n\n self._attachments[start_domain] = self.Attachment(\n start_domain, start_index, end_domain, end_index, construct)",
"def move_to(self, entity, location):\n y, x = location\n if not y in range(self.size) or not x in range(self.size):\n return\n y, x = entity.location\n self.grid[y][x].contents.remove(entity)\n entity.location = location\n y, x = location\n self.grid[y][x].contents.append(entity)\n for ent in self.grid[y][x].contents:\n try:\n if not ent.player_enter_callback is None:\n ent.player_enter_callback(ent)\n except AttributeError:\n pass",
"def back_entities_embedding(self, entity):\n self.ent_embs.ent_embs.weight.data[entity] = self.source_entity",
"def add(self, other, offset=(0,0)):\n if (isinstance(other, Scene)):\n for item in other.items:\n newitem = item.clone()\n newitem.pan(offset)\n self.items.add(newitem)\n elif (isinstance(other, SvgObject)):\n newitem = other.clone()\n newitem.pan(offset)\n self.items.add(newitem)",
"def InsertElement(self, position, element):\n self.__context.builder.DocumentElementInsert(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n position, element)",
"def update_entity_embedding(self, entity, ims, mu):\n self.source_entity = self.ent_embs.ent_embs.weight.data[entity]\n self.ent_embs.ent_embs.weight.data[entity] = mu * self.source_entity + (1 - mu) * torch.mean(ims, dim=0)",
"def add(diagram1, diagram2, offset):\n raise NotImplementedError",
"def add_via(self,loc,size=1):\n loc = self.convert_point_to_units(vector3d(loc[0],loc[1],0))\n self.cell.add_via_center(layers=self.layers,\n offset=vector(loc.x,loc.y),\n size=(size,size))",
"def attach(self, destination): \r\n self.destination=destination",
"def attach(self, destination): \r\n self.destination= destination",
"def in_place_offset(self, offset):\n self.p += offset * self.cross_z.normalized()",
"def _store_entities(self, doc: Doc, entities: List[Span]) -> None:\n # store the entities at the right place\n if self._destination:\n doc._.set(self._destination, entities)\n else:\n doc.ents = entities",
"def set_offset(self, offset):\r\n for b in self.buf:\r\n b.set_offset(offset)",
"def addElement(self, element, position):\n xOffset, yOffset = position\n element.move((self.position[0] + xOffset, self.position[1] + yOffset))\n self.elements.append(element)",
"def attach_entity(self, child_id, parent_id):\n \n # If you create cycles in the children parent relations (a->b->a)\n # the whole subtree (a and b in this case) won't be updated anymore.\n # Normally you'd want to avoid cycles.\n attachment = getattr(self.entities[parent_id],\n self.attachment_system.system_id)\n if attachment.has_ancestor(child_id):\n raise ValueError(\"Cycle in relationtree detected.\")\n # Alternative:\n #if self.attachment_system.has_ancestor_by_id(parent_id, child_id): \n self.attachment_system.attach_child(parent_id, child_id)",
"def set_local_coordinates(self, entity_id, x, y):\n position = getattr(self.entities[entitiy_id],\n self.local_position_system)\n position.x = x \n position.y = y",
"def offset(self, offset):\n self._offset = offset",
"def offset(self, offset_vector):\n vertices = self.vertices + offset_vector;\n self.vertices = vertices;",
"def place_entity(entity, base, x, y):\n \n img = entity.copy().convert(\"RGBA\")\n\n # Get random angle for placement\n angle = random.randint(-ROTATION_RATE, ROTATION_RATE)\n img = img.rotate(angle, expand=1)\n\n # Placement\n base.paste(img, (x, y), img)",
"def set_position(self, az_pos, el_pos):\n raise NotImplementedError()",
"def seek(self, offset, relativeTo):\n self.oFile.seek(offset, relativeTo)",
"def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")",
"def offset(self, offset):\n\n self._offset = offset",
"def offset(self, offset):\n\n self._offset = offset",
"def offset(self, offset):\n\n self._offset = offset",
"def attach(self, phy_layer):\n self._attached_phys.append(phy_layer)",
"def drag_and_drop_by_offset(self, elem, x, y):\n ActionChains(self.driver).drag_and_drop_by_offset(elem, xoffset=x, yoffset=y).perform()",
"def extend_pos(self, start: int, end: int) -> None:"
]
| [
"0.5485562",
"0.5465371",
"0.52700096",
"0.52345645",
"0.5217608",
"0.5122235",
"0.5115227",
"0.5114092",
"0.5091192",
"0.5039015",
"0.5038015",
"0.5035762",
"0.5021496",
"0.49517542",
"0.49258757",
"0.4868918",
"0.48621973",
"0.4830751",
"0.48174015",
"0.48109263",
"0.48000935",
"0.4779221",
"0.4774078",
"0.47706237",
"0.47676507",
"0.47676507",
"0.47676507",
"0.47575867",
"0.4755318",
"0.47359577"
]
| 0.7699143 | 0 |
Returns the content of a URI, or a config split by \\n. | def adv_get_content(URI=None, config=None):
    if URI is not None:
        content = get_file(URI)
    elif config is not None:
        if isinstance(config, basestring):
            content = config.split('\n')
        elif hasattr(config, '__iter__'):  # already an iterable of lines
            content = config
        else:
            raise TypeError("can't handle config: it must be a string or an iterable object")
    else:
        raise TypeError('config or URI must be specified')
return content | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_content(content, uri=False):\n # Rosette API may balk at non-Latin characters in a URI so we can get urllib\n # to %-escape the URI for us\n if uri:\n unquoted = urllib.parse.unquote(content)\n return urllib.parse.quote(unquoted, '/:')\n if content is None:\n content = sys.stdin.read()\n elif os.path.isfile(content):\n with open(content, mode='r') as f:\n content = f.read()\n return content",
"def get_content(content, uri=False):\n if content is None:\n content = sys.stdin.read()\n elif os.path.isfile(content):\n with open(content, mode='r') as f:\n content = f.read()\n # Rosette API may balk at non-Latin characters in a URI so we can get urllib\n # to %-escape the URI for us\n if uri:\n unquoted = urllib.parse.unquote(content)\n content = urllib.parse.quote(unquoted, '/:')\n return content",
"def newline_list(value):\n return value.strip().splitlines()",
"def testLeadingAndTrailingText(self):\n self.assertEqual([\"http://123.123.123.123\"], grab('fooasdf asdf a http://123.123.123.123 asdfasdf', self.needScheme))",
"def getRequestContent(self):\n return \"\\n\".join(self.request.getReader().lines().toArray())",
"def content_sep():\n return _config.contentsep",
"def read(self):\n path = os.path.expanduser(self.path)\n with open(path, encoding=\"utf-8\") as f:\n return f.read().splitlines()",
"def read_url(self, url: str) -> str:\n return requests.get(url, headers=self.headers).text",
"def efile_string(self):\n fp = self.efile_handle()\n if (fp):\n return \"\\n\".join([line.rstrip() for line in fp])\n return None",
"def __get_url(self, conf):\n url_file = conf[self.conf_item.get_url_list_file()]\n url_list = list()\n map((lambda url: url_list.append(url.strip())), open(url_file))\n return url_list",
"def extract_URLs(self, input_file_name):\n file = open(input_file_name, 'r')\n lines = []\n for line in file.readlines():\n # Don't add empty lines.\n if len(line.strip()) > 0:\n lines.append(line.strip())\n return lines",
"def read(self):\n lines = self.readlines()\n if lines:\n try:\n return ''.join(lines)\n except TypeError:\n return ''.join(force_text(line) for line in lines)\n else:\n return None",
"def content(self):\n return \"\".join(self.lines)",
"def content_uri(self) -> str:\n return pulumi.get(self, \"content_uri\")",
"def get(self):\n if self.file:\n self._read()\n config = self.client_file.parseString(self.content)\n return config",
"def getvalue(self):\n lines = [l.rstrip() for l in self.f.getvalue().splitlines()]\n return \"\\n\".join(lines)",
"def read(self):\n return ''.join(self.content)",
"def raw_url(self) -> str:\n return self.url_as(raw=True)",
"def getcontents(self, fs_url, mode='rb', encoding=None, errors=None, newline=None):\n fs, path = self.parse(fs_url)\n return fs.getcontents(path, mode, encoding=encoding, errors=errors, newline=newline)",
"def uri(self):\n parts = []\n # if I have a scheme\n if self.scheme: parts.append('{}:'.format(self.scheme))\n # if I have an authority\n if self.authority: parts.append('//{}'.format(self.authority))\n # if I have an address\n if self.address: parts.append('{}'.format(self.address))\n # if I have a query\n if self.query: parts.append('?{}'.format(self.query))\n # if I have a fragment\n if self.fragment: parts.append('#{}'.format(self.fragment))\n # assemble and return\n return ''.join(parts)",
"def slurp(path):\n with open(path) as f:\n return f.read().strip()",
"def contents(self, n, m):\n str = \"\"\n subset = self.getlines(n, m)\n for line in subset:\n str = str + line + \"\\n\"\n return str.rstrip(\"\\n\")",
"def get_lines(self):\n return self.split('\\n')",
"def contents(filepath):\n f = open(filepath, 'r')\n rval = [x.rstrip(\"\\r\\n\") for x in f.readlines()]\n f.close()\n return rval",
"def getContents(self):\n normal_body_regex = re.compile(r'[ \\n\\r\\t]+')\n return normal_body_regex.sub(' ', self.contents)",
"def test_normalize_linefeeds():\n text = \"\"\"show hostname\\r\nshow version\\r\\r\nshow inventory\\r\\r\\r\nshow interfaces\n\\r\"\"\"\n expected = \"\"\"show hostname\nshow version\nshow inventory\nshow interfaces\n\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\")\n result = connection.normalize_linefeeds(text)\n assert result == expected",
"def get_content_from_ulr(self):\n response = urllib.request.urlopen(self.url)\n if response.getcode() != 200:\n self.logger.info(\"Cisco - get_content_from_url()\")\n raise ConnectionError('Unable to load ', self.url)\n content = response.read()\n response.close()\n return content",
"def contents(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"contents\", _args)\n return _ctx.execute_sync(str)",
"def readln(self):\n line = self.file.readline().decode(\"latin-1\")\n if line == \"\":\n self.eof = True\n return line.strip(\"\\n\")",
"def _get_string(self):\n result = self.sfile.readline().rstrip('\\n')\n return result"
]
| [
"0.57403713",
"0.56008327",
"0.5137154",
"0.5094208",
"0.49758965",
"0.49627182",
"0.49498793",
"0.4949379",
"0.49068105",
"0.48820636",
"0.4856582",
"0.48470175",
"0.48469394",
"0.48453036",
"0.48230538",
"0.48224425",
"0.4791141",
"0.47874144",
"0.47825134",
"0.4767716",
"0.47639254",
"0.47593912",
"0.47334802",
"0.47086033",
"0.4703342",
"0.47008848",
"0.46993542",
"0.46835494",
"0.46733287",
"0.46721277"
]
| 0.6217384 | 0 |
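A hedged usage sketch for `adv_get_content` in the record above. Note this is Python 2-era code (it tests against `basestring`), and `get_file` is assumed to return the lines of the file behind the URI; the paths and option names below are made up.

```python
lines = adv_get_content(config="option1 = a\noption2 = b")
# -> ['option1 = a', 'option2 = b']

lines = adv_get_content(config=["option1 = a", "option2 = b"])
# -> the iterable is passed through unchanged

lines = adv_get_content(URI="/etc/myapp/app.conf")
# -> whatever get_file returns for that path

# adv_get_content()  # raises TypeError: config or URI must be specified
```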
Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names in the ``.npz`` file are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names in the ``.npz`` file will match the keyword names. | def savez(file, *args, **kwds):
ary_list = []
for a in args:
ary_list.append(array_create.array(a, bohrium=False))
return numpy.savez(file, *ary_list, **kwds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def savez_compressed(file, *args, **kwds):\n\n ary_list = []\n for a in args:\n ary_list.append(array_create.array(a, bohrium=False))\n return numpy.savez_compressed(file, *ary_list, **kwds)",
"def save_arrays_to_npz(data: Union[dict, list], file_path: str):\n arrays = list(data.values()) if isinstance(data, dict) else data\n if not all([isinstance(arrays[i], np.ndarray) for i in range(len(arrays))]):\n raise ValueError(\"Incorrect data arrays\")\n\n if os.path.dirname(file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n file_path += \".npz\" if \".npz\" != file_path[-4:] else \"\"\n\n if isinstance(data, dict):\n np.savez(file=file_path, **data)\n else:\n args = {str(i): arrays[i] for i in range(len(arrays))}\n np.savez(file=file_path, **args)",
"def dump_npy(filename: str, obj, **kwargs):\n return np.save(filename, obj)",
"def dump_npz(filename: str, obj, **kwargs):\n return np.savez(filename, obj)",
"def save_file(output_file_path_str, data_arr,\r\n var_name=None, t0_datetime=None, filetype=None,\r\n cfg_set=None, var_type=None, var_unit=None, longname=None, dt=None):\r\n \r\n if filetype!=\"npy\":\r\n ## Check arguments needed when potentially creating a NetCDF file.\r\n if (t0_datetime is None or filetype is None or\r\n cfg_set is None or var_type is None) and cfg_set is None:\r\n print(\"either the non-compulsory arguments are provided or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n ## Set filetype to the one stated in cfg_set (if not provided)\r\n if filetype is None: filetype = cfg_set[\"save_type\"]\r\n\r\n ## Save numpy file (npy/npz):\r\n if filetype == \"npy\":\r\n if \"disparr\" not in output_file_path_str and type(data_arr) is not list:\r\n np.save(output_file_path_str, data_arr)\r\n elif \"disparr\" in output_file_path_str and len(data_arr)==4:\r\n output_file_path_str = output_file_path_str[:-1]+\"z\"\r\n if var_name!=[\"Dx\",\"Dy\",\"Vx\",\"Vy\"]: raise ValueError('Ordering must be \"Dx\",\"Dy\",\"Vx\",\"Vy\"')\r\n np.savez(output_file_path_str, Dx=data_arr[0], Dy=data_arr[1],\r\n Vx=data_arr[2], Vy=data_arr[3])\r\n elif \"disparr\" in output_file_path_str and len(data_arr)==2:\r\n output_file_path_str = output_file_path_str[:-1]+\"z\"\r\n if var_name!=[\"UV_vec\",\"UV_vec_sp\"]: raise ValueError('Ordering must be \"UV_vec\",\"UV_vec_sp\"')\r\n np.savez(output_file_path_str, UV_vec=data_arr[0], UV_vec_sp=data_arr[1])\r\n else: raise ValueError(\"saving procedure for list of arrays into npz file not yet implemented\")\r\n \r\n ## Save NetCDF file (nc)\r\n elif filetype == \"nc\":\r\n \r\n if t0_datetime is None: t0_datetime = cfg_set[\"t0\"]\r\n if dt is None:\r\n dt = cfg_set[\"time_change_factor\"]*cfg_set[\"timestep\"]\r\n \r\n ## Read auxilary data from cfg_set file:\r\n if var_name==[\"Dx\",\"Dy\",\"Vx\",\"Vy\"] or var_name==[\"UV_vec\",\"UV_vec_sp\"]:\r\n var_unit = [\"Pixel \"+str(dt)+\"min-1\",\"Pixel \"+str(dt)+\"min-1\",\\\r\n \"km \"+str(dt)+\"min-1\",\"km \"+str(dt)+\"min-1\"]\r\n var_type = np.float32\r\n longname = [\"Displacement eastward\",\"Displacement northward\",\\\r\n \"Optical flow eastward\",\"Optical flow northward\"]\r\n else:\r\n if var_type is None and type(var_name) is not list:\r\n var_type = cfg_set[\"type_dict\"][var_name]\r\n if var_unit is None and type(var_name) is not list:\r\n var_unit = cfg_set[\"unit_dict\"][var_name]\r\n if longname is None and type(var_name) is not list:\r\n longname = cfg_set[\"abbrev_dict\"][var_name]\r\n \r\n ## Further checks whether all the necessary data is provided:\r\n if var_type is None and cfg_set is None:\r\n raise ValueError(\"either a variable type (var_type) or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n if var_type is None and var_name not in cfg_set[\"var_list\"]:\r\n raise ValueError(\"variable name (var_name) not found in cfg_set dictionary\")\r\n if var_unit is None and cfg_set is None:\r\n raise ValueError(\"either a variable unit (var_unit) or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n if var_unit is None and var_name not in cfg_set[\"var_list\"]:\r\n raise ValueError(\"variable name (var_name) not found in cfg_set dictionary\")\r\n \r\n ## Make description for different datasets:\r\n var_descib = var_name\r\n if \"_orig\" in output_file_path_str:\r\n description_nc = \"Original observation of \"\r\n elif \"_disp_resid_combi\" in output_file_path_str:\r\n description_nc = \"Displaced observation (with residual movement correction with one displacement) of 
\"\r\n elif \"_disp_resid\" in output_file_path_str:\r\n description_nc = \"Displaced observation (with residual movement correction with one displacement) of \"\r\n elif \"_disparr_UV_resid_combi\" in output_file_path_str:\r\n description_nc = \"Displacement field (with residual movement)\"\r\n var_descib = \"\"\r\n elif \"_disparr_UV_resid\" in output_file_path_str:\r\n description_nc = \"Residual displacement field\"\r\n var_descib = \"\"\r\n elif \"_disparr_UV\" in output_file_path_str:\r\n description_nc = \"Displacement field\"\r\n var_descib = \"\"\r\n elif \"_disp\" in output_file_path_str:\r\n description_nc = \"Displaced observation of \"\r\n else:\r\n print(\" *** Warning: No description added to NetCDF file ***\")\r\n \r\n description = description_nc+var_descib\r\n \r\n ## Save as NetCDF file:\r\n save_nc(output_file_path_str,data_arr,var_name,var_type,var_unit,longname,\r\n t0_datetime,description,dt=dt)\r\n else: raise ValueError(\"filetype must either be npy or nc.\")",
"def np_savez_compressed(file, *args, **kwds):\n if isinstance(file, basestring):\n if not file.endswith('.npz'):\n file = file + '.npz'\n elif is_pathlib_path(file):\n if not file.name.endswith('.npz'):\n file = file.parent / (file.name + '.npz')\n else:\n raise RuntimeError(\"Please specify filename in string format\")\n\n zip_fd, zip_tempfile = tempfile.mkstemp(suffix='.npz')\n np.savez_compressed(zip_tempfile, *args, **kwds)\n\n tf.gfile.MakeDirs(os.path.dirname(file))\n tf.gfile.Copy(zip_tempfile, file, overwrite=True)\n os.close(zip_fd)",
"def save_pickles(filename, *args):\n with gzip.open(filename, 'wb') as outfile:\n for thing in args:\n pickle.dump(thing, outfile)",
"def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)",
"def save_array(array, filename):\n np.save(filename, array)",
"def save(filename, vars):\n p = {}\n for var in vars:\n p[var.name] = var\n\n # 3. Evaluate all tensors at once\n keys = list(p.keys())\n values = tf.get_default_session().run([p[k] for k in keys])\n p = dict(zip(keys, values))\n\n # 3. Write.\n np.savez(filename, **p)",
"def export_npz(self, filename, dtype=np.float32):\n np.savez(\n filename, impulse_response=self.in_time.astype(dtype), samplerate=self.fs\n )",
"def save_any_to_npy(save_dict={}, name='any.npy'):\n np.save(name, save_dict)",
"def to_zarr(self, *args, **kwargs):\n if (\n len(args) == 1\n and isinstance(args[0], str)\n and args[0].endswith(\".zarr.zip\")\n ):\n if {\"compression\", \"mode\"}.issuperset(kwargs.keys()):\n import zarr\n\n with zarr.ZipStore(args[0], **kwargs) as store:\n self.to_zarr(store)\n return\n return super().to_zarr(*args, **kwargs)",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def save_data(self, name, from_attrs=[], **data):\n data[\"data_version\"] = self.data_version\n\n file_opts = {}\n for opt in [\"map_tag\", \"iter_index\", \"bp_opts\", \"extra_tag\"]:\n if opt in data:\n file_opts[opt] = data.pop(opt)\n\n output_file = self.get_filename(name, ext=\".npz\", **file_opts)\n if not output_file:\n return\n\n for attr in from_attrs:\n if hasattr(self, attr):\n data[attr] = getattr(self, attr)\n\n np.savez_compressed(output_file, **data)\n self.log(\"Saved output data to {}\".format(output_file), \"debug\")\n data[\"output_file\"] = output_file\n return data",
"def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)",
"def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)",
"def save_to_array(x, y):\n\n with open(settings.data(\"x.npy\"), \"wb\") as file:\n np.save(file, x)\n\n with open(settings.data(\"y.npy\"), \"wb\") as file:\n np.save(file, y)",
"def save_as_numpy(self, filename, compressed=False):\n logger.warn(\n 'Saving in npz format loses timestamp and ROI information.')\n logger.warn('Consider saving in FITS or HDF5 formats instead.')\n save_func = np.savez_compressed if compressed else np.savez\n save_func(filename, *self.to_list())",
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def save_npz(save_dict={}, name='model.npz'):\n rename_dict = {}\n for k, value in enumerate(save_dict):\n rename_dict.update({'param'+str(k) : value.eval()})\n np.savez(name, **rename_dict)\n print('Model is saved to: %s' % name)",
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def save_results(self, *args):\n try:\n filename = args[0]\n except IndexError:\n filename = self.filename\n results = {}\n results['gp_pred'] = self.gp_predictions\n results['func_val'] = self.target_func_vals\n results['inds_all'] = np.array(self.indices_all)\n results['vals_all'] = np.array(self.vals_all)\n np.save(filename+\".npy\", results)",
"def save_npys(data, model_name, output_string):\n for k, v in data.iteritems():\n output = os.path.join(\n output_string,\n '%s_%s' % (model_name, k)\n )\n np.save(output, v)",
"def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)",
"def save_npz(tmpdir_factory: _pytest.tmpdir.TempdirFactory):\n filename = \"test.npz\"\n fn1 = tmpdir_factory.mktemp(\"data\").join(filename)\n data = {\"test1\": np.arange(0, 50), \"test2\": np.arange(50, 100)}\n\n loader.save_npz_file(str(fn1), data)\n return str(fn1), data",
"def save_equilibrator_bin_data(self, npz_file_name):\n preprocess_dict = {'cids': self.params['cids']}\n for k, v in self.params.items():\n if k.find('preprocess_') != -1:\n preprocess_dict[k.replace('preprocess_', '')] = v\n np.savez_compressed(npz_file_name, **preprocess_dict)",
"def dump(filename, data):\n _savez(filename, [], data, True, allow_pickle=False)",
"def write_array(uri: str):\n a1_data = np.reshape(np.arange(1, 26), (5, 5))\n l1_data = np.arange(5, 0, -1)\n l2_data = np.arange(-2, 3)\n l3_data = np.linspace(-1.0, 1.0, 5)\n with tiledb.open(uri, \"w\") as array:\n array[:] = {\"a1\": a1_data, \"l1\": l1_data, \"l2\": l2_data, \"l3\": l3_data}",
"def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()"
]
| [
"0.73051775",
"0.71861243",
"0.7068409",
"0.70044583",
"0.6802454",
"0.6675411",
"0.64989364",
"0.6470921",
"0.64208746",
"0.6399053",
"0.6364537",
"0.63464135",
"0.6261641",
"0.6238662",
"0.62357056",
"0.6207225",
"0.6186934",
"0.61552876",
"0.61445135",
"0.61101615",
"0.61088514",
"0.6093148",
"0.60589916",
"0.6006964",
"0.6001938",
"0.5987688",
"0.5970746",
"0.5965168",
"0.595324",
"0.59319997"
]
| 0.7774841 | 0 |
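The `savez` wrapper in the record above simply copies each positional array off the Bohrium backend and hands it to `numpy.savez`, so the naming rule from the query applies unchanged: positional arrays are stored as `arr_0`, `arr_1`, ..., while keyword arrays keep their keyword names. A minimal sketch of that rule (the file and array names here are made up for illustration):

```python
import numpy as np

a = np.arange(5)
b = np.linspace(0.0, 1.0, 4)

# Positional arguments are stored under generated names arr_0, arr_1, ...
np.savez("positional.npz", a, b)
with np.load("positional.npz") as data:
    assert np.array_equal(data["arr_0"], a)
    assert np.array_equal(data["arr_1"], b)

# Keyword arguments keep their keyword names inside the archive.
np.savez("named.npz", signal=a, weights=b)
with np.load("named.npz") as data:
    assert np.array_equal(data["signal"], a)
```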
Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. | def savez_compressed(file, *args, **kwds):
ary_list = []
for a in args:
ary_list.append(array_create.array(a, bohrium=False))
return numpy.savez_compressed(file, *ary_list, **kwds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def savez(file, *args, **kwds):\n\n ary_list = []\n for a in args:\n ary_list.append(array_create.array(a, bohrium=False))\n return numpy.savez(file, *ary_list, **kwds)",
"def save_arrays_to_npz(data: Union[dict, list], file_path: str):\n arrays = list(data.values()) if isinstance(data, dict) else data\n if not all([isinstance(arrays[i], np.ndarray) for i in range(len(arrays))]):\n raise ValueError(\"Incorrect data arrays\")\n\n if os.path.dirname(file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n file_path += \".npz\" if \".npz\" != file_path[-4:] else \"\"\n\n if isinstance(data, dict):\n np.savez(file=file_path, **data)\n else:\n args = {str(i): arrays[i] for i in range(len(arrays))}\n np.savez(file=file_path, **args)",
"def np_savez_compressed(file, *args, **kwds):\n if isinstance(file, basestring):\n if not file.endswith('.npz'):\n file = file + '.npz'\n elif is_pathlib_path(file):\n if not file.name.endswith('.npz'):\n file = file.parent / (file.name + '.npz')\n else:\n raise RuntimeError(\"Please specify filename in string format\")\n\n zip_fd, zip_tempfile = tempfile.mkstemp(suffix='.npz')\n np.savez_compressed(zip_tempfile, *args, **kwds)\n\n tf.gfile.MakeDirs(os.path.dirname(file))\n tf.gfile.Copy(zip_tempfile, file, overwrite=True)\n os.close(zip_fd)",
"def dump_npy(filename: str, obj, **kwargs):\n return np.save(filename, obj)",
"def dump_npz(filename: str, obj, **kwargs):\n return np.savez(filename, obj)",
"def save_pickles(filename, *args):\n with gzip.open(filename, 'wb') as outfile:\n for thing in args:\n pickle.dump(thing, outfile)",
"def to_zarr(self, *args, **kwargs):\n if (\n len(args) == 1\n and isinstance(args[0], str)\n and args[0].endswith(\".zarr.zip\")\n ):\n if {\"compression\", \"mode\"}.issuperset(kwargs.keys()):\n import zarr\n\n with zarr.ZipStore(args[0], **kwargs) as store:\n self.to_zarr(store)\n return\n return super().to_zarr(*args, **kwargs)",
"def save_file(output_file_path_str, data_arr,\r\n var_name=None, t0_datetime=None, filetype=None,\r\n cfg_set=None, var_type=None, var_unit=None, longname=None, dt=None):\r\n \r\n if filetype!=\"npy\":\r\n ## Check arguments needed when potentially creating a NetCDF file.\r\n if (t0_datetime is None or filetype is None or\r\n cfg_set is None or var_type is None) and cfg_set is None:\r\n print(\"either the non-compulsory arguments are provided or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n ## Set filetype to the one stated in cfg_set (if not provided)\r\n if filetype is None: filetype = cfg_set[\"save_type\"]\r\n\r\n ## Save numpy file (npy/npz):\r\n if filetype == \"npy\":\r\n if \"disparr\" not in output_file_path_str and type(data_arr) is not list:\r\n np.save(output_file_path_str, data_arr)\r\n elif \"disparr\" in output_file_path_str and len(data_arr)==4:\r\n output_file_path_str = output_file_path_str[:-1]+\"z\"\r\n if var_name!=[\"Dx\",\"Dy\",\"Vx\",\"Vy\"]: raise ValueError('Ordering must be \"Dx\",\"Dy\",\"Vx\",\"Vy\"')\r\n np.savez(output_file_path_str, Dx=data_arr[0], Dy=data_arr[1],\r\n Vx=data_arr[2], Vy=data_arr[3])\r\n elif \"disparr\" in output_file_path_str and len(data_arr)==2:\r\n output_file_path_str = output_file_path_str[:-1]+\"z\"\r\n if var_name!=[\"UV_vec\",\"UV_vec_sp\"]: raise ValueError('Ordering must be \"UV_vec\",\"UV_vec_sp\"')\r\n np.savez(output_file_path_str, UV_vec=data_arr[0], UV_vec_sp=data_arr[1])\r\n else: raise ValueError(\"saving procedure for list of arrays into npz file not yet implemented\")\r\n \r\n ## Save NetCDF file (nc)\r\n elif filetype == \"nc\":\r\n \r\n if t0_datetime is None: t0_datetime = cfg_set[\"t0\"]\r\n if dt is None:\r\n dt = cfg_set[\"time_change_factor\"]*cfg_set[\"timestep\"]\r\n \r\n ## Read auxilary data from cfg_set file:\r\n if var_name==[\"Dx\",\"Dy\",\"Vx\",\"Vy\"] or var_name==[\"UV_vec\",\"UV_vec_sp\"]:\r\n var_unit = [\"Pixel \"+str(dt)+\"min-1\",\"Pixel \"+str(dt)+\"min-1\",\\\r\n \"km \"+str(dt)+\"min-1\",\"km \"+str(dt)+\"min-1\"]\r\n var_type = np.float32\r\n longname = [\"Displacement eastward\",\"Displacement northward\",\\\r\n \"Optical flow eastward\",\"Optical flow northward\"]\r\n else:\r\n if var_type is None and type(var_name) is not list:\r\n var_type = cfg_set[\"type_dict\"][var_name]\r\n if var_unit is None and type(var_name) is not list:\r\n var_unit = cfg_set[\"unit_dict\"][var_name]\r\n if longname is None and type(var_name) is not list:\r\n longname = cfg_set[\"abbrev_dict\"][var_name]\r\n \r\n ## Further checks whether all the necessary data is provided:\r\n if var_type is None and cfg_set is None:\r\n raise ValueError(\"either a variable type (var_type) or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n if var_type is None and var_name not in cfg_set[\"var_list\"]:\r\n raise ValueError(\"variable name (var_name) not found in cfg_set dictionary\")\r\n if var_unit is None and cfg_set is None:\r\n raise ValueError(\"either a variable unit (var_unit) or \"+\r\n \"a cfg_set dictionary has to be provided\")\r\n if var_unit is None and var_name not in cfg_set[\"var_list\"]:\r\n raise ValueError(\"variable name (var_name) not found in cfg_set dictionary\")\r\n \r\n ## Make description for different datasets:\r\n var_descib = var_name\r\n if \"_orig\" in output_file_path_str:\r\n description_nc = \"Original observation of \"\r\n elif \"_disp_resid_combi\" in output_file_path_str:\r\n description_nc = \"Displaced observation (with residual movement correction with one displacement) of 
\"\r\n elif \"_disp_resid\" in output_file_path_str:\r\n description_nc = \"Displaced observation (with residual movement correction with one displacement) of \"\r\n elif \"_disparr_UV_resid_combi\" in output_file_path_str:\r\n description_nc = \"Displacement field (with residual movement)\"\r\n var_descib = \"\"\r\n elif \"_disparr_UV_resid\" in output_file_path_str:\r\n description_nc = \"Residual displacement field\"\r\n var_descib = \"\"\r\n elif \"_disparr_UV\" in output_file_path_str:\r\n description_nc = \"Displacement field\"\r\n var_descib = \"\"\r\n elif \"_disp\" in output_file_path_str:\r\n description_nc = \"Displaced observation of \"\r\n else:\r\n print(\" *** Warning: No description added to NetCDF file ***\")\r\n \r\n description = description_nc+var_descib\r\n \r\n ## Save as NetCDF file:\r\n save_nc(output_file_path_str,data_arr,var_name,var_type,var_unit,longname,\r\n t0_datetime,description,dt=dt)\r\n else: raise ValueError(\"filetype must either be npy or nc.\")",
"def save_array(array, filename):\n np.save(filename, array)",
"def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)",
"def save_as_numpy(self, filename, compressed=False):\n logger.warn(\n 'Saving in npz format loses timestamp and ROI information.')\n logger.warn('Consider saving in FITS or HDF5 formats instead.')\n save_func = np.savez_compressed if compressed else np.savez\n save_func(filename, *self.to_list())",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def _write_files_(self, arrs, filepath):\n\t\t# If there's a dot in the filepath prepend the filepath\n\t\tif '.' in filepath:\n\t\t\tfilename, ext = gen_io.remove_file_extension(filepath)\n\t\t\tarrs = {f\"{filename}_{key}.{ext}\": arrs[key] for key in arrs}\n\t\t\tprint(arrs.keys())\n\t\t\n\t\t# Else just assume it's a folder\n\t\telse:\n\t\t\tif not os.path.isdir(filepath):\n\t\t\t\tos.makedirs(filepath)\n\n\t\t\tarrs = {f\"{filepath}/{key}.npy\": arrs[key] for key in arrs}\n\n\t\t# Now write the files.\n\t\tfor key in arrs:\n\t\t\tfilepath = gen_io.create_unique_filepath(key)\n\t\t\tnp.save(key, arrs[key])",
"def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)",
"def export_npz(self, filename, dtype=np.float32):\n np.savez(\n filename, impulse_response=self.in_time.astype(dtype), samplerate=self.fs\n )",
"def save_data(self, name, from_attrs=[], **data):\n data[\"data_version\"] = self.data_version\n\n file_opts = {}\n for opt in [\"map_tag\", \"iter_index\", \"bp_opts\", \"extra_tag\"]:\n if opt in data:\n file_opts[opt] = data.pop(opt)\n\n output_file = self.get_filename(name, ext=\".npz\", **file_opts)\n if not output_file:\n return\n\n for attr in from_attrs:\n if hasattr(self, attr):\n data[attr] = getattr(self, attr)\n\n np.savez_compressed(output_file, **data)\n self.log(\"Saved output data to {}\".format(output_file), \"debug\")\n data[\"output_file\"] = output_file\n return data",
"def to_files(self, gen, filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)",
"def save_any_to_npy(save_dict={}, name='any.npy'):\n np.save(name, save_dict)",
"def store(obj, filename, suffix = ''):\n # It is a numpy array\n if type(obj) == np.ndarray:\n path,f = writefile(filename, obj_id='numpy_objs', suffix=suffix)\n json.dump(obj, fp=f, cls=NumpyEncoder,\n separators=(',', ':'), sort_keys=True, indent=4)\n print '> saved with JSON to {}'.format(path)\n else:\n path, f = writefile(filename, obj_id='other_objs', suffix=suffix)\n pickle.dump(obj, file=f)\n print '> saved with dill (pickled) to {}'.format(path)\n return path",
"def save_results(self, *args):\n try:\n filename = args[0]\n except IndexError:\n filename = self.filename\n results = {}\n results['gp_pred'] = self.gp_predictions\n results['func_val'] = self.target_func_vals\n results['inds_all'] = np.array(self.indices_all)\n results['vals_all'] = np.array(self.vals_all)\n np.save(filename+\".npy\", results)",
"def write_compressed_skims(skims, output=\"emmemat.zarr\"):\n known_exts = (\".zarr\", \".zarr.zip\")\n if not any(output.endswith(k) for k in known_exts):\n raise NotImplementedError(output)\n if output.endswith(\".zarr\"):\n skims.to_zarr(output, mode='a')\n elif output.endswith(\".zarr.zip\"):\n if os.path.exists(output):\n raise FileExistsError(output)\n with zarr.ZipStore(output, mode='w') as store:\n skims.to_zarr(store)",
"def save(filename, vars):\n p = {}\n for var in vars:\n p[var.name] = var\n\n # 3. Evaluate all tensors at once\n keys = list(p.keys())\n values = tf.get_default_session().run([p[k] for k in keys])\n p = dict(zip(keys, values))\n\n # 3. Write.\n np.savez(filename, **p)",
"def generate(inputFilename, outputFilename = defaultFileName, \n sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE, \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n listOfSignals = convert(inputFilename)\n arrayOfSignals = np.array(listOfSignals)\n arrayOfSignals.shape\n np.save(outputFilename, arrayOfSignals, allow_pickle=False)\n print(\"npy array name: \",outputFilename)",
"def save_array_as(arr, loc=None):\n if loc is None:\n root = tk.Tk()\n root.loc = filedialog.asksaveasfilename(initialdir=\"/\",\n title=\"Save as\",\n filetypes=((\"npy files\", \"*.npy\"),\n (\"all files\", \"*.*\"))\n )\n np.save(root.loc, arr)\n root.destroy()\n else:\n np.save(loc, arr)",
"def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()",
"def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)",
"def save_reconstructions(reconstructions, out_dir):\n out_dir.mkdir(exist_ok=True)\n for fname, recons in reconstructions.items():\n file_path = out_dir/fname\n np.save(file_path,recons)",
"def save_to_array(x, y):\n\n with open(settings.data(\"x.npy\"), \"wb\") as file:\n np.save(file, x)\n\n with open(settings.data(\"y.npy\"), \"wb\") as file:\n np.save(file, y)"
]
| [
"0.7675963",
"0.6940585",
"0.68568796",
"0.68325424",
"0.68274915",
"0.6609121",
"0.6530841",
"0.6502681",
"0.6309862",
"0.62696505",
"0.6235707",
"0.622537",
"0.62017405",
"0.61851525",
"0.61754113",
"0.6117313",
"0.60857856",
"0.60235023",
"0.6009357",
"0.6003201",
"0.5970286",
"0.59599763",
"0.5954543",
"0.5907037",
"0.59044975",
"0.589859",
"0.5897177",
"0.58824676",
"0.5879784",
"0.5853254"
]
| 0.74806476 | 1 |
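`savez_compressed` in the record above differs from the plain `savez` wrapper only in that the members of the `.npz` archive are compressed; the naming rules and the loading side are identical. A short usage sketch under the same assumptions (hypothetical file and array names):

```python
import numpy as np

weights = np.random.rand(256, 256)
biases = np.zeros(256)

# Same interface as np.savez, but each archive member is compressed.
np.savez_compressed("model.npz", weights=weights, biases=biases)

with np.load("model.npz") as data:
    assert data["weights"].shape == (256, 256)
    assert np.array_equal(data["biases"], biases)
```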
Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. | def fromregex(file, regexp, dtype, bohrium=True):
return array_create.array(numpy.fromregex(file, regexp, dtype), bohrium=bohrium) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array",
"def extract_arrays(f):\n arrays = re.findall(arrayregex,f)\n return arrays",
"def _parse_txt(path, n_channels):\n f = open(path)\n lines = f.readlines()\n f.close()\n\n geom = np.zeros((0, 2))\n\n for i, line in zip(range(n_channels), lines):\n line = line.replace('\\r', '')\n line = line.replace('\\n', '')\n row = line.split(' ')\n geom = np.vstack((geom, row[:2])).astype('float')\n\n return geom",
"def read(cls, event_file, regex=regex):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n #util.ipshell()\n for event in list_: # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n #if event[key] == '':\n # event[key] = None\n #if key == 'depth' and regex == cls.regex:\n # event[key] *= 1\n #util.ipshell()\n log.info('Read event information of %d events from events event_file %s' % (len(list_), event_file))\n return cls(list_)",
"def parseFile(filename):\n\n Parse.data = []\n with open(filename, \"r\") as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data",
"def load_collection_from_fp(fileobj):\n matchers = []\n for line in fileobj:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n tag = line.split(\":\")[0]\n if tag.isdigit():\n matchers += parse_rule_id_matchers(line)\n elif tag == \"group\":\n matchers += parse_group_name_matchers(line)\n elif tag == \"re\":\n matchers.append(parse_rule_re_matcher(line))\n else:\n raise InvalidRuleMatchError(line)\n return RuleMatcherCollection(matchers)",
"def read_regex(event_file, regex=regex_GEOFON, creation_info='GEOFON'):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n events = []\n for event in list_:\n # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n if 'latitude_sign' in event and event['latitude_sign'] == 'S':\n event['latitude'] = -event['latitude']\n if 'longitude_sign' in event and event['longitude_sign'] == 'W':\n event['longitude'] = -event['longitude']\n if 'AM' in event:\n ci = creation_info + (' automatic' if event['AM'] == 'A' else ' manual')\n else:\n ci = creation_info\n ev = Event(event_type='earthquake', creation_info=ci,\n origins=[Origin(time=UTC(event['time']),\n latitude=event['latitude'],\n longitude=event['longitude'],\n depth=event['depth'])],\n magnitudes=[Magnitude(mag=event['magnitude'],\n magnitude_type='M')],\n event_descriptions=[EventDescription(event['flinn'],\n 'flinn-engdahl region')]\n if 'flinn' in event else None\n )\n events.append(ev)\n events.sort(key=lambda x: x.origins[0].time)\n return Catalog(events)",
"def cast_txt_to_numpy(iuput_file):\n # Load the txt file\n with open(iuput_file, 'r') as tmpfile:\n lines = tmpfile.readlines()\n\n # Restore the numpy array\n holder = []\n for line in lines:\n holder.append([float(x) for x in line.split(' ')])\n\n # Construct the numpy array\n holder = np.array(holder)\n\n return holder",
"def text_parser(self, txt):\n output_list = []\n txt = txt.replace(\" \", \"\").upper()\n while txt:\n number = re.search(\"^[-0-9.]+\", txt)\n parentheses = re.search(\"^[()]\", txt)\n\n func_targets = '|'.join([\"^\" + func for func in self.functions])\n function = re.search(func_targets, txt)\n\n op_targets = '|'.join([\"^\" + op for op in self.operators])\n operator = re.search(op_targets, txt)\n\n text_index = 0\n if number:\n output_list.append(float(number.group(0)))\n text_index = number.end(0)\n elif parentheses:\n output_list.append((parentheses.group(0)))\n text_index = parentheses.end(0)\n elif function:\n output_list.append(self.functions[function.group(0)])\n text_index = function.end(0)\n elif operator:\n output_list.append((self.operators[operator.group(0)]))\n text_index = operator.end(0)\n\n txt = txt[text_index:]\n\n return output_list",
"def read_text_regex(\n filename: str, newline: Pattern, fields: Optional[Pattern]\n) -> Table:\n res = None\n with open(filename, encoding=\"utf-8\") as file:\n for line in file:\n match_obj = newline.search(line)\n if match_obj:\n if res:\n yield res\n res = {}\n groups = match_obj.groups()\n if len(groups) > 1:\n res[groups[0]] = groups[1]\n elif len(groups) > 0:\n res[\"_id_\"] = groups[0]\n if res is None:\n continue\n if not fields:\n continue\n for match_obj in fields.finditer(line):\n res[match_obj[1]] = match_obj[2]\n if res:\n yield res",
"def loadtxt(fname, dtype=float, comments='#', delimiter=None,\n converters=None, skiprows=0, usecols=None, unpack=False,\n ndmin=0, bohrium=True):\n\n f = numpy.loadtxt(fname, dtype, comments, delimiter, converters, skiprows, usecols, unpack, ndmin)\n return array_create.array(f, bohrium=bohrium)",
"def parse_tsp_file(file):\n # define regular expressions for the fields to parse\n regexes = {'name': re.compile(\"NAME : (.*)\"),\n 'comment': re.compile(\"COMMENT : (?!STARTNODE :|STARTNODES : |CLUSTERS :)(.*)\"),\n 'single_start': re.compile(\"COMMENT : STARTNODE : ([0-9])+\"),\n 'multi_start': re.compile(\"COMMENT : STARTNODES : (.*)\"),\n 'nodes':\n re.compile(\n r\"([0-9]+)\\ *([0-9]*\\.?[0-9]*)\\ *([0-9]*\\.?[0-9]*)\",\n re.MULTILINE),\n 'groups': re.compile(\"COMMENT : CLUSTERS : (.*)\")}\n # initialize results\n result = {'name': 'No Name', 'comment': '', 'startnodes': [],\n 'nodes': [], 'groups': []}\n # Define application rules\n\n def apply_match(regex_name, match):\n \"\"\"Applies a specific processing rule for each regex sperately as the\n fields vary in data types and structures\"\"\"\n if regex_name is 'name':\n result['name'] = match.group(1)\n elif regex_name is 'single_start':\n result['startnodes'] = [int(match.group(1))]\n elif regex_name is 'multi_start':\n result['startnodes'] = ast.literal_eval(match.group(1))\n elif regex_name is 'groups':\n result['groups'] = ast.literal_eval(\n match.group(1).replace(\" \", \"\"))\n elif regex_name is 'comment':\n result['comment'] += match.group(1) + \"\\n\"\n elif regex_name is 'nodes':\n result['nodes'].append([int(float(match.group(2))),\n int(float(match.group(3)))])\n # Process the lines in the file and check for matches for each regular\n # expression\n _file = open(file, 'r')\n lines = _file.readlines()\n for line in lines:\n if len(line):\n for regex_name in regexes:\n match = re.match(regexes[regex_name], line)\n if match:\n apply_match(regex_name, match)\n _file.close()\n return result",
"def load_txt(file_path):\n lines = load_lines(file_path)\n\n if 'E' in lines[0]:\n dtype = np.float32\n else:\n dtype = np.int32\n\n data = list(map(str.split, lines))\n array = np.array(data, dtype=dtype)\n return array",
"def textread(filepath):\n return np.array(pd.read_csv(filepath, \n sep = \"\\s+|\\t+|\\s+\\t+|\\t+\\s+\",\n header=None,\n comment='#',\n engine='python'))",
"def _get_file_as_array(self, file_):\n file_as_string = \"\"\n for line in file_:\n if \";\" in line:\n line = line[:line.find(\";\")]\n line = (line.replace('\\t', '').replace('\\n', ' ')\n .replace('(', ' ( ').replace(')', ' ) '))\n file_as_string += line\n file_.close()\n return file_as_string.strip().split()",
"def parse_data(fn):\n data = []\n with open(fn, \"rb\") as f:\n for line in f:\n if py_ver == 3:\n # Python 3 code in this block\n dline = \"\".join(filter(lambda char: char != '\"', line.decode())).split(\",\")\n else:\n # Python 2 code in this block\n dline = line.translate(None, '\"').split(\",\")\n \n if len(dline) == 11 and dline[0].isdigit():\n data.append([float(i) for i in dline])\n\n return np.array(data)",
"def parse_file_into_array(filename, separator):\n arr = []\n with open(filename) as file:\n for row in file.read().splitlines():\n try:\n row_arr = [float(cell) for cell in row.split(separator)]\n if 'winequality' in filename:\n row_arr[-1] = 1 if row_arr[-1] > 5 else 0 # convert to binary classification\n elif 'breast-cancer' in filename:\n row_arr[-1] = 1 if row_arr[-1] == 4 else 0 # convert to binary classification\n except ValueError:\n continue\n arr.append(row_arr)\n return arr",
"def extract_data_from_txt(filename: str) -> any:\r\n # List[Dict[str, List[int]]]\r\n ce_file = open(filename, 'r')\r\n ce_text = ce_file.read()\r\n ce_structure_pattern = re.compile(r\"\"\"\r\n \\n(\\w+)\\n # the line with the country name \r\n \\s+Total\\s.*\\n # the line start with at least 1 spaces and \"Total Fossil-Fuel\"\r\n Year\\s+.*\\n\\n # the line start with \"Year Emissions\"\r\n ((\\d+\\s+\\d+\\s+.*\\n)+) # the lines with actual data\r\n \"\"\", re.VERBOSE | re.MULTILINE)\r\n ce_line_pattern = re.compile(r\"\"\"\r\n (\\d+) # year number\r\n \\s+ # the space between year number and Total Fossil-Fuel Emissions data \r\n (\\d+) # the Total Fossil-Fuel Emissions data\r\n \\s+ # the spaces after Total Fossil-Fuel Emissions\r\n .*\\n # the rest of the line\r\n \"\"\", re.VERBOSE)\r\n countries = ce_structure_pattern.findall(ce_text)\r\n\r\n data = [process_row_c(country_data, ce_line_pattern) for country_data in countries]\r\n return data",
"def load_re_from_file(filepath):\r\n regexp = None\r\n with open(filepath,'r') as mlfile:\r\n flagstr = \"\"\r\n for line in mlfile:\r\n cleanline = re.sub(\"//.*$\", \"\", line)\r\n if re.search(\"^\\s*$\", cleanline):\r\n continue\r\n if re.search (\"^#.*$\", cleanline):\r\n flagstr = cleanline[1:]\r\n continue\r\n if regexp is not None:\r\n raise Exception(\"Regular expression file format error\")\r\n else:\r\n regexp = cleanline.rstrip('\\n')\r\n flags = 0\r\n if \"i\" in flagstr:\r\n flags |= re.I\r\n from pydsl.Grammar.Definition import RegularExpression\r\n return RegularExpression(regexp, flags)",
"def read_array(filename, separator=','):\n dtype = np.dtype([('id','S12'),\n ('views','int32'),\n ('location','S140'),\n ('comments','int32'),\n ('tags_n','int32'),\n ('favorites','int32'),\n ('make','S50'),\n ('model','S100')])\n cast = np.cast\n data = [[] for dummy in xrange(len(dtype))]\n f = open(filename, 'r')\n lines = f.readlines()\n for line in lines[1:-100]:\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(number)\n for i in xrange(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return np.rec.array(data, dtype=dtype)",
"def loadtxt(filename):\n txt = []\n with open(filename, \"r\") as f:\n for line in f:\n txt.append(line.strip())\n return np.asarray(txt)",
"def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels",
"def openfile(filename):\n Data = np.genfromtxt(filename, delimiter = \",\")\n data = [[]]\n for i in range(np.shape(Data)[0]):\n #Stores information row-by-row\n data.append(Data[i][0:])\n return data",
"def parse_labels(file: str) -> ndarray:\n rows = []\n with open(file, 'r', encoding='utf-8') as f:\n for row in f:\n rows.append(row.strip())\n return array(rows)",
"def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text",
"def read(filename):\n records = Parser.__load_csv(filename)\n return np.array(records)",
"def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data",
"def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame",
"def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')",
"def loadData(name):\n inputs = []\n outputs = []\n with open(name) as file:\n data = file.readlines()[2:]\n lines = map(str.split, data)\n for line in lines:\n inputs.append(preparePatterns(line[:-1]))\n outputs.append(float(line[-1]))\n length = len(inputs[0])\n return inputs, outputs, length"
]
| [
"0.7213056",
"0.6166122",
"0.5949893",
"0.59376925",
"0.5915017",
"0.5867279",
"0.5831664",
"0.5794558",
"0.57613015",
"0.575809",
"0.5737247",
"0.5719786",
"0.57041824",
"0.5690172",
"0.55857027",
"0.5577605",
"0.55612797",
"0.55323094",
"0.5509963",
"0.5486947",
"0.5471353",
"0.54515177",
"0.5442694",
"0.5432856",
"0.5428062",
"0.5398303",
"0.53972626",
"0.5371458",
"0.5342147",
"0.5340304"
]
| 0.7399248 | 0 |
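`fromregex` in the record above parses the whole file with one regular expression and returns a structured array whose fields correspond to the regex groups. A small sketch of that mapping, using an in-memory file and made-up field names:

```python
import numpy as np
from io import StringIO

# Hypothetical input: "<digits> <short key>" on each line.
data = StringIO("1312 foo\n1534 bar\n444  qux")

# One structured field per capture group.
dtype = [("num", np.int64), ("key", "S3")]
records = np.fromregex(data, r"(\d+)\s+(\S+)", dtype)

print(records["num"])   # [1312 1534  444]
print(records["key"])   # [b'foo' b'bar' b'qux']
```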
a.tofile(arr, fid, sep="", format="%s") Write array 'arr' to a file as text or binary (default). Data is always written in 'C' order, independent of the order of `a`. The data produced by this method can be recovered using the function fromfile(). | def print_to_file(arr, fid, sep="", format="%s"):
f = array_create.array(arr, bohrium=False)
return f.tofile(fid, sep=sep, format=format) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()",
"def ArraytoFile(_array):\n\tfile = open('sort1.txt', 'w')\n\tfor line in _array:\n\t\tfile.write(line+\"\\n\")\n\tfile.close()",
"def numpy_2_file(narray, file, path=OUTPUT_PATH, sep=',' ):\n file_path = path + file\n narrayc = numpy.copy(narray)\n numpy.place(narrayc,numpy.logical_or(narrayc==-1,narrayc==-2), 2)\n dataset = numpy.copy(narrayc).astype(str)\n numpy.place(dataset,dataset=='2', '*')\n d=numpy.atleast_2d(dataset)\n numpy.savetxt(file_path, d, delimiter=sep, fmt='%s')\n return",
"def cast_numpy_to_txt(arr, output_file):\n shape = arr.shape\n arr = arr.reshape([shape[0] * shape[1], shape[2]])\n\n np.savetxt(fname=output_file, X=arr, delimiter=' ', fmt='%.18e', newline='\\n', )",
"def write(A,fn,sep=' '):\n\tf=file(fn,'a')\n\tA = zip(*A)\n\tfor i in A:\n\t\tfor j in i:\n\t\t\tf.writelines([str(j),str(sep),])\n\t\tf.writelines(str('\\n'))\n\tf.close()",
"def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')",
"def to_txt(self, fpath):\n np.savetxt(fpath, self._arr.T)",
"def write_csv_file(array, filename):\n\tnp.savetxt(filename, array, delimiter=\",\")",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))",
"def output_file(newarray, filename):\n np.savetxt(filename + \"_formatted.txt\", newarray, delimiter=\" \", fmt=\"%s\")",
"def writeToFile(fil, aks, tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()",
"def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')",
"def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')",
"def array_to_file(filename, a):\n a = normalize_array(a)\n i = Image.fromarray(a.astype('uint8'))\n return i.save(filename)",
"def saveCorpusFile(output_path, arr, format, features):\n def rowMap(x):\n if format == \"csv\":\n if features:\n x = x.split(\",\")[1]\n else:\n parts = x.split(\",\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\",\", \" \")\n if format == \"tsv\":\n if features:\n x = x.split(\"\\t\")[1]\n else:\n parts = x.split(\"\\t\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\"\\t\", \" \")\n\n arr_corpus = map(lambda x: rowMap(x), arr)\n with open(output_path, 'w+') as corpusfile:\n for row in arr_corpus:\n corpusfile.write(row + \"\\n\")",
"def save_array(array, filename):\n np.save(filename, array)",
"def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)",
"def export_to_file(data, filename='class_data.txt', mode='a'):\n with open (filename, mode) as f:\n if mode == \"w\":\n for record in data:\n line = \",\".join(record)\n f.write(line + \"\\n\")\n elif mode == \"a\":\n line = \",\".join(data)\n f.write(line + \"\\n\")\n else:\n raise ValueError('Wrong write mode')",
"def toFile(self,fid):\n stack = []\n for w,b in self.stack:\n w.copy_to_host()\n b.copy_to_host()\n stack.append([w.numpy_array,b.numpy_array])\n\tpickle.dump(stack,fid)",
"def eeg_writeavr(array,tsb,di,file):\t\t\n import shutil as shu\n f=open(file,'w')\n firstline = 'Npts= %i TSB= %i DI= %7.5f SB= %7.5f SC= %i NChan= %i\\n' %(array.shape[1],tsb,di,1,200,array.shape[0]) \n chnam = 'Cz FP1 FP2 F3 F4 C3 C4 P3 P4 O1 O2 F7 F8 T7 T8 P7 P8 Fz Pz FC1 FC2 CP1 CP2 FC5 FC6 CP5 CP6 FT9 FT10 TP9 TP10 PO9 PO10\\n'\n f.write(firstline)\n f.write(chnam)\n for i in range(array.shape[0]):\n tmp = array[i,:]\n f.write(('%7.5f ' * len(tmp)) %tuple(tmp))\n f.write('\\n')\n \n f.close()\n #may want to change this on different machines...\n src = '/Users/crislanting/Projects/EEG/data/33.elp'\n dest = file[:-4] + '.elp'\n shu.copyfile(src,dest)",
"def to_file(self, fname, delimiter=\"\\t\", encoding=\"utf-8\"):\n with open(fname, \"wb\") as fh:\n for key, score in self.ranked_items():\n fh.write(self.to_record(key, score, delimiter).encode(encoding))",
"def write_to_file(info, mode='w', file=\"output4.txt\"):\n with open(file, mode, encoding='utf-8') as f:\n for line in info:\n f.write(' '.join(map(str, line)) + '\\n')",
"def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is floating then format the values in scientific notation.\n if np.issubdtype(array.dtype, np.floating):\n array = array.astype(np.float32)\n formatter = lambda x: f'{x:.12E}'\n elif np.issubdtype(array.dtype, np.integer):\n array = array.astype(np.int32)\n formatter = lambda x: str(x)\n else:\n raise TypeError(f'Type of the data could not be serialised - {array.dtype}')\n\n lines = [' '.join(formatter(val) for val in row) + '\\n' for row in array]\n with open(file_path, 'w') as f:\n f.writelines(lines)",
"def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)",
"def put_2Darray(file,array,header='',format='',append='no'):\n lista=[]\n for i in range(array.shape[1]):lista.append(array[:,i])\n lista=tuple(lista)\n put_data(file,lista,header,format,append)",
"def write_arr_to_csv(arr, filename):\n keys = arr[0].keys()\n with open(f\"{filename}.csv\", \"w\", newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(arr)",
"def fip_to_file(fip, path):\n with open(path, \"w\") as f:\n string = \"\"\n for elem in fip:\n string += str(elem[0]) + \" \"\n f.write(string)",
"def array2chomp( arr, savename ):\n rows = map( lambda x: str(x)+'\\n', map( tuple, iter( arr ) ) ) \n with open( savename, 'w' ) as fh:\n fh.writelines( rows )",
"def save_array(self, name: str, array: np.ndarray):\r\n np.savetxt(self._path_for_csv(name), array, delimiter=\",\")"
]
| [
"0.6918701",
"0.6727358",
"0.6621259",
"0.65976906",
"0.65721756",
"0.65588903",
"0.6537441",
"0.65140146",
"0.65048033",
"0.6455233",
"0.64238685",
"0.6413894",
"0.63596934",
"0.62816775",
"0.6238878",
"0.6104446",
"0.60399896",
"0.5977586",
"0.59756196",
"0.594799",
"0.5843374",
"0.5794356",
"0.57850015",
"0.57792383",
"0.57775474",
"0.5738941",
"0.57019264",
"0.56780833",
"0.56638795",
"0.5636097"
]
| 0.85623467 | 0 |
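`print_to_file` in the record above copies the array off Bohrium and calls `ndarray.tofile`, so the usual NumPy caveat applies: the file stores neither shape nor dtype, and the reader must supply both when recovering the data with `numpy.fromfile`. A round-trip sketch with hypothetical file names:

```python
import numpy as np

a = np.arange(6, dtype=np.float64).reshape(2, 3)

# Binary dump (default): raw bytes in C order, no shape/dtype metadata.
a.tofile("dump.bin")
restored = np.fromfile("dump.bin", dtype=np.float64).reshape(2, 3)
assert np.array_equal(a, restored)

# Text dump: values separated by `sep`, each formatted with `format`.
a.tofile("dump.csv", sep=",", format="%.3f")
```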
Get the hash method. | def hash_method(self):
return self._hash_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hash_algo(self) -> str:\n return self._hash_algo",
"def get_hash(self):\n return self.__hash",
"def get_hash(self):\r\n return",
"def signatureHashAlgorithm(self) -> str:\n hash_algo = self['signature_algorithm'].hash_algo\n return hash_algo",
"def get_hash(self) -> str:\n return self.__hash.hexdigest()",
"def HashAlgorithm(self) -> _n_7_t_0:",
"def __call__(self):\n return self.method.hexdigest()",
"def hash(self):\n return self._hash",
"def hash(self):\n return self.__hash__()",
"def hash(self) -> str:\n return pulumi.get(self, \"hash\")",
"def hash(self) -> bytes:",
"def hash(self) -> str:\r\n ...",
"def __hash__(self):\n return hash(self.hash)",
"def hash(self):\n return Hash.dhash(bytes(self))",
"def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')",
"def hash_string(self):\n return self._hash_string",
"def _hash_func(self):\r\n func_code_h = hash(getattr(self.func, '__code__', None))\r\n return id(self.func), hash(self.func), func_code_h",
"def calc_info_hash(self):\n return \"infohash\"",
"def __Hash(self):\n return self._Hash()",
"def _get_hasher(self):\n import hashlib\n\n # Try making the hash set from the columns marked 'hash'\n indexes = [i for i, c in enumerate(self.columns) if\n c.data.get('hash', False) and not c.is_primary_key]\n\n # Otherwise, just use everything by the primary key.\n if len(indexes) == 0:\n indexes = [\n i for i,\n c in enumerate(\n self.columns) if not c.is_primary_key]\n\n def hasher(values):\n m = hashlib.md5()\n for index in indexes:\n x = values[index]\n try:\n m.update(\n x.encode('utf-8') +\n '|') # '|' is so 1,23,4 and 12,3,4 aren't the same\n except:\n m.update(str(x) + '|')\n return int(m.hexdigest()[:14], 16)\n\n return hasher",
"def get_hash_algorithm(hash_algorithm):\n\n available_methods = {\n 'SHA256': hash.sha256_crypt,\n 'SHA512': hash.sha512_crypt,\n }\n\n if not isinstance(hash_algorithm, str):\n raise ValueError('The parameter \"hash_algorithm\" should be a string.')\n\n if hash_algorithm.upper() not in available_methods:\n raise ValueError('Invalid hash method.')\n\n return available_methods[hash_algorithm]",
"def hash(self):\n raise NotImplementedError() # To be subclassed",
"def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"",
"def __hash__(self):\n return hash((self.SYMBOL, self._.hash_parameters))",
"def getHash(self):\n if self.chash:\n return self.chash\n else:\n self.setHash()\n return self.chash",
"def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())",
"def __hash__(self):\n return self.to_hash()",
"def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)",
"def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)",
"def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode"
]
| [
"0.789659",
"0.73221624",
"0.7316154",
"0.72321606",
"0.71322316",
"0.7026344",
"0.69898903",
"0.69604665",
"0.69488406",
"0.6935332",
"0.6934777",
"0.68786675",
"0.68585414",
"0.6856074",
"0.67770886",
"0.6766313",
"0.6753965",
"0.67056817",
"0.67053574",
"0.6698196",
"0.66958976",
"0.6686234",
"0.6660211",
"0.6654794",
"0.6636446",
"0.6616023",
"0.6611975",
"0.6601961",
"0.6601961",
"0.66015047"
]
| 0.87897944 | 0 |
Get the hash cipher | def hash_cipher(self):
return self._digest_cipher | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def HashAlgorithm(self) -> _n_7_t_0:",
"def hash(self) -> str:\r\n ...",
"def hash_algo(self) -> str:\n return self._hash_algo",
"def hash(self) -> bytes:",
"def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])",
"def getHash(self):\n if self.chash:\n return self.chash\n else:\n self.setHash()\n return self.chash",
"def operate_cipher(self):",
"def __get_cipher(self):\n return Fernet(open(self.__key_file, 'rb').read())",
"def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])",
"def gen_hash(self, data):\n password_gen = crypt.encrypt(data)\n return password_gen",
"def default_hash():\n return \"!\"",
"def get_hash(self):\r\n return",
"def _hash(self, string, hash_type):\n hash_types = {\n 'TABLE_OFFSET': 0,\n 'HASH_A': 1,\n 'HASH_B': 2,\n 'TABLE': 3\n }\n seed1 = 0x7FED7FED\n seed2 = 0xEEEEEEEE\n\n for ch in string.upper():\n if not isinstance(ch, int): ch = ord(ch)\n value = self.encryption_table[(hash_types[hash_type] << 8) + ch]\n seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n return seed1",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def compute_handshake(stanza_id, secret):\n return unicode(sha(str(stanza_id) + str(secret)).hexdigest().lower())",
"def hash(self) -> str:\n return pulumi.get(self, \"hash\")",
"def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)",
"def hash_string(self):\n return self._hash_string",
"def hash(password):\n return sha256_crypt.encrypt(password)",
"def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval",
"def encryptPsw(password):\n # Transform the password into a byte object\n byte = str.encode(password)\n\n # SHA256 the byte object --> HASH object\n middle = hashlib.sha256(byte)\n\n # Convert the HASH object into string\n hash = middle.hexdigest()\n\n return hash",
"def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h",
"def hash_string(to_hash):\n\n chars = string.printable\n\n hashed = \"\"\n\n total = 1\n\n counter = 1\n\n for letter in to_hash:\n\n total *= (chars.index(letter) * counter * len(to_hash)*13)\n\n counter += 1\n\n if counter%3 == 0:\n\n total *= total\n\n total = str(total)[:30]\n\n temp_int = \"\"\n\n for i in range(len(total)):\n\n temp_int += total[i]\n\n if i % 2 != 0:\n\n hashed += chars[int(temp_int)]\n\n temp_int = \"\"\n\n return hashed",
"def hash_function_1(key: str) -> int:\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash",
"def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])",
"def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()",
"def strongHashFunction(self):\n\t\treturn self._strongHashFunction",
"def current_hash(self):",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))"
]
| [
"0.6766753",
"0.651927",
"0.64724076",
"0.6463382",
"0.6461716",
"0.6444886",
"0.6433632",
"0.63657993",
"0.6353914",
"0.6286419",
"0.6285059",
"0.6275059",
"0.6241196",
"0.6235177",
"0.6227181",
"0.6223782",
"0.6220806",
"0.6211417",
"0.6205702",
"0.61994827",
"0.61988443",
"0.6179981",
"0.61661613",
"0.6140791",
"0.61121064",
"0.6110756",
"0.609256",
"0.6075728",
"0.6069385",
"0.6069385"
]
| 0.8253679 | 0 |
Create cookie if not there already. Also deactivates language. | def process_response(self, request, response):
if not request.COOKIES.get('site_language'):
response.set_cookie('site_language',
'')
translation.deactivate()
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setlang(request):\n form = SetLanguageForm(request.POST or None)\n if form.is_valid():\n user_language = form.cleaned_data['language']\n translation.activate(user_language)\n response = HttpResponseRedirect(form.cleaned_data['next'])\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user_language)\n return response",
"def setlang(request):\n next = request.GET.get('next', None)\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n response = redirect(next)\n\n lang_code = request.GET.get('language', None)\n if lang_code and check_for_language(lang_code):\n if hasattr(request, 'session'):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN)\n\n return response",
"def get_cookie( name, default=None ):",
"def set_cookie( name, value, **kwargs ) :",
"async def clean_cookie_jar(self):\n\n self._session._cookie_jar.clear()\n # create consent cookie\n random_str = \"\".join([random.choice(string.ascii_lowercase) for _ in range(3)])\n cookie = BaseCookie({\"CONSENT\": f\"YES+{random_str}\"})\n cookie[\"CONSENT\"][\"path\"] = \"/\"\n cookie[\"CONSENT\"][\"domain\"] = \".google.com\"\n cookie[\"CONSENT\"][\"expires\"] = \"Mon, 12 May 2031 15:15:02 GMT\"\n cookie[\"CONSENT\"][\"secure\"] = True\n # add consent cookie to jar\n self._session._cookie_jar.update_cookies(cookie)",
"def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web site\n else:\n if current_user.is_anonymous():\n #if the cookie is a valid UUID it's ok\n curr_cookie = request.cookies.get(config.COOKIE_ADSABS2_NAME)\n try:\n uuid.UUID(curr_cookie)\n g.user_cookie_id = curr_cookie\n #otherwise the app generates a new one\n except ValueError:\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()",
"def delete_cookie(self, name):\r\n self.set_cookie(name, '__deleted__', expires=0)",
"def store_consent(session_obj, name, value):\n Cookie.objects.create(session=session_obj, name=name, value=value)",
"async def _cookie_settings(self, ctx):",
"def set_secure_cookie( name, value, **kwargs ):",
"def get_secure_cookie( name, value=None ):",
"def set_language(request):\n response = HttpResponseRedirect(get_redirect_url(request))\n\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n request.session[settings.LANGUAGE_SESSION_KEY] = lang_code\n\n return response",
"def _update_cookie(self, encoded_data, response):\n if encoded_data:\n response.set_cookie(\n self.cookie_name,\n encoded_data,\n domain=settings.SESSION_COOKIE_DOMAIN,\n secure=settings.SESSION_COOKIE_SECURE or None,\n httponly=settings.SESSION_COOKIE_HTTPONLY or None,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )\n else:\n response.delete_cookie(\n self.cookie_name,\n domain=settings.SESSION_COOKIE_DOMAIN,\n samesite=settings.SESSION_COOKIE_SAMESITE,\n )",
"async def save(self, request, response) -> None:\n value = self.cipher.encrypt(request.session.dumps().encode())\n cookie = f'{self.cookie_name}={value.decode()}; SameSite=Lax'\n response.headers['Set-Cookie'] = cookie",
"def set_cookie( cookies, name, morsel, **kwargs ) :",
"def set_secure_cookie(self, name, val, remember):\n\n cookie_val = make_secure_val(val)\n cookie_str = '%s=%s; Path=/;' % (name, cookie_val)\n if remember:\n expires = time.time() + 5000 * 24 * 3600 # 5000 days from now\n else:\n expires = time.time() + 24 * 3600\n expires_str = time.strftime(\"%a, %d-%b-%Y %T GMT\",\n time.gmtime(expires))\n expires_date = 'expires= %s;' % expires_str\n cookie_str += expires_date\n self.response.headers.add_header('Set-Cookie', cookie_str)",
"def agree_on_cookie_store(request):\n request.session['isagree'] = True\n return HttpResponse(\"OK\")",
"def test_ctor_no_cookie(self):\n request = self._make_request()\n session = self._makeOne(request)\n session_dict = session.managed_dict\n self.assertDictEqual(session_dict, {})\n self.assertIs(session.new, True)",
"def make_cookie(name, load, seed, expire=0, domain=\"\", path=\"\", timestamp=\"\"):\n cookie = SimpleCookie()\n if not timestamp:\n timestamp = str(int(time.mktime(time.gmtime())))\n signature = cookie_signature(seed, load, timestamp)\n cookie[name] = \"|\".join([load, timestamp, signature])\n if path:\n cookie[name][\"path\"] = path\n if domain:\n cookie[name][\"domain\"] = domain\n if expire:\n cookie[name][\"expires\"] = _expiration(expire, \"%a, %d-%b-%Y %H:%M:%S GMT\")\n\n return tuple(cookie.output().split(\": \", 1))",
"def __cookieAdded(self, cookie):\n if self.__rejectCookie(cookie, cookie.domain()):\n self.__store.deleteCookie(cookie)\n return\n \n self.insertCookie(cookie)\n self.cookiesChanged.emit()",
"def delete_cookie(self, cookie_name=None):\n if cookie_name is None:\n cookie_name = self.default_value[\"name\"]\n\n return self.create_cookie(\"\", \"\", cookie_name=cookie_name, kill=True)",
"def set_cookie_data(storage, messages, invalid=False, encode_empty=False):\n encoded_data = storage._encode(messages, encode_empty=encode_empty)\n if invalid:\n # Truncate the first character so that the hash is invalid.\n encoded_data = encoded_data[1:]\n storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}\n if hasattr(storage, '_loaded_data'):\n del storage._loaded_data",
"def process_request(self, request):\r\n if request.user.is_authenticated() and 'django_language' not in request.session:\r\n user_pref = UserPreference.get_preference(request.user, LANGUAGE_KEY)\r\n if user_pref:\r\n request.session['django_language'] = user_pref",
"def set_language(request, lang_code):\n next = '/'\n response = http.HttpResponseRedirect(next)\n\n if int(lang_code) == 1:\n lang_code = 'en'\n elif int(lang_code) == 2:\n lang_code = 'ru'\n else:\n lang_code = 'ru'\n\n if lang_code and check_for_language(lang_code):\n\n request.session['django_language'] = lang_code\n\n return response",
"def set_cookie(self, cookie):\n c = self._cookies\n if cookie.domain not in c:\n c[cookie.domain] = {}\n c2 = c[cookie.domain]\n if cookie.path not in c2:\n c2[cookie.path] = {}\n c3 = c2[cookie.path]\n c3[cookie.name] = cookie",
"def getCookie(key):",
"def delete(self, name, path=\"/\", domain=None):\n \n self.response.set_cookie(\n name, \n '', \n path=path, \n domain=domain, \n max_age=0, \n expires=datetime.timedelta(days=-5)\n )",
"def del_cookie(self, name):\n self.set_cookie(name, '__deleted__', expires=0)",
"def revert(self):\n self._cookies_lock.acquire()\n try:\n old_state = cookielib.copy.deepcopy(self._cookies)\n self._cookies = {}\n try:\n self.load()\n except (cookielib.LoadError, IOError):\n self._cookies = old_state\n raise\n\n finally:\n self._cookies_lock.release()",
"def _cc_create_app_cookie_persist_rule(self, cookiename):\n rule_text = \"\"\"\n when RULE_INIT {\n \n # Cookie name prefix\n set static::ck_pattern BIGipServer*, %s\n \n # Log debug to /var/log/ltm? 1=yes, 0=no)\n set static::ck_debug 1\n \n # Cookie encryption passphrase\n # Change this to a custom string!\n set static::ck_pass \"abc123\"\n }\n when HTTP_REQUEST {\n \n if {$static::ck_debug}{log local0. \"Request cookie names: [HTTP::cookie names]\"}\n \n # Check if the cookie names in the request match our string glob pattern\n if {[set cookie_names [lsearch -all -inline [HTTP::cookie names] $static::ck_pattern]] ne \"\"}{\n \n # We have at least one match so loop through the cookie(s) by name\n if {$static::ck_debug}{log local0. \"Matching cookie names: [HTTP::cookie names]\"}\n foreach cookie_name $cookie_names {\n \n # Decrypt the cookie value and check if the decryption failed (null return value)\n if {[HTTP::cookie decrypt $cookie_name $static::ck_pass] eq \"\"}{\n \n # Cookie wasn't encrypted, delete it\n if {$static::ck_debug}{log local0. \"Removing cookie as decryption failed for $cookie_name\"}\n HTTP::cookie remove $cookie_name\n }\n }\n if {$static::ck_debug}{log local0. \"Cookie header(s): [HTTP::header values Cookie]\"}\n }\n }\n when HTTP_RESPONSE {\n \n if {$static::ck_debug}{log local0. \"Response cookie names: [HTTP::cookie names]\"}\n \n # Check if the cookie names in the request match our string glob pattern\n if {[set cookie_names [lsearch -all -inline [HTTP::cookie names] $static::ck_pattern]] ne \"\"}{\n \n # We have at least one match so loop through the cookie(s) by name\n if {$static::ck_debug}{log local0. \"Matching cookie names: [HTTP::cookie names]\"}\n foreach cookie_name $cookie_names {\n \n # Encrypt the cookie value\n HTTP::cookie encrypt $cookie_name $static::ck_pass\n }\n if {$static::ck_debug}{log local0. \"Set-Cookie header(s): [HTTP::header values Set-Cookie]\"}\n }\n } \n \"\"\" % (cookiename)\n return rule_text"
]
| [
"0.6306369",
"0.597832",
"0.5574768",
"0.5574281",
"0.55403256",
"0.5460696",
"0.5437387",
"0.54191154",
"0.54006237",
"0.53784394",
"0.5306191",
"0.5276896",
"0.5240446",
"0.52366024",
"0.52167886",
"0.52082217",
"0.51884407",
"0.51844126",
"0.51611924",
"0.513932",
"0.51298404",
"0.5129225",
"0.51222795",
"0.5122199",
"0.51204455",
"0.511723",
"0.51119953",
"0.5078008",
"0.50607044",
"0.50279796"
]
| 0.63812304 | 0 |
Verifies prediction and label shapes are equal. | def check_shape_equal(pred, labels):
if pred.shape != labels.shape:
raise ValueError('Prediction and labels shapes must be equal:'
f'{pred.shape} vs {labels.shape}.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_label_shapes(labels, preds, shape=0):\n\n if shape == 0:\n label_shape, pred_shape = len(labels), len(preds)\n else:\n label_shape, pred_shape = labels.shape, preds.shape\n\n if label_shape != pred_shape:\n raise ValueError(\"Shape of labels {} does not match shape of \"\n \"predictions {}\".format(label_shape, pred_shape))",
"def check_label_shapes(labels, preds, wrap=False, shape=False):\n if not shape:\n label_shape, pred_shape = len(labels), len(preds)\n else:\n label_shape, pred_shape = labels.shape, preds.shape\n\n if label_shape != pred_shape:\n raise ValueError(\"Shape of labels {} does not match shape of \"\n \"predictions {}\".format(label_shape, pred_shape))\n\n if wrap:\n if isinstance(labels, ndarray.ndarray.NDArray):\n labels = [labels]\n if isinstance(preds, ndarray.ndarray.NDArray):\n preds = [preds]\n\n return labels, preds",
"def _check_same_shape(preds: Tensor, target: Tensor) ->None:\n if preds.shape != target.shape:\n raise RuntimeError(f'Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}.')",
"def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,\n y_test_pred):\n\n # check input data shapes are consistent\n X_train, y_train = check_X_y(X_train, y_train)\n X_test, y_test = check_X_y(X_test, y_test)\n\n y_test_pred = column_or_1d(y_test_pred)\n y_train_pred = column_or_1d(y_train_pred)\n\n check_consistent_length(y_train, y_train_pred)\n check_consistent_length(y_test, y_test_pred)\n\n if X_train.shape[1] != X_test.shape[1]:\n raise ValueError(\"X_train {0} and X_test {1} have different number \"\n \"of features.\".format(X_train.shape, X_test.shape))\n\n return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred",
"def _check_shape(self, y_pred, y):\n if self._type == 'classification':\n if y_pred.ndim != y.ndim + 1:\n raise ValueError('Classification case, dims of y_pred equal dims of y add 1, '\n 'but got y_pred: {} dims and y: {} dims'.format(y_pred.ndim, y.ndim))\n if y.shape != (y_pred.shape[0],) + y_pred.shape[2:]:\n raise ValueError('Classification case, y_pred shape and y shape can not match. '\n 'got y_pred shape is {} and y shape is {}'.format(y_pred.shape, y.shape))\n else:\n if y_pred.ndim != y.ndim:\n raise ValueError('{} case, dims of y_pred need equal with dims of y, but got y_pred: {} '\n 'dims and y: {} dims.'.format(self._type, y_pred.ndim, y.ndim))\n if y_pred.shape != y.shape:\n raise ValueError('{} case, y_pred shape need equal with y shape, but got y_pred: {} and y: {}'.\n format(self._type, y_pred.shape, y.shape))",
"def assert_predictions_equal(first, second, x):\n preds1 = first.predict(x, batch_size=batch_size)\n preds2 = second.predict(x, batch_size=batch_size)\n np.testing.assert_array_equal(preds1, preds2)",
"def test_score(self):\n pred_copy_simple = np.copy(self.regression_single.y_pred)\n pred_copy_boston = np.copy(self.regression_boston.y_pred)\n\n self.assertEqual(pred_copy_simple.shape, self.y_test.shape)\n self.assertEqual(pred_copy_boston.shape, self.boston_y_test.shape)",
"def check_prediction(self):\n predicted_scores = self.sess.run(self.NET.output_with_relu, feed_dict={self.NET.input: self.test_image if len(self.test_image.shape)==4 else [self.test_image]})\n self.original_confidence = np.max(predicted_scores)\n if np.argmax(predicted_scores,1) != self.original_label:\n print(\"Network's Prediction is Already Incorrect!\")\n return True\n else:\n return False",
"def params_check(shape_labels, shape_predictions, out_type, labels_dtype,\n predictions_dtype, shape_weights, weights_dtype):\n\n util.check_shape_rule(shape_labels, min_dim=1, max_dim=1)\n util.check_shape_rule(shape_predictions, min_dim=1, max_dim=1)\n if list(shape_labels) != list(shape_predictions):\n raise RuntimeError(\"The shape of labels and predictions shoud be same\")\n if shape_weights is not None:\n util.check_shape_rule(shape_weights, min_dim=1, max_dim=1)\n if list(shape_labels) != list(shape_weights):\n raise RuntimeError(\"The shape of labels and weights shoud be same\")\n\n check_list = [\"float32\", \"int32\", \"float16\", \"int8\", \"uint8\"]\n if out_type not in check_list:\n raise RuntimeError(\n \"Confusion_matrix only support 'float32', 'int32', 'float16, 'int8, 'uint8\")\n if labels_dtype not in check_list:\n raise RuntimeError(\"labels only support 'float32', 'int32', 'float16, 'int8, 'uint8\")\n if predictions_dtype not in check_list:\n raise RuntimeError(\"predictions only support 'float32', 'int32', 'float16, 'int8, 'uint8\")\n if shape_weights is not None:\n if weights_dtype not in check_list:\n raise RuntimeError(\"weights only support 'float32', 'int32', 'float16, 'int8, 'uint8\")\n\n if shape_weights is not None:\n if not tbe_platform.cce_conf.intrinsic_check_support(\n \"Intrinsic_vconv\", \\\n \"s322f32\") and weights_dtype == \"int32\" and out_type != \"int32\":\n raise RuntimeError(\"This product weights don't support \\\n int32(when out_type is not int32)\")\n if not tbe_platform.cce_conf.intrinsic_check_support(\\\n \"Intrinsic_vconv\", \"f322s32f\") and weights_dtype == \"float32\" \\\n and out_type == \"int32\":\n raise RuntimeError(\"This product weights don't \\\n support float32(when out_type is int32)\")\n if not tbe_platform.cce_conf.intrinsic_check_support(\\\n \"Intrinsic_vconv\", \"f322s32f\") and labels_dtype == \"float32\":\n raise RuntimeError(\"This product labels don't support float32!\")\n if not tbe_platform.cce_conf.intrinsic_check_support(\"Intrinsic_vconv\", \\\n \"f322s32f\") and predictions_dtype == \"float32\":\n raise RuntimeError(\"This product predictions don't support float32!\")",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def have_same_shapes(array1, array2):\n return array1.shape == array2.shape",
"def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):\n len_true = list(map(len, y_true))\n len_pred = list(map(len, y_pred))\n is_list = set(map(type, y_true)) | set(map(type, y_pred))\n\n if len(y_true) != len(y_pred) or len_true != len_pred:\n message = 'Found input variables with inconsistent numbers of samples:\\n{}\\n{}'.format(len_true, len_pred)\n raise ValueError(message)",
"def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))",
"def test_predict(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n true, predictions = model.predict('test')\n expected_size = ((dataset.num_examples('test') //\n model.batch_size) * model.batch_size)\n self.assertEqual(true.shape[0], expected_size)\n self.assertEqual(true.shape, predictions.shape)",
"def test_predict(self):\n \n\n model ,vec, x_testing=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"count\")\n \n model2 ,vec_tfidf, x_testing2=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"tfidf\")\n \n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with countVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())\n\n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with tfidfVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())",
"def _test_output_shapes(model):\n assert model.r == r\n assert model.m == m\n assert model.c_.shape == (r,)\n assert model.A_.shape == (r,r)\n assert model.Hc_.shape == (r,r*(r+1)//2)\n assert model.H_.shape == (r,r**2)\n assert model.Gc_.shape == (r,r*(r+1)*(r+2)//6)\n assert model.G_.shape == (r,r**3)\n assert model.B_.shape == (r,m)\n assert hasattr(model, \"datacond_\")\n assert hasattr(model, \"dataregcond_\")\n assert round(model.dataregcond_, 6) <= round(model.datacond_, 6)\n assert hasattr(model, \"residual_\")\n assert hasattr(model, \"misfit_\")\n assert round(model.misfit_, 6) <= round(model.residual_, 6)",
"def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True",
"def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')",
"def test_checkpoints_are_equal():\n model1, X, y, Xval, yval = make_small_model(num_hidden_layers=1)\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model1.compile(loss=loss)\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model1.save(\"fit.tf\")\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS * 2, batch_size=20)\n model1.save(\"refit.tf\")\n\n # same arch, different weights\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"refit.tf\")\n assert same is False, msg\n\n # should be same\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit.tf\")\n print(msg)\n assert same is True, msg\n\n # different architecture\n model2, X, y, Xval, yval = make_small_model(num_hidden_layers=3)\n model2.compile(loss=loss)\n model2.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model2.save(\"fit2.tf\")\n\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n print(msg)\n assert same is False, msg\n\n # coping with trashed files\n cleanup_file(\"fit.tf/saved_model.pb\")\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n assert same is False, msg\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"fit.tf\")\n assert same is False, msg\n\n same, msg = safekeras.check_checkpoint_equality(\"hello\", \"fit2.tf\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"hello\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n for name in (\"fit.tf\", \"fit2.tf\", \"refit.tf\"):\n cleanup_file(name)",
"def testQuestionTwo(self):\n self.assertEqual(AnswerQuestionTwo().shape, (5,5), \"Question two's output is not one dimension.\")",
"def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)",
"def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())",
"def assert_models_equal(first, second):\n # layer names and settings\n assert first.get_config() == second.get_config()\n # model weights\n assert len(first.get_weights()) == len(second.get_weights())\n for w1, w2 in zip(first.get_weights(), second.get_weights()):\n np.testing.assert_array_equal(w1, w2)\n # optimizer\n assert first.optimizer.get_config() == second.optimizer.get_config()",
"def test_multiclass_compare(self):\n dataset = make_fixture(binary=False, split=True)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y.train, dataset.y.test) is oz\n assert oz._mode == COMPARE\n\n # oz.finalize()\n self.assert_images_similar(oz)",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def test_infer_target_shape(self):\n t = OneHotEncode(3)\n assert t.infer_target_shape((2, 5)) == (2, 5, 3)\n\n t = OneHotEncode(2)\n assert t.infer_target_shape((2, 5)) == (2, 5)\n\n t = OneHotEncode(1)\n assert t.infer_target_shape((2, 5)) == (2, 5)",
"def test_check_consistency1():\n\n roi = ROI()\n\n labels = ['a', 'b', 'c']\n lrs = ['l', 'r', 'l']\n\n roi.set_labels(labels, lrs)\n\n roi.check_consistency()\n\n roi.labels.pop()\n\n with raises(InconsistentDataError):\n assert roi.check_consistency()",
"def verify_probability_shapes(probs):\n if probs.ndim == 2:\n num_classes = probs.shape[1]\n if num_classes == 1:\n probs = probs[:, 0]\n probs = binary_converter(probs)\n num_classes = 2\n elif probs.ndim == 1:\n # Cover binary case\n probs = binary_converter(probs)\n num_classes = 2\n else:\n raise ValueError('Probs must have 1 or 2 dimensions.')\n return probs, num_classes",
"def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def check_input_dimension_consistency(self, session_data: \"SessionDataType\"):\n\n if self.share_hidden_layers:\n num_text_features = self._get_num_of_features(session_data, \"text_features\")\n num_intent_features = self._get_num_of_features(\n session_data, \"label_features\"\n )\n\n if num_text_features != num_intent_features:\n raise ValueError(\n \"If embeddings are shared, \"\n \"text features and label features \"\n \"must coincide. Check the output dimensions of previous components.\"\n )"
]
| [
"0.7969878",
"0.7264365",
"0.7258978",
"0.71695817",
"0.6899441",
"0.68151385",
"0.6598533",
"0.6566734",
"0.6538672",
"0.64354575",
"0.6296917",
"0.622556",
"0.6192627",
"0.6162538",
"0.6154577",
"0.6067735",
"0.60317224",
"0.6001732",
"0.5992151",
"0.5929971",
"0.58998036",
"0.5888938",
"0.58783317",
"0.58759063",
"0.58662724",
"0.58437806",
"0.5840902",
"0.58324265",
"0.5821542",
"0.58009636"
]
| 0.87678856 | 0 |
Verifies the nested dictionary dtypes are equal to target dtype. | def check_dtype_equal(input_dict,
target_dtype = jnp.float32,
exclude_list = ()):
flat_input = traverse_util.flatten_dict(input_dict)
for key, value in flat_input.items():
if key[0] in exclude_list:
continue
key_name = '_'.join([str(sub_key) for sub_key in key])
if isinstance(value, jnp.ndarray):
if value.dtype != target_dtype:
raise TypeError(f'Input {key_name} has inconsistent type:'
f'{value.dtype} vs {target_dtype}')
else:
raise TypeError(f'Illegal input type found: {type(value)}.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dtype_equality(self):\r\n dtypes = get_numeric_types(with_complex=True)\r\n # Perform all pairwise comparisons of dtypes, making sure comparing\r\n # their string representation yields the same result.\r\n for dtype1_idx, dtype1 in enumerate(dtypes):\r\n for dtype2 in dtypes[dtype1_idx + 1:]:\r\n assert (dtype1 == dtype2) == (str(dtype1) == str(dtype2))",
"def dict_arr_eq(d1: DataDict, d2: DataDict):\n return (\n d1.keys() == d2.keys()\n and all(d1[k].shape == d2[k].shape for k in d1.keys())\n and all(np.allclose(d1[k], d2[k]) for k in d1.keys())\n )",
"def check_all_same_schema(feat_dict_list, keys, name):\n if len(feat_dict_list) == 0:\n return\n for fdict in feat_dict_list:\n for k in keys:\n t1 = feat_dict_list[0][k]\n t2 = fdict[k]\n if F.dtype(t1) != F.dtype(t2) or F.shape(t1)[1:] != F.shape(t2)[1:]:\n raise DGLError('Expect all features {}[\"{}\"] to have the same data type'\n ' and feature size, but got\\n\\t{} {}\\nand\\n\\t{} {}.'.format(\n name, k, F.dtype(t1), F.shape(t1)[1:],\n F.dtype(t2), F.shape(t2)[1:]))",
"def _assert_dtypes(self, other: 'PlainFrame'):\n\n left_dtypes = {column.name: column.dtype\n for column in self.parent.plaincolumns}\n\n right_dtypes = {column.name: column.dtype\n for column in other.plaincolumns}\n\n if left_dtypes != right_dtypes:\n msg = \"Mismatching types: \"\n for column, left_dtype in left_dtypes.items():\n right_dtype = right_dtypes[column]\n if left_dtype != right_dtype:\n msg += (\"{} (left '{}' vs. right '{}'\"\n .format(column, left_dtype, right_dtype))\n\n raise AssertionError(msg)",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def assert_correct_and_equal(self, other: Union[pd.DataFrame, dict]):\n if isinstance(other, dict):\n other = pd.DataFrame.from_records(other)\n if not isinstance(other, pd.DataFrame):\n raise TypeError(\"other must be a dataframe or a dict!\")\n # Sort cols\n cols = list(self._data.columns) + [c for c in other.columns if c not in self._data.columns]\n other = other[cols]\n SampleDataSchema.to_schema().select_columns(self._data.columns).validate(other)\n assert_frame_equal(\n self._data.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n other.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n )",
"def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)",
"def dict_equal(d1: Dict, d2: Dict) -> bool:\n\n # iterate over the dict with more keys\n # di is the dictionary to iterate over\n # dj is the one to compare to\n if len(d2) > len(d1):\n di = d2\n dj = d1\n else:\n di = d1\n dj = d2\n for key, value in di.items():\n # check if key is also in d2 and if the value is the same\n if key not in dj.keys():\n return False\n else:\n value_j = dj[key]\n if type(value) is dict and type(value_j) is dict:\n # if its again a dictionary -> recursion\n if not dict_equal(value, value_j):\n return False\n\n elif type(value) is np.ndarray and type(value_j) is np.ndarray:\n if not np.array_equal(value, value_j):\n return False\n\n # check if both are the same type of object\n elif type(value) is not type(value_j):\n return False\n\n elif value != value_j:\n return False\n\n return True",
"def test_writing_unsupported_types_to_hdf5(self):\n some_dict = {}\n some_dict['list_of_ints'] = list(np.arange(5))\n some_dict['list_of_floats'] = list(np.arange(5.1))\n some_dict['weird_dict'] = {'a': 5}\n data1 = new_data(formatter=self.formatter, location=self.loc_provider,\n name='test_missing_attr')\n some_dict['nested_dataset'] = data1\n\n some_dict['list_of_dataset'] = [data1, data1]\n some_dict['list_of_mixed_type'] = ['hello', 4, 4.2]\n\n fp = self.loc_provider(\n io=DataSet.default_io,\n record={'name': 'test_dict_writing'})+'.hdf5'\n F = h5py.File(fp, mode='a')\n self.formatter.write_dict_to_hdf5(some_dict, F)\n new_dict = {}\n self.formatter.read_dict_from_hdf5(new_dict, F)\n # objects are not identical but the string representation should be\n self.assertEqual(str(some_dict['nested_dataset']),\n new_dict['nested_dataset'])\n self.assertEqual(str(some_dict['list_of_dataset']),\n new_dict['list_of_dataset'])\n self.assertEqual(str(some_dict['list_of_mixed_type']),\n new_dict['list_of_mixed_type'])\n\n F['weird_dict'].attrs['list_type'] = 'unsuported_list_type'\n with self.assertRaises(NotImplementedError):\n self.formatter.read_dict_from_hdf5(new_dict, F)",
"def _assert_equal(d_0, d_1):\n # Compare arrays.\n if _is_array_like(d_0):\n try:\n ae(d_0, d_1)\n except AssertionError:\n ac(d_0, d_1)\n # Compare dicts recursively.\n elif isinstance(d_0, dict):\n assert set(d_0) == set(d_1)\n for k_0 in d_0:\n _assert_equal(d_0[k_0], d_1[k_0])\n else:\n # General comparison.\n assert d_0 == d_1",
"def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True",
"def valid_dtype_assertion(expected_dtypes, actual_dtype, name):\n\tassert (actual_dtype in expected_dtypes), \"Invalid dtype of {} should be {}\".format(name, str(expected_dtypes))",
"def test_array_dtype(self):\n dt1 = np.dtype('f4', (2,))\n dt2 = np.dtype('f4', [2])\n dt3 = np.dtype('f4', 2)\n dt4 = np.dtype('f4', 2.1)\n ht1 = h5t.py_create(dt1)\n ht2 = h5t.py_create(dt2)\n ht3 = h5t.py_create(dt3)\n ht4 = h5t.py_create(dt4)\n self.assertEqual(ht1.dtype, dt1)\n self.assertEqual(ht2.dtype, dt1)\n self.assertEqual(ht3.dtype, dt1)\n self.assertEqual(ht4.dtype, dt1)",
"def test_recursive(self):\n dt1 = np.dtype([('a','i'),('b','f')])\n dt2 = np.dtype([('a',dt1),('b','f8')])\n htype = h5t.py_create(dt2)\n self.assertEqual(htype.dtype, dt2)",
"def verifyData(self, expectedDict):\n pass",
"def is_typed_dict(self) -> bool:\n return True",
"def test_encoding_round_trip(cell):\n orig = copy.copy(cell.__dict__)\n cell._from_serializeable_dict(cell._to_serializeable_dict())\n round_trip = cell.__dict__\n for key in cell._allowed:\n if type(orig[key]) == np.ndarray or type(orig[key]) == list:\n assert all(orig[key] == round_trip[key])\n else:\n assert orig[key] == round_trip[key]",
"def compare_nested_dicts(dict1,dict2):\n\n if sorted(dict1.keys()) != sorted(dict2.keys()):\n return False\n\n for key in dict1:\n if isinstance(dict1[key],dict):\n res = compare_nested_dicts(dict1[key],dict2[key])\n if not res:\n return False\n else:\n continue\n if not isinstance(dict1[key],(six.string_types,list,NoneType)) and not np.allclose(dict1[key],dict2[key]):\n return False\n elif isinstance(dict1[key],(six.string_types,list,NoneType)) and not dict1[key] == dict2[key]:\n return False\n\n return True",
"def assertDictStructure(self, expect: dict, actual: dict, path: list = []) -> None:\n self.assertEqual(expect.keys(), actual.keys(),\n msg=f\"Expected field keys are not same: {self.path_to_dict_path(path)}\")\n for key in actual:\n if isinstance(expect[key], dict):\n self.assertIsInstance(actual[key], dict,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} to be type dict, \"\n f\"got type {type(actual[key])} instead\")\n self.assertDictStructure(expect[key], actual[key], path + [key])\n elif isinstance(expect[key], list):\n self.assertIsInstance(actual[key], list,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} to be type list, \"\n f\"got type {type(actual[key])} instead\")\n\n if not expect[key]:\n self.assertFalse(actual[key], msg=f\"Expected empty list {self.path_to_dict_path(path+[key])},\"\n f\"received non empty list {actual[key]}\")\n else:\n self.assertTrue(actual[key], msg=f\"Expected list {self.path_to_dict_path(path+[key])},\"\n f\"received empty list {actual[key]}\")\n\n if expect[key] and isinstance(expect[key][0], dict):\n for i, entry in enumerate(actual[key]):\n self.assertDictStructure(expect[key][0], entry, path + [key, i])\n else:\n for i, entry in enumerate(actual[key]):\n self.assertIsInstance(entry, expect[key][0],\n msg=f\"Expected field {self.path_to_dict_path(path+[key, i])} \"\n f\"to be type {expect[key][0]}, got type {type(entry)} instead\")\n else:\n if type(expect[key]) == type:\n self.assertIsInstance(actual[key], expect[key],\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} \"\n f\"to be type {expect[key]}, got type {type(actual[key])} instead\")\n else:\n self.assertIn(type(actual[key]), expect[key].__args__,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} \"\n f\"to be type {expect[key]}, got type {type(actual[key])} instead\")",
"def assertDictAlmostEqual(self, dict1, dict2):\n self.assertListEqual(dict1.keys(), dict2.keys())\n for i, j in zip(dict1.keys(), dict2.keys()):\n self.assertListAlmostEqual(list(dict1[i]), list(dict2[j]))",
"def _assert_dtype(images):\n dtype = dtypes.as_dtype(images.dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError('Invalid image dtype {0}, expected uint8 or float32'.format(dtype))\n\n return dtype",
"def check_types(dict_):\n if dict_['UNTREATED']['types'] != dict_['TREATED']['types']:\n for i in range(len(dict_['UNTREATED']['types'])):\n if isinstance(dict_['TREATED']['types'][i], list):\n dict_['UNTREATED']['types'][i] = dict_['TREATED']['types'][i]\n if isinstance(dict_['UNTREATED']['types'][i], list):\n dict_['TREATED']['types'][i] = dict_['UNTREATED']['types'][i]\n\n return dict_",
"def test_stochatreat_output_treat_col_dtype(treatments_dict):\n treatments_df = treatments_dict[\"treatments\"]\n assert treatments_df[\"treat\"].dtype == np.int64, \"Treatment column is missing\"",
"def _check_keys(dict):\n for key in dict.keys():\n if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict",
"def isequal_dict_of_ndarray(first, second):\n if first.keys() != second.keys():\n return False\n return all(np.array_equal(first[key], second[key]) for key in first)",
"def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))",
"def _check_dtype(ds_in, dset_attrs):\n dtype = dset_attrs['dtype']\n attrs = dset_attrs['attrs']\n if ds_in.dtype.name != dtype:\n msg = ('Source dtype ({}) does not match specified dtype ({}), '\n .format(ds_in.dtype, dtype))\n logger.warning(msg)\n warn(msg)\n float_to_int = (np.issubdtype(ds_in.dtype, np.floating)\n and np.issubdtype(dtype, np.integer))\n int_to_float = (np.issubdtype(ds_in.dtype, np.integer)\n and np.issubdtype(dtype, np.floating))\n if float_to_int:\n if not any(c for c in attrs if 'scale_factor' in c):\n msg = ('Cannot downscale from {} to {} without a '\n 'scale_factor!'.format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n else:\n msg = 'Converting {} to {}'.format(ds_in.dtype, dtype)\n logger.warning(msg)\n warn(msg)\n elif int_to_float:\n msg = ('Cannot scale up an {} to a {}'\n .format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n elif not np.issubdtype(dtype, ds_in.dtype):\n msg = ('Output dtype ({}) has greater precision than input '\n 'dtype ({}), using input dtype'\n .format(dtype, ds_in.dtype))\n logger.warning(msg)\n warn(msg)\n\n dset_attrs['dtype'] = ds_in.dtype\n\n return dset_attrs",
"def test_hash_object_dtype():\r\n\r\n a = np.array([np.arange(i) for i in range(6)], dtype=object)\r\n b = np.array([np.arange(i) for i in range(6)], dtype=object)\r\n\r\n nose.tools.assert_equal(hash(a),\r\n hash(b))",
"def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()",
"def _equivalent_data_structures(struct_1, struct_2):\n if isinstance(struct_1, np.ndarray):\n return np.allclose(struct_1, struct_2)\n if isinstance(struct_1, Mapping):\n if set(struct_1.keys()) != set(struct_2.keys()):\n return False\n return all(\n _equivalent_data_structures(struct_1[key], struct_2[key])\n for key in struct_1)\n if isinstance(struct_1, Sequence):\n if len(struct_1) != len(struct_2):\n return False\n return all(\n _equivalent_data_structures(value_1, value_2)\n for value_1, value_2 in zip(struct_1, struct_2))\n if isinstance(struct_1, Number):\n return math.isclose(struct_1, struct_2)\n return False"
]
| [
"0.65680695",
"0.6526487",
"0.6405589",
"0.6161182",
"0.6083145",
"0.60452515",
"0.6042174",
"0.6006597",
"0.59724295",
"0.59593993",
"0.59462154",
"0.59145516",
"0.5882707",
"0.5867973",
"0.58577424",
"0.58535045",
"0.58512837",
"0.583794",
"0.58129144",
"0.5789161",
"0.5759179",
"0.5751552",
"0.5746933",
"0.5736626",
"0.5736103",
"0.5727636",
"0.57170314",
"0.5715768",
"0.57156724",
"0.5710248"
]
| 0.79315984 | 0 |
Weights and reduces the loss. We convert to float32 before reducing following TF1 implementation. After weighting and reducing the losses, we convert the output back to the dtype of the input. | def compute_weighted_loss(
loss,
weights,
dtype,
loss_reduction,
):
if loss_reduction == LossReductionType.RETURN_AS_IS:
# Handle no loss reduction, by returning tensor as-is.
return loss
loss = loss.astype(jnp.float32)
loss_weight = jnp.broadcast_to(weights, loss.shape).astype(jnp.float32)
loss *= loss_weight
total_loss = jnp.sum(loss)
if loss_reduction == LossReductionType.SUM_BY_NONZERO_WEIGHTS:
total_loss = safe_divide(total_loss, jnp.sum(loss_weight != 0.0))
elif loss_reduction == LossReductionType.MEAN:
total_loss = safe_divide(total_loss, jnp.sum(loss_weight))
elif loss_reduction != LossReductionType.SUM:
raise NotImplementedError('LossReductionType not supported for this loss:'
f'{loss_reduction}.')
return total_loss.astype(dtype) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_loss(self, x, weights=1.0):\n input_dtype = x.dtype\n x = self.cast(x, mstype.float32)\n weights = self.cast(weights, mstype.float32)\n x = self.mul(weights, x)\n if self.reduce and self.average:\n x = self.reduce_mean(x, self.get_axis(x))\n if self.reduce and not self.average:\n x = self.reduce_sum(x, self.get_axis(x))\n x = self.cast(x, input_dtype)\n return x",
"def model_with_dtype_int():\n\n input_1 = tf.keras.Input(shape=(8, 8, 3,), dtype=tf.int32)\n input_2 = tf.keras.Input(shape=(8, 8, 3,), dtype=tf.float32)\n x = tf.cast(input_1, tf.float32)\n x = tf.add(x, input_2)\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"model_with_dtype_int\")(x)\n return outputs",
"def loss(self, labels, input_data):\n\n pred, out = self.inference(input_data)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, out), name=\"loss\") + \\\n tf.losses.get_regularization_loss()\n return loss, pred",
"def compute_loss(self,\n inputs: Union[\n # Tuple of (features, labels).\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor\n ],\n # Tuple of (features, labels, sample weights).\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor,\n Optional[tf.Tensor]\n ]\n ],\n training: bool = False) -> tf.Tensor:\n\n # We need to work around a bug in mypy - tuple narrowing\n # based on length checks doesn't work.\n # See https://github.com/python/mypy/issues/1178 for details.\n if len(inputs) == 2:\n inputs = cast(\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor\n ],\n inputs\n )\n features, labels = inputs\n sample_weight = None\n elif len(inputs) == 3:\n inputs = cast(\n Tuple[\n Dict[str, tf.Tensor],\n tf.Tensor,\n Optional[tf.Tensor],\n ],\n inputs\n )\n features, labels, sample_weight = inputs\n else:\n raise ValueError(\n \"Inputs should either be a tuple of (features, labels), \"\n \"or a tuple of (features, labels, sample weights). \"\n \"Got a length {len(inputs)} tuple instead: {inputs}.\"\n )\n\n outputs = self(features, training=training)\n\n loss = self._task(labels, outputs, sample_weight=sample_weight)\n loss = tf.reduce_mean(loss)\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n return loss / tf.distribute.get_strategy().num_replicas_in_sync",
"def loss(self, batch_inputs: dict,\n batch_data_samples: SampleList) -> Dict[str, Tensor]:\n pass",
"def loss_cls_single(self, cls_score, labels, label_weights,\n reweight_factor, num_total_samples):\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n -1, self.cls_out_channels).contiguous()\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=num_total_samples)\n return reweight_factor * loss_cls,",
"def my_weighted_loss(onehot_labels, logits):\n # compute weights based on their frequencies\n class_weights = [1., 500.] # set your class weights here\n # computer weights based on onehot labels\n weights = tf.reduce_sum(class_weights * onehot_labels, axis=-1)\n # compute (unweighted) softmax cross entropy loss\n unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(labels=[onehot_labels], logits=[logits])\n # apply the weights, relying on broadcasting of the multiplication\n weighted_losses = unweighted_losses * weights\n # reduce the result to get your final loss\n loss = tf.reduce_mean(weighted_losses)\n return loss",
"def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):\n pass",
"def loss_fn(self, targets, outputs, model):",
"def forward(self, inputs, target_oneHot):\n\n N = inputs.size()[0]\n\n # predicted probabilities for each pixel along channel\n inputs = F.softmax(inputs, dim=1)\n\n # Numerator Product\n inter = inputs * target_oneHot\n # Sum over all pixels N x C x H x W => N x C\n inter = inter.view(N, self.classes, -1).sum(2)\n\n # Denominator\n union = inputs + target_oneHot - (inputs * target_oneHot)\n # Sum over all pixels N x C x H x W => N x C\n union = union.view(N, self.classes, -1).sum(2)\n\n loss = inter / union\n\n ## Return average loss over classes and batch\n # return 1 - loss.mean()\n return -(loss.mean() - 1.)",
"def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)",
"def loss(self, class_weights):\n losses = self.model_args.get('loss', 'categorical_crossentropy')\n\n if type(losses) is str:\n multi_loss = False\n losses = {losses: 1.0}\n elif type(losses) is dict:\n multi_loss = True\n\n if class_weights is not None:\n class_weights = tf.convert_to_tensor(class_weights, dtype=tf.float32)\n\n # custom 'ordinal' loss option\n if 'ordinal_squared_error' in losses.keys():\n k = float(self.data.num_classes)\n a = tf.expand_dims(tf.range(0, k, dtype=tf.float32), axis=-1)\n k_factor = tf.constant((k+1)/k, shape=[1,1], name='k_factor')\n min_regr = tf.constant(-0.5, shape=[1,1], name='min_regression_value')\n\n def ordinal_loss(y_true, y_pred):\n y_estimate = tf.tensordot(y_pred, a, [[-1], [0]])\n y_estimate = k_factor * y_estimate + min_regr # scale to range [-0.5, k+0.5]\n y_values = tf.cast(tf.argmax(y_true, -1), dtype=y_estimate.dtype)\n\n min_class = tf.convert_to_tensor(0.0, dtype=y_estimate.dtype)\n max_class = tf.convert_to_tensor( k, dtype=y_estimate.dtype)\n sqr_error = tf.square(y_values - tf.squeeze(tf.clip_by_value(y_estimate, min_class, max_class)))\n\n if class_weights is not None:\n weight_vec = tf.gather(class_weights, tf.argmax(y_true, -1))\n sqr_error *= weight_vec\n\n return tf.reduce_mean(sqr_error)\n\n if not multi_loss:\n return ordinal_loss\n\n if 'categorical_crossentropy' in losses.keys():\n # TODO: option for clipping?\n def categorical_loss(y_true, y_pred):\n epsilon_ = tf.convert_to_tensor(1e-5, dtype=y_pred.dtype)\n y_pred = tf.clip_by_value(y_pred, epsilon_, 1. - epsilon_)\n\n cross_entropy = -tf.reduce_sum(y_true * tf.math.log(y_pred), axis=-1)\n\n if class_weights is not None:\n weight_vec = tf.gather(class_weights, tf.argmax(y_true, -1))\n cross_entropy *= weight_vec\n\n return cross_entropy\n\n if not multi_loss:\n return categorical_loss\n\n # weighted multi-loss option\n if multi_loss:\n def weighted_loss(y_true, y_pred):\n ord_weight = tf.constant(losses['ordinal_squared_error'], shape=[1,1])\n cat_weight = tf.constant(losses['categorical_crossentropy'], shape=[1,1])\n loss = ord_weight * ordinal_loss(y_true, y_pred) \\\n + cat_weight * categorical_loss(y_true, y_pred)\n return loss\n return weighted_loss",
"def forward(self, y_pred: Dict[str, torch.Tensor], target: Union[torch.Tensor, rnn.PackedSequence]) -> torch.Tensor:\n # unpack\n if isinstance(target, rnn.PackedSequence):\n target, lengths = rnn.pad_packed_sequence(target, batch_first=True)\n # batch sizes reside on the CPU by default -> we need to bring them to GPU\n lengths = lengths.to(target.device)\n else:\n lengths = torch.ones(target.size(0), device=target.device, dtype=torch.long) * target.size(1)\n assert not target.requires_grad\n\n # calculate loss with \"none\" reduction\n if target.ndim == 3:\n weight = target[..., 1]\n target = target[..., 0]\n else:\n weight = None\n\n losses = self.loss(y_pred, target)\n # weight samples\n if weight is not None:\n losses = losses * weight.unsqueeze(-1)\n\n # mask loss\n mask = torch.arange(target.size(1), device=target.device).unsqueeze(0) >= lengths.unsqueeze(-1)\n if losses.ndim > 2:\n mask = mask.unsqueeze(-1)\n dim_normalizer = losses.size(-1)\n else:\n dim_normalizer = 1.0\n # reduce to one number\n if self.reduction == \"none\":\n loss = losses.masked_fill(mask, float(\"nan\"))\n else:\n if self.reduction == \"mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n elif self.reduction == \"sqrt-mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n loss = loss.sqrt()\n assert not torch.isnan(loss), (\n \"Loss should not be nan - i.e. something went wrong \"\n \"in calculating the loss (e.g. log of a negative number)\"\n )\n assert torch.isfinite(\n loss\n ), \"Loss should not be infinite - i.e. something went wrong (e.g. input is not in log space)\"\n return loss",
"def optimize(nn_last_layer, correct_label, learning_rate, global_step, add_class_weight=False):\n\n if add_class_weight:\n weight = tf.constant(CLASS_WEIGHT, dtype=tf.float32)\n f_correct_label = tf.cast(correct_label, dtype=tf.float32)\n weighted_label = tf.multiply(f_correct_label, weight)\n\n weighted_label = tf.reduce_sum(weighted_label, axis=3)\n # print(weighted_label.shape)\n\n r_correct_label = tf.reshape(correct_label, shape=(-1, 3))\n r_last_layer = tf.reshape(nn_last_layer, shape=(-1, 3))\n # r_weighted_label=tf.reshape(weighted_label,shape=(-1,3))\n\n cross_entropy_image = tf.losses.softmax_cross_entropy(onehot_labels=r_correct_label,\n logits=r_last_layer)\n cross_entropy_image = cross_entropy_image * weighted_label\n else:\n\n r_correct_label = tf.reshape(correct_label, shape=(-1, 3))\n r_last_layer = tf.reshape(nn_last_layer, shape=(-1, 3))\n cross_entropy_image=tf.losses.softmax_cross_entropy(onehot_labels=r_correct_label,logits=r_last_layer)\n\n cross_entropy_loss = tf.reduce_mean(cross_entropy_image)\n # cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=nn_last_layer,\n # labels=correct_label))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss, global_step=global_step)\n\n return train_op, cross_entropy_loss",
"def _weighted_loss(loss, weight):\n with ops.name_scope(None, \"weighted_loss\", (loss, weight)) as name:\n return math_ops.multiply(\n array_ops.reshape(\n loss, shape=(-1,)),\n array_ops.reshape(\n weight, shape=(-1,)),\n name=name)",
"def _loss_fn(\n mdl_vars: NestedJTensor, inputs: NestedMap\n ) -> Tuple[JTensor, Tuple[Any, NestedMap, SummaryDict, SummaryDict]]:\n if fprop_dtype == jnp.float32:\n pass\n elif fprop_dtype == jnp.bfloat16:\n mdl_vars = jax.tree_map(_maybe_to_bfloat16, mdl_vars)\n inputs = jax.tree_map(_maybe_to_bfloat16, inputs)\n else:\n assert NotImplementedError(f'fprop_dtype {fprop_dtype} not supported.')\n\n with base_layer.JaxContext.new_context(\n params=context_p, prng_key=subkey, global_step=states.step):\n # Prepares mdl for fprop. This clears all forward-updated vars that kept\n # locally in mdl.\n mdl.prepare_fprop()\n\n metrics, per_example_output = mdl.fprop(mdl_vars, inputs)\n loss_name = learner.loss_name\n assert loss_name in metrics\n loss, loss_weight = metrics[loss_name]\n assert loss.ndim == 0, 'loss has to be a scalar.'\n assert loss_weight.ndim == 0, 'loss_weight has to be a scalar'\n loss_weight = jax.lax.stop_gradient(loss_weight)\n if in_pmap:\n # Renormalize loss weight by the total weight across all replicas.\n # This also takes care of dividing loss by num of data parallel\n # replicas.\n loss_weight /= jax.lax.psum(\n loss_weight, axis_name=data_parallel_axis_name)\n else:\n # loss_weight == 1 in spmd.\n loss_weight /= jnp.sum(loss_weight)\n weighted_loss = loss * loss_weight\n # Fetch forward-updated vars, which often include batch norm vars, other\n # misc stats, etc.\n forward_updated_vars = mdl.forward_updated_vars\n # Finally, fetch all the summary tensors.\n summary_tensors = base_layer.all_summaries()\n if fprop_dtype == jnp.bfloat16 and weighted_loss.dtype == fprop_dtype:\n weighted_loss = weighted_loss.astype(jnp.float32)\n return weighted_loss, (metrics, forward_updated_vars, summary_tensors,\n per_example_output)",
"def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss",
"def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))",
"def l1(name, weights):\n\n with tf.name_scope(name):\n regularizer = np.float32(0.0)\n for weight in weights:\n tf.add(regularizer, tf.nn.l1_loss(weight))\n\n return regularizer",
"def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)",
"def loss(self,\r\n inputs,\r\n **kwargs):\r\n\r\n return tf.zeros([]), self.call(inputs, **kwargs)",
"def loss(self, predictions, labels, labels_2, inputs, raw_inp):\n next_word = labels\n curr_label = tf.cast(labels_2, tf.float32)\n\n \n prediction_word = predictions[0]\n prediction_label = predictions[1]\n\n #initialising variables\n cross_entropy_next = tf.constant(0)\n cross_entropy_label = tf.constant(0)\n cross_entropy_label_similarity = tf.constant(0)\n cross_entropy_emb = tf.constant(0)\n \n self.prec_label, self.prec_label_op = tf.constant(1), tf.constant(1)\n self.recall_label, self.recall_label_op = tf.constant(1), tf.constant(1)\n self.label_sigmoid = tf.constant(0)\n\n \n if self.config.solver._next_node_loss:\n #<EOS> and <UNK> get encoded as 1 and 0 respectively\n #Count loss only for actual nodes\n \n raw_inp1 = tf.greater(tf.slice(raw_inp, [0,0],[-1, 1]), -1) #Make first column all True\n raw_inp2 = tf.greater(tf.slice(raw_inp, [0,1],[-1, -1]), 1) #Make only non (<EOS>,<UNK>) True\n raw_inp = tf.concat(1, [raw_inp1, raw_inp2]) #concatenate back to original shape\n raw_inp = tf.transpose(raw_inp) #Transpose raw_inp from batch*step to step*batch\n mask = [tf.reshape(tf.cast(raw_inp, tf.float32), [-1])] #Convert from bool to float and flatten array\n\n\n #<EOS> and <UNK> get encoded as 1 and 0 respectively\n #Transpose raw_inp from batch*step to shape*batch\n #Count loss only for actual nodes\n #Convert from bool to float and flatten array\n #mask = [tf.reshape(tf.cast(tf.greater(tf.transpose(raw_inp), 0), tf.float32), [-1])]\n\n #Vector to weigh different word losses\n #all_ones = [tf.ones([self.config.batch_size * self.config.num_steps])]\n\n #cross entropy loss for next word prediction\n cross_entropy_next = sequence_loss([prediction_word],[tf.reshape(next_word, [-1])], mask, self.config.data_sets._len_vocab)\n tf.add_to_collection('total_loss', cross_entropy_next)\n\n if self.config.solver._curr_label_loss:\n #Get the slice of tensor representing label '0' for all batch.seq\n #'0' label is assigned for <EOS> and the nodes whose labels are not known\n #Valid errors are only those which don't have '0' label\n valid = tf.cast(tf.less(tf.slice(curr_label, [0,0,0], [self.config.num_steps, self.config.batch_size, 1]), tf.constant(0.5)), tf.float32)\n #replicate along 3rd axis\n valid = tf.tile(valid, tf.pack([1,1,tf.shape(curr_label)[2]]))\n \n #Sigmoid activation\n self.label_sigmoid = tf.sigmoid(prediction_label)\n #binary cross entropy for labels\n cross_loss = tf.add(tf.log(1e-10 + self.label_sigmoid)*curr_label,\n tf.log(1e-10 + (1-self.label_sigmoid))*(1-curr_label))\n #only consider the loss for valid label predictions\n #[TODO] mean of all or mean of only valid ???\n cross_entropy_label = -1*tf.reduce_mean(tf.reduce_sum(cross_loss*valid,2))\n tf.add_to_collection('total_loss', cross_entropy_label)\n\n\n if self.config.solver._label_similarity_loss: \n #Label similarity loss \n label_sigmoid = tf.sigmoid(pred_label_reshaped)\n part1 = tf.slice(label_sigmoid, [0,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.data_sets._len_labels])\n part2 = tf.slice(label_sigmoid, [1,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.data_sets._len_labels])\n\n #Exponential weightage -> [r**(n-1), r**(n-2), ... , r**2. 
r**1]\n label_diffusion = tf.constant([self.config.data_sets._diffusion_rate**i for i in range(self.config.num_steps-1,0,-1)])\n cross_loss_sim = tf.add(tf.log(1e-10 + part1)*part2, tf.log(1e-10 + (1-part1))*(1-part2))\n #prediction is 3 dimensional (seq x batch x label_len), reduce along axis of label_len\n #Sum over each label error -> take mean over the batch -> sum for the sequence\n cross_entropy_label_similarity = tf.reduce_sum(tf.reduce_mean(-tf.reduce_sum(cross_loss_sim, 2),1) * label_diffusion)\n tf.add_to_collection('total_loss', cross_entropy_label_similarity)\n\n \n if self.config.solver._embedding_loss:\n #embedding similarity loss\n #Matching First input's embeddings with embeddings of other inputs\n #[TODO] reverse feed of input AND reverse diffusion rate\n \n emb_part1 = tf.slice(inputs, [self.config.num_steps-2,0,0], [1, self.config.batch_size, self.config.mRNN._embed_size])\n emb_part2 = tf.slice(inputs, [0,0,0], [self.config.num_steps-1, self.config.batch_size, self.config.mRNN._embed_size])\n\n #Exponential weightage -> [r**(n-1), r**(n-2), ... , r**2. r**1]\n label_diffusion = tf.constant([self.config.data_sets._diffusion_rate**i for i in range(self.config.num_steps-1,0,-1)])\n #Broadcastive Subtraction\n mse_emb = tf.reduce_mean(tf.square(emb_part2 - emb_part1),2)\n cross_entropy_emb = tf.reduce_sum(tf.reduce_mean(mse_emb,1) * label_diffusion) * self.config.data_sets._emb_factor\n tf.add_to_collection('total_loss', cross_entropy_emb)\n\n if self.config.solver._L2loss:\n vars = tf.trainable_variables() \n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars])*0.00001\n tf.add_to_collection('total_loss', lossL2)\n\n loss = tf.add_n(tf.get_collection('total_loss'))\n grads, = tf.gradients(loss, [self.embedding]) \n\n tf.summary.scalar('next_node_loss', cross_entropy_next)\n tf.summary.scalar('curr_label_loss', cross_entropy_label)\n tf.summary.scalar('label_similarity_loss', cross_entropy_label_similarity )\n tf.summary.scalar('emb_loss', cross_entropy_emb)\n tf.summary.scalar('total_loss', tf.reduce_sum(loss))\n \n return [loss, cross_entropy_next, cross_entropy_label, cross_entropy_label_similarity, cross_entropy_emb, grads]",
"def class_weighted_loss_fn(loss_fn, labels, logits, pos_weights):\n logging.info(\"NOTE: using weighted loss!\")\n task_losses = []\n for task_num in range(labels.get_shape()[1]):\n task_losses.append(\n loss_fn(labels[:,task_num],\n logits[:,task_num],\n pos_weights[task_num]))\n task_loss_tensor = tf.stack(task_losses, axis=1)\n loss = tf.reduce_sum(task_loss_tensor)\n # to later get total_loss\n tf.add_to_collection(ops.GraphKeys.LOSSES, loss)\n\n return loss",
"def backwardPropagation(self, inputs, label, loss, node_hidden, node_output):\n err = node_output\n err[range(inputs.shape[0]), label] -= 1\n err = err / inputs.shape[0]\n \"\"\"Back propagate to hidden layer\"\"\"\n del_output_W = np.dot(node_hidden.T, err)\n \"\"\"Back propagate to input layer\"\"\"\n del_input_W = np.dot(err, self.hidden_W.T)\n \"\"\" Use Relu function\"\"\"\n del_input_W[node_hidden <= 0] = 0\n del_input_W = np.dot(inputs.T, del_input_W)\n \"\"\"Penalize the error with regularizer value\"\"\"\n del_input_W = del_input_W + self.regularizer * self.input_W\n del_output_W = del_output_W + self.regularizer * self.hidden_W\n \"\"\"Store the error value into the weight value\"\"\"\n self.input_W += -self.lr * del_input_W\n self.hidden_W += -self.lr * del_output_W",
"def loss(self, input_val_dict):\n\n sess = tf.get_default_session()\n feed_dict = self.create_feed_dict(input_val_dict)\n loss = sess.run(self._loss, feed_dict=feed_dict)\n return loss",
"def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss",
"def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss",
"def forward(self, feat_T: torch.Tensor) -> torch.Tensor:\n return self.loss_weight * self.forward_train(feat_T)",
"def loss(self, X_batch, y_batch, reg):\n pass",
"def forward(ctx, x, weight, bias, stride):\n # For your convenience: ints for each size\n batch_size, in_channel, input_size = x.shape\n out_channel, _, kernel_size = weight.shape\n if not (type(x).__name__ == 'Tensor' and type(weight).__name__ == 'Tensor' and type(bias).__name__ == 'Tensor'):\n raise Exception(\"All args must be Tensors: {},{}, {}\".format(\n type(x).__name__, type(weight).__name__), type(bias).__name__)\n # TODO: Save relevant variables for backward pass\n ctx.save_for_backward(x, weight, bias)\n ctx.stride = stride\n # TODO: Get output size by finishing & calling get_conv1d_output_size()\n output_size = get_conv1d_output_size(input_size, kernel_size, stride)\n ctx.output_size = output_size\n requires_grad = x.requires_grad or weight.requires_grad or bias.requires_grad\n # TODO: Initialize output with correct size\n out = np.zeros((batch_size, out_channel, output_size))\n\n for i in range(batch_size):\n for j in range(out_channel):\n curr = 0\n for k in range(0, input_size-kernel_size+1, stride):\n out[i][j][curr] = np.sum(x.data[i, :, k:k+kernel_size]\n * weight.data[j]) + bias.data[j]\n curr += 1\n\n # TODO: Calculate the Conv1d output.\n # Remember that we're working with np.arrays; no new operations needed.\n out = tensor.Tensor(out, requires_grad=requires_grad,\n is_leaf=not requires_grad)\n\n # TODO: Put output into tensor with correct settings and return\n return out"
]
| [
"0.63926",
"0.6049288",
"0.60120857",
"0.6006793",
"0.59572875",
"0.59036493",
"0.5870864",
"0.5830311",
"0.57291305",
"0.5717345",
"0.56763923",
"0.5671253",
"0.5646253",
"0.56360745",
"0.5630161",
"0.5623673",
"0.56132084",
"0.560395",
"0.5577375",
"0.5574701",
"0.5570263",
"0.5562177",
"0.5561168",
"0.55591714",
"0.5553353",
"0.5551553",
"0.5546673",
"0.5544568",
"0.5536464",
"0.5535657"
]
| 0.642647 | 0 |
Computes a safe log. This function returns 0.0 wherever x contains any value <= 0.0. | def safe_log(x):
safe_x = jnp.where(x > 0.0, x, jnp.ones_like(x))
return jnp.where(x > 0.0, jnp.log(safe_x), jnp.zeros_like(x)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))",
"def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))",
"def log(x, base=math.e):\n return 0.0",
"def log2(x: float) -> float:\n return math.log2(x) if x > 0 else 0",
"def log_transform(x, epsilon = 1e-4):\n if x.min() < 0: epsilon += np.abs(x.min())\n return (x.fillna(0).astype(float) + epsilon).apply(np.log)",
"def log10(x):\n return 0.0",
"def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)",
"def safe_log(data):\n\n non_zero = data != 0\n result = numpy.zeros(data.shape, dtype=float)\n with numpy.errstate(invalid=\"raise\"):\n result[non_zero] = numpy.log2(data[non_zero])\n return result",
"def log10_zero(x):\n if x == 0:\n return 0\n else:\n return math.log10(x)",
"def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)",
"def log1p(x):\n return 0.0",
"def logistic(x):\n try:\n denom = (1 + math.e ** -x)\n except OverflowError:\n return 0.0\n return 1.0 / denom",
"def lg(x: Union[int, float]) -> float:\n res = 0.0\n try:\n res = log(x, 2)\n except ValueError:\n pass\n return res",
"def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)",
"def log_check(w_in: np.ndarray, w_log: np.ndarray) -> None:\n w_log[:] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n if np.any(w_in <= 0):\n return\n\n w_log[:] = np.log(w_in[:])",
"def smart_unlog(self, value: float) -> float:\n if value > 0:\n return self.log_scale ** value\n elif value == 0:\n return 0\n elif value < 0:\n return -(self.log_scale ** abs(value))",
"def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))",
"def _safe_xlogy(x: Tensor, y: Tensor) ->Tensor:\n res = x * torch.log(y)\n res[x == 0] = 0.0\n return res",
"def _signed_log(x, base):\n return numpy.sign(x) * numpy.log10(numpy.abs(x)) / numpy.log10(base)",
"def log_norm(log_x):\n c = np.max(log_x)\n\n if np.isinf(c):\n return c\n\n sum_exp = 0\n\n for x in log_x:\n sum_exp += np.exp(x - c)\n\n log_sum_exp = np.log(sum_exp)\n\n log_Z = log_sum_exp + c\n\n return log_Z",
"def log_i0(x):\n return pt.switch(\n pt.lt(x, 5),\n pt.log1p(\n x**2.0 / 4.0\n + x**4.0 / 64.0\n + x**6.0 / 2304.0\n + x**8.0 / 147456.0\n + x**10.0 / 14745600.0\n + x**12.0 / 2123366400.0\n ),\n x\n - 0.5 * pt.log(2.0 * np.pi * x)\n + pt.log1p(\n 1.0 / (8.0 * x)\n + 9.0 / (128.0 * x**2.0)\n + 225.0 / (3072.0 * x**3.0)\n + 11025.0 / (98304.0 * x**4.0)\n ),\n )",
"def _log_util(chips: float,\n bet_size: float,\n payout: float) -> float:\n if chips <= 0 or chips + payout*bet_size <= 0:\n return MIN_REWARD\n return max(math.log(1.0 + chips + payout*bet_size) - math.log(1.0 + chips),\n MIN_REWARD)",
"def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x",
"def unifLogOne(self, x=np.array([]), low=0., hi=100.):\n \n const = 1.0/low**2. - 1.0/hi**2.\n lnPrior = np.log(const) - np.log(x)\n bOut = (x <= low) | (x > hi)\n lnPrior[bOut] = -np.inf\n\n return lnPrior",
"def _call(self, x):\n if self.prior is None:\n tmp = ((x - 1 - np.log(x)).inner(self.domain.one()))\n else:\n # This is the old line from odl version 0.6.0.\n # tmp = ((x - self.prior + self.prior * np.log(self.prior / x))\n tmp = ((x - self.prior + self.prior * np.log((self.prior + 1e-12) / x))\n .inner(self.domain.one()))\n if np.isnan(tmp):\n # In this case, some element was less than or equal to zero\n return np.inf\n else:\n return tmp",
"def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )",
"def _call(self, x):\n if self.prior is None:\n tmp = -1.0 * (np.log(1 - x)).inner(self.domain.one())\n else:\n tmp = (-self.prior * np.log(1 - x)).inner(self.domain.one())\n if np.isnan(tmp):\n # In this case, some element was larger than or equal to one\n return np.inf\n else:\n return tmp",
"def log2_python(x):\n\n if (x == 0):\n return -float(\"inf\")\n\n return x.bit_length() - 1",
"def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)",
"def formula_0(x: np.ndarray) -> np.ndarray:\n logx = np.log(x)\n denom = x - 1\n k0 = (x - logx * x - 1) / denom\n return k0"
]
| [
"0.8466852",
"0.7791398",
"0.76290375",
"0.75993013",
"0.75740945",
"0.7498413",
"0.7462287",
"0.7412065",
"0.72859854",
"0.72764105",
"0.7259336",
"0.723413",
"0.7206518",
"0.71971625",
"0.7141088",
"0.7078743",
"0.70738405",
"0.70517623",
"0.6978665",
"0.69375044",
"0.6780881",
"0.67495954",
"0.674742",
"0.67160285",
"0.66949",
"0.66505885",
"0.6622417",
"0.66042584",
"0.66014147",
"0.65898496"
]
| 0.8335094 | 1 |
Cosine loss. This loss computes the dot product between predictions and labels as loss. The value ranges from [0, 2.0] depending on the alignment of prediction and label vectors. This loss can be used when we want to optimize the alignment of the vectors directly. | def cosine_loss(predictions,
labels,
weights = 1.0,
loss_reduction = LossReductionType.SUM_BY_NONZERO_WEIGHTS,
**kwargs):
del kwargs # Unused
check_shape_equal(predictions, labels)
cosine = 1.0 - jnp.sum(predictions * labels, axis=-1)
return compute_weighted_loss(
cosine,
weights=weights,
loss_reduction=loss_reduction,
dtype=predictions.dtype) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward_cosine(self, data, label):\n \n assert label.requires_grad is False\n data = self.preprocess(data)\n label = self.preprocess(label)\n\n feature_tri_data, f_data, orig_feature_data = self.extract_feature(data)\n feature_tri_label, f_label, orig_feature_label = self.extract_feature(label)\n \n perceptual_loss = (((feature_tri_data[0] * feature_tri_label[0]).sum(dim=1) + \\\n (feature_tri_data[1] * feature_tri_label[1]).sum(dim=1) + \\\n (feature_tri_data[2] * feature_tri_label[2]).sum(dim=1) + \\\n (feature_tri_data[3] * feature_tri_label[3]).sum(dim=1)) / 4).mean()\n \n return perceptual_loss, (orig_feature_data, orig_feature_label)",
"def cosine_similarity(y_true, y_pred, axis=-1):\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return -math_ops.reduce_sum(y_true * y_pred, axis=axis)",
"def __cosine_proximity_loss(self, y_true_decoder, y_pred_decoder):\n dot_products = np.array([np.dot(y_true_decoder[i],y_pred_decoder[i]) for i in range(y_true_decoder.shape[0])])\n l2norm_products = np.prod([np.linalg.norm(y_true_decoder, axis=1),np.linalg.norm(y_pred_decoder, axis=1)], axis=0)\n cos_loss_list = -np.divide(dot_products, l2norm_products)\n return cos_loss_list",
"def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])",
"def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)",
"def compute_cosine(pred, obs):\n assert(pred.numel() == obs.numel()), \\\n 'Size of observation and prediction tensors much match. Received: pred %s, obs %s.'%(\n str(pred.size()), str(obs.size()))\n\n def normalise(x, dim=1):\n \"\"\" compute L2 norm and normalise x \"\"\"\n norm = torch.sqrt( torch.pow(x,2.).sum(dim) )\n if dim>0:\n x /= norm.unsqueeze(dim)\n return x\n\n # if we have one-dimensional tensors, compute cosine similarity along first dimension (0).\n # if we have two-dimensional tensors, compute cosine similarity along second dimension (1).\n # if we have three-dimensional tensors, compute cosine similarity along third dimension (2).\n # i.e. first dimension is considered the feature vector (will be reduced to a scalar, the cos.sim.)\n dim = len(pred.size()) - 1\n assert(dim>=0 and dim <=2), \\\n 'This function only computes cosine similarity between 1D, 2D or 3D tensors! Received dim==%i'%(dim)\n\n p_norm = normalise(pred, dim=dim)\n v_norm = normalise(obs, dim=dim)\n return torch.nn.functional.cosine_similarity( p_norm, v_norm, dim=dim )",
"def _cosine_and_bce(preds: Tensor, pseudo_label: Tensor, mask: Tensor) -> Tensor:\n # cosine similarity\n cosine_sim = dot_product(preds[:, None, :], preds).clamp(min=0, max=1)\n # binary cross entropy\n unreduced_loss = F.binary_cross_entropy(cosine_sim, pseudo_label, reduction=\"none\")\n return torch.mean(unreduced_loss * mask)",
"def correlation_loss(predictions, labels):\n\n vp = predictions - torch.mean(predictions)\n vl = labels - torch.mean(labels)\n\n # cost = torch.sum(vp * vl) / (torch.sqrt(torch.sum(vp ** 2)) * torch.sqrt(torch.sum(vl ** 2)))\n cost = torch.mean(vp*vl) / (torch.std(predictions)*torch.std(labels))\n return cost",
"def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))",
"def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))",
"def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0",
"def cosine_triplet_semihard_loss(labels, embeddings, margin=1.0):\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n lshape = tf.shape(labels)\n assert lshape.shape == 1\n labels = tf.reshape(labels, [lshape[0], 1])\n\n # Build pairwise squared distance matrix.\n pdist_matrix = cosine_pairwise_distance(embeddings)\n # Build pairwise binary adjacency matrix.\n adjacency = tf.equal(labels, tf.transpose(labels))\n # Invert so we can select negatives only.\n adjacency_not = tf.math.logical_not(adjacency)\n\n batch_size = tf.size(labels)\n\n # Compute the mask.\n pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])\n mask = tf.math.logical_and(\n tf.tile(adjacency_not, [batch_size, 1]),\n tf.math.greater(\n pdist_matrix_tile, tf.reshape(\n tf.transpose(pdist_matrix), [-1, 1])))\n mask_final = tf.reshape(\n tf.math.greater(\n tf.reduce_sum(\n tf.cast(mask, dtype=tf.float32), 1, keepdims=True),\n 0.0), [batch_size, batch_size])\n mask_final = tf.transpose(mask_final)\n\n adjacency_not = tf.cast(adjacency_not, dtype=tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n\n # negatives_outside: smallest D_an where D_an > D_ap.\n negatives_outside = tf.reshape(\n masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])\n negatives_outside = tf.transpose(negatives_outside)\n\n # negatives_inside: largest D_an.\n negatives_inside = tf.tile(\n masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])\n semi_hard_negatives = tf.where(\n mask_final, negatives_outside, negatives_inside)\n\n loss_mat = tf.math.add(margin, pdist_matrix - semi_hard_negatives)\n\n mask_positives = tf.cast(\n adjacency, dtype=tf.float32) - tf.linalg.diag(\n tf.ones([batch_size]))\n\n # In lifted-struct, the authors multiply 0.5 for upper triangular\n # in semihard, they take all positive pairs except the diagonal.\n num_positives = tf.reduce_sum(mask_positives)\n\n triplet_loss = tf.math.truediv(\n tf.reduce_sum(\n tf.math.maximum(\n tf.math.multiply(loss_mat, mask_positives), 0.0)),\n num_positives,\n name='triplet_semihard_loss')\n\n return triplet_loss",
"def safe_cosine_sim(x, y):\n l2x = fluid.layers.l2_normalize(x, axis=-1)\n l2y = fluid.layers.l2_normalize(y, axis=-1)\n cos = fluid.layers.reduce_sum(l2x * l2y, dim=1, keep_dim=True)\n return cos",
"def get_cosine(vec1, vec2):\n OPS = get_current_ops()\n v1 = OPS.to_numpy(OPS.asarray(vec1))\n v2 = OPS.to_numpy(OPS.asarray(vec2))\n return numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))",
"def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity",
"def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))",
"def compute_cosine_global_image_features(pred_location, pred_scale, obs):\n pred_location, pred_scale, obs = reshape_global_image_features(pred_location, pred_scale, obs)\n # compute cosine distance between ground truth image features and generated image features (zs)\n image_cosine = compute_cosine(pred_location, obs)\n image_cosine = torch.mean(image_cosine)\n return image_cosine",
"def cosine(xs: Tensor, ys: Tensor, epsilon: float = 1e-8) -> Tensor:\n mat = xs @ ys.t()\n x_norm = xs.norm(2, dim=1) + epsilon\n y_norm = ys.norm(2, dim=1) + epsilon\n x_diag = (1 / x_norm).diag()\n y_diag = (1 / y_norm).diag()\n return x_diag @ mat @ y_diag",
"def cos_vecs(x, y):\r\n _t = np.sum((x * y), axis=1)\r\n norm_x = np.linalg.norm(x, axis=1, keepdims=True)\r\n norm_y = np.linalg.norm(y, axis=1, keepdims=True)\r\n _t = np.reshape(_t, (-1, 1))\r\n ret = _t / (norm_x * norm_y + 1e-10)\r\n return ret",
"def cosine(X,Y=None,dense_output=True):\n #Reemplace NaN with Zero\n X[np.isnan(X)] = 0\n if Y is not None:\n Y[np.isnan(Y)] = 0\n \n return cosine_similarity(X,Y,dense_output)",
"def findCosineDistance(vector1, vector2):\n vec1 = vector1.flatten()\n vec2 = vector2.flatten()\n\n a = np.dot(vec1.T, vec2)\n b = np.dot(vec1.T, vec1)\n c = np.dot(vec2.T, vec2)\n return 1 - (a / (np.sqrt(b) * np.sqrt(c)))",
"def cos(\r\n vec1: torch.FloatTensor, vec2: torch.FloatTensor, dim: int = -1\r\n) -> torch.FloatTensor:\r\n return torch.sum(vec1 * vec2, dim=dim) / (\r\n vec1.norm(dim=dim) * vec2.norm(dim=dim) + EPS\r\n )",
"def center_loss(features, label, alfa, nrof_classes):\n nrof_features = features.get_shape()[1]\n centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,\n initializer=tf.constant_initializer(0), trainable=False)\n label = tf.reshape(label, [-1])\n centers_batch = tf.gather(centers, label)\n diff = (1 - alfa) * (centers_batch - features)\n centers = tf.scatter_sub(centers, label, diff)\n with tf.control_dependencies([centers]):\n loss = tf.reduce_mean(tf.square(features - centers_batch))\n return loss, centers",
"def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity",
"def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))",
"def calculate_cosine_dist(main_text, new_text):\n wordbag = set(\" \".join([main_text, new_text]).split(\" \"))\n dot_prod = 0\n main_text = main_text.split(\" \")\n new_text = new_text.split(\" \")\n\n for word in wordbag:\n if word in main_text and word in new_text:\n # only worth looking at if word is in both. Otherwise dot prod = 0\n count_A = sum(np.array(main_text) == word)\n count_B = sum(np.array(new_text) == word)\n dot_prod += count_A * count_B\n\n return float(dot_prod) / (len(main_text) * len(new_text))",
"def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))",
"def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]",
"def sc(self) -> float:\n a = np.dot(self.true - np.mean(self.true), self.predicted - np.mean(self.predicted))\n b = np.linalg.norm(self.true - np.mean(self.true))\n c = np.linalg.norm(self.predicted - np.mean(self.predicted))\n e = b * c\n return float(np.arccos(a / e))",
"def cosine_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x_1` and rows of `x_2`\n # \"ij,ij->i\" := output[i] = sum_j x1[i, j] * x2[i, j]\n cos_thetas = tf.linalg.einsum(\"ij,ij->i\", x1, x2)\n cos_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n cos_distances = tf.maximum(cos_distances, 0.0)\n\n return cos_distances"
]
| [
"0.7016788",
"0.6793693",
"0.66546625",
"0.65116197",
"0.64649904",
"0.64382917",
"0.62884885",
"0.62357163",
"0.6223788",
"0.6125927",
"0.6124123",
"0.60824096",
"0.60576713",
"0.60450846",
"0.6016146",
"0.5962783",
"0.5940745",
"0.5913568",
"0.5892761",
"0.5881885",
"0.58765066",
"0.58756244",
"0.5851935",
"0.58517313",
"0.5840613",
"0.58255965",
"0.58142555",
"0.5814228",
"0.5805444",
"0.57616234"
]
| 0.747866 | 0 |
A wrapper to add weight decay to underlying loss function. Use this wrapper if the weight decay in the optimizer is not suitable. For example, if you need to exclude some parameters from decay loss. | def weight_decay_loss_wrapper(
loss_fn = gin.REQUIRED,
factor = gin.REQUIRED,
exclude = (),
):
traversal = traverse_util.ModelParamTraversal(
lambda path, _: all([e not in path for e in exclude]))
def wrapped_loss(outputs, *args, params, **kwargs):
losses = loss_fn(outputs, *args, **kwargs)
weight_decay_params = list(traversal.iterate(params))
weight_l2 = sum([jnp.sum(x**2) for x in weight_decay_params])
weight_penalty = factor * 0.5 * weight_l2
if isinstance(losses, dict):
if 'model_loss' not in losses:
raise ValueError(
'Losses must contain `model_loss` key as total model loss.')
losses['pre_weight_penalty_model_loss'] = losses['model_loss']
losses['model_loss'] += weight_penalty
losses['l2_regularization_loss'] = weight_penalty
elif isinstance(losses, jnp.ndarray):
losses += weight_penalty
else:
raise ValueError('Encountered invalid loss type: ', type(losses))
return losses
return wrapped_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extend_with_decoupled_weight_decay(base_optimizer):\n\n class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,\n base_optimizer):\n \"\"\"Base_optimizer with decoupled weight decay.\n\n This class computes the update step of `base_optimizer` and\n additionally decays the variable with the weight decay being decoupled from\n the optimization steps w.r.t. to the loss function, as described by\n Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).\n For SGD variants, this simplifies hyperparameter search since\n it decouples the settings of weight decay and learning rate.\n For adaptive gradient algorithms, it regularizes variables with large\n gradients more than L2 regularization would, which was shown to yield\n better training loss and generalization error in the paper above.\n \"\"\"\n\n def __init__(self, weight_decay, *args, **kwargs):\n # super delegation is necessary here\n # pylint: disable=useless-super-delegation\n super(OptimizerWithDecoupledWeightDecay,\n self).__init__(weight_decay, *args, **kwargs)\n # pylint: enable=useless-super-delegation\n\n return OptimizerWithDecoupledWeightDecay",
"def add_weight_decay(model, adjust_per_optimizer=True):\n if adjust_per_optimizer and 'lars' in FLAGS.optimizer:\n # Weight decay are taking care of by optimizer for these cases.\n # Except for supervised head, which will be added here.\n l2_losses = [\n tf.nn.l2_loss(v)\n for v in model.trainable_variables\n if 'head_supervised' in v.name and 'bias' not in v.name\n ]\n if l2_losses:\n return FLAGS.weight_decay * tf.add_n(l2_losses)\n else:\n return 0\n\n # TODO(srbs): Think of a way to avoid name-based filtering here.\n l2_losses = [\n tf.nn.l2_loss(v)\n for v in model.trainable_weights\n if 'batch_normalization' not in v.name\n ]\n loss = FLAGS.weight_decay * tf.add_n(l2_losses)\n return loss",
"def _add_weight_decay(self, var, wd):\n wd_loss = tf.multiply(tf.nn.l2_loss(var),\n wd,\n name='weight_loss')\n tf.add_to_collection(GKeys.LOSSES, wd_loss)",
"def decay_weights(cost, weight_decay_rate):\n costs = []\n for var in tf.trainable_variables():\n costs.append(tf.nn.l2_loss(var))\n cost += tf.multiply(weight_decay_rate, tf.add_n(costs))\n return cost",
"def __init__(self,\n weight_decay,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n use_locking=False,\n name=\"AdamW\"):\n super(AdamWOptimizer, self).__init__(\n weight_decay,\n learning_rate=learning_rate,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n use_locking=use_locking,\n name=name)",
"def __init__(self, weight_decay, **kwargs):\n self._decay_var_list = None # is set in minimize or apply_gradients\n self._weight_decay = weight_decay\n # The tensors are initialized in call to _prepare\n self._weight_decay_tensor = None\n super(DecoupledWeightDecayExtension, self).__init__(**kwargs)",
"def _variable_with_weight_decay_orig(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def get_weight_decay(self):\n if type(self.model.optimizer).__name__ == \"AdamWeightDecay\":\n return self.model.optimizer.weight_decay_rate\n else:\n return None",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n # add weight decay term to 'losses' collection, so the sum of all loss in 'losses' collection\n # will be the total/final loss\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(name, shape,\n tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n\n #var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))\n var = weight_variable(shape)\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name = 'weight_loss')\n tf.add_to_collection('losses', weight_decay)\n \n return var",
"def _add_weight_decay(net, l2_value, skip_list=()):\n decay, no_decay = [], []\n for name, param in net.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n no_decay.append(param)\n else:\n decay.append(param)\n return [{'params': decay, 'weight_decay': l2_value}, {'params': no_decay, 'weight_decay': 0.}, ]",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def weights_decay(self):\n for param_group in self.optimizer.param_groups:\n for param in param_group['params']:\n param.data = param.data.add(-1.*self.weights_decay * param_group['lr'], param.data)",
"def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):\n if use_xavier:\n # initializer = tf.contrib.layers.xavier_initializer()\n initializer = tf.initializers.glorot_uniform()\n else:\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = _variable_on_cpu(name, shape, initializer)\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n var = variable(\n name,\n shape,\n initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def __init__(self,\n weight_decay,\n learning_rate,\n momentum,\n use_locking=False,\n name=\"MomentumW\",\n use_nesterov=False):\n super(MomentumWOptimizer, self).__init__(\n weight_decay,\n learning_rate=learning_rate,\n momentum=momentum,\n use_locking=use_locking,\n name=name,\n use_nesterov=use_nesterov)",
"def _decay(self):\n wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n log.info('Weight decay variables')\n [log.info(x) for x in wd_losses]\n log.info('Total length: {}'.format(len(wd_losses)))\n if len(wd_losses) > 0:\n return tf.add_n(wd_losses)\n else:\n log.warning('No weight decay variables!')\n return 0.0",
"def weight_decay(norm=2):\n costs = []\n for var in tf.trainable_variables():\n if 'weight' in var.op.name or 'fc' in var.op.name or 'conv' in var.op.name:\n if norm == 1:\n lp_norm_var = tf.reduce_sum(tf.abs(var))\n elif norm == 2:\n lp_norm_var = tf.reduce_sum(tf.square(var))\n else:\n raise ValueError('wrong norm of weight decay')\n costs.append(lp_norm_var)\n return tf.add_n(costs)",
"def _variable_with_weight_decay(self, shape, stddev, wd):\n\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = tf.get_variable('weights', shape=shape,\n initializer=initializer)\n\n# if wd and (not tf.get_variable_scope().reuse):\n# weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n# tf.add_to_collection('losses', weight_decay)\n return var",
"def _variable_with_weight_decay(self, name, shape, wd):\n var = self._variable_on_device(\n name,\n shape,\n tf.contrib.layers.xavier_initializer_conv2d(uniform=True))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,\n name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n # We will replicate the model structure for the training subgraph, as well\n # as the evaluation subgraphs, while sharing the trainable parameters.",
"def _variable_with_weight_decay(name, shape, wd = 0.0):\n var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())\n # print(\"change var\")\n # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)\n if wd != 0.0:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var",
"def set_weight_decay(self, wd=U.DEFAULT_WD):\n self._recompile(wd=wd)\n return",
"def _decay(self):\n costs = []\n for var in tf.trainable_variables():\n if var.op.name.find(r'DW') > 0:\n costs.append(tf.nn.l2_loss(var))\n # tf.histogram_summary(var.op.name, var)\n\n return tf.multiply(self.weight_decay_rate, tf.add_n(costs))",
"def variable_with_weight_decay(kernel_shape, initializer, wd):\n w = tf.get_variable(name=\"weights\", shape=kernel_shape, dtype=tf.float32, initializer=initializer)\n\n collection = tf.GraphKeys.REGULARIZATION_LOSSES\n if wd and (not tf.get_variable_scope().reuse):\n weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name=\"w_loss\")\n tf.add_to_collection(collection, weight_decay)\n variable_summaries(w)\n return w",
"def variable_with_weight_decay(kernel_shape, initializer, wd):\n w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)\n\n collection_name = tf.GraphKeys.REGULARIZATION_LOSSES\n if wd and (not tf.get_variable_scope().reuse):\n weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')\n tf.add_to_collection(collection_name, weight_decay)\n variable_summaries(w)\n return w"
]
| [
"0.7632419",
"0.76224226",
"0.72987336",
"0.70135576",
"0.695887",
"0.68777436",
"0.6820775",
"0.6765971",
"0.67616886",
"0.673294",
"0.6707882",
"0.6704943",
"0.6666855",
"0.66658753",
"0.6649116",
"0.6647162",
"0.6631825",
"0.6618139",
"0.6591792",
"0.6591792",
"0.6585925",
"0.6485908",
"0.6463244",
"0.6445034",
"0.64269996",
"0.6381785",
"0.6348467",
"0.6313154",
"0.6202552",
"0.6192902"
]
| 0.7627787 | 1 |
Send a LA/SP/SnapMultiplier request command to message receivers. | async def request(self, multiplier: Optional[int]=None):
# TODO: validate the multiplier
message = Message(self.name_path, multiplier)
await self.issue_command(Command(message)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def request(self, position_difference: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, position_difference)\n await self.issue_command(Command(message))",
"def send_rates_request(self, wtp, lvap):\n\n rates_req = Container(version=PT_VERSION,\n type=PT_RATES_REQUEST,\n length=18,\n seq=wtp.seq,\n rates_id=self.module_id,\n sta=lvap.addr.to_raw())\n\n LOG.info(\"Sending rates request to %s @ %s (id=%u)\",\n lvap.addr, wtp.addr, self.module_id)\n\n msg = RATES_REQUEST.build(rates_req)\n wtp.connection.stream.write(msg)",
"async def request(self, position: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, position)\n await self.issue_command(Command(message))",
"async def request(self, position: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, position)\n await self.issue_command(Command(message))",
"def send_through_aprs(self, message) -> None:\n self.get_module_or_raise_error(\"aprs\").send(f\"{message}\") # FIXME FORAMTTING",
"async def multiply(message, number1: ParamType.NUMBER, number2: ParamType.NUMBER):\n prod = number1 * number2\n return \"product = \" + str(prod)",
"def query_weight(self):\n # open socket connection (TCP/IP)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\n # set time out time for connections (seconds)\n s.settimeout(1)\n\n # connect to the terminal\n try:\n s.connect((self.IP_scale, self.PORT_scale))\n except Exception as e:\n print(\"Couldn't connect to the load cell when quering weight\")\n print(f\"Exception: {e}\")\n\n\n # send stable weight or, if timeout (in ms), then send dynamic weight\n request = self._fomat_request(\"SC 420\")\n s.sendall(request)\n\n # keep calling receive until the end of line symbols are received\n response = []\n while True:\n part_response = s.recv(1024).decode()\n response.append(part_response)\n \n if (\"\\r\" in part_response) or (\"\\n\" in part_response):\n break\n\n # format the reponse\n response_str = str(response).strip('[]')\n parsed_response = re.findall(r'\\b\\d+\\b', response_str)\n weight = int(parsed_response[0]) + int(parsed_response[1])/100\n\n\n return weight",
"def reqSetPower(self, ID_list, s_l):\n while self.status != Modem.Status.IDLE :\n sleep(0.1)\n if self.status != Modem.Status.IDLE:\n raise ValueError(\"Modem setPower unexpected status: \\\n \" + str(self.status))\n self.status = Modem.Status.BUSY2REQ\n self.send(self.interpreter.buildSetPower(ID_list, s_l))\n while self.status != Modem.Status.IDLE and self.status != Modem.Status.KILL:\n sleep(self.m_to)\n # self.recvCommand()\n if self.status == Modem.Status.KILL:\n return self.close()\n return self.errorCheck()",
"def set_multiplex_ratio(ratio):\n send_command(0xA8)\n send_command(ratio)",
"def update_send(self, parameters, loss): #parameters为训练网络的参数\n # Increase the clock value\n self.clock += 1\n\n # Serve the new parameters\n state = {'clock': self.clock, 'loss': loss}\n # 在rx线程中保存此时的loss和模型参数等\n self.rx.set_current_state(state, parameters)\n\n self.fetching = True\n self.tx.fetch_send()",
"def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n try:\n digital_gain = float(self.value_digital.get())\n except:\n print(\"digital must be floating point value\")\n self.value_digital.set(str(self.tcp_comms.tcp_params.digital_gain_target))\n saw_error = True\n try:\n analog_tol = float(self.value_analog_tol.get())\n except:\n print(\"analog tol must be floating point value\")\n self.value_analog_tol.set(str(self.tcp_comms.tcp_params.analog_gain_tol))\n saw_error = True\n try:\n digital_tol = float(self.value_digital_tol.get())\n except:\n print(\"digital tol must be floating point value\")\n self.value_digital_tol.set(str(self.tcp_comms.tcp_params.digital_gain_tol))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.analog_gain_target = analog_gain\n self.tcp_comms.tcp_params.digital_gain_target = digital_gain\n self.tcp_comms.tcp_params.analog_gain_tol = analog_tol\n self.tcp_comms.tcp_params.digital_gain_tol = digital_tol\n self.tcp_comms.send_freeze_exposure(analog_gain, analog_tol, digital_gain, digital_tol)",
"def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)",
"def multiplier(self, multiplier):\n self._multiplier = multiplier",
"def testSetRequest(self):\n self.mgr.sendGoProCommand = Mock()\n value = struct.pack('<HH', 8, 22)\n self.mgr.handlePacket(app_packet.GOPRO_SET_REQUEST, value)\n self.mgr.sendGoProCommand.assert_called_with( 8, (22, 0, 0, 0) )",
"def request(self, pdu):\n self.client.request(pdu)",
"def set_wheel_power(front_left, front_right, back_left, back_right):\n message = \"WHEELS:\" + str(front_left) + ',' + str(front_right) + ',' + str(back_left) + ',' \\\n + str(back_right) + '\\n';\n sock.sendall(message)\n return",
"def _send_market_price_request(self, ric_name):\n mp_req_json = {\n 'ID': 2,\n 'Key': {\n 'Name': ric_name,\n 'Service': service\n },\n }\n self.web_socket_app.send(json.dumps(mp_req_json))\n print(\"SENT on \" + self.session_name + \":\")\n print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))",
"def func(self):\n try:\n from evennia.server.models import ServerConfig\n\n if not self.args:\n self.msg(\"You have %s AP remaining.\" % self.caller.roster.action_points)\n return\n if ServerConfig.objects.conf(key=\"DISABLE_AP_TRANSFER\"):\n raise CommandError(\"AP transfers are temporarily disabled.\")\n targ = self.caller.search(self.lhs)\n if not targ:\n return\n try:\n val = int(self.rhs)\n except (ValueError, TypeError):\n raise CommandError(\"AP needs to be a number.\")\n if self.caller.roster.current_account == targ.roster.current_account:\n raise CommandError(\"You cannot give AP to an alt.\")\n receive_amt = val // self.ap_conversion\n if receive_amt < 1:\n raise CommandError(\"Must transfer at least %s AP.\" % self.ap_conversion)\n max_ap = targ.roster.max_action_points\n if targ.roster.action_points + receive_amt > max_ap:\n raise CommandError(\"That would put them over %s AP.\" % max_ap)\n if not self.caller.pay_action_points(val):\n raise CommandError(\"You do not have enough AP.\")\n targ.pay_action_points(-receive_amt)\n self.msg(\n \"Using %s of your AP, you have given %s %s AP.\"\n % (val, targ, receive_amt)\n )\n msg = \"%s has given you %s AP.\" % (self.caller, receive_amt)\n targ.inform(msg, category=msg)\n except CommandError as err:\n self.msg(err)",
"def multiplier(self, multiplier):\n\n self._multiplier = multiplier",
"def sendBuffer():\n dislin.sendbf()",
"def __call__(self, sell: int, price: Union[int, float, Decimal], passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"sell\": int(sell),\n \"price\": price\n }\n\n\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)",
"def command(self):\n saw_error = False\n red_gain = 0.0\n blue_gain = 0.0\n try:\n red_gain = float(self.value_red.get())\n except:\n print(\"red must be floating point value\")\n self.value_red.set(str(self.tcp_comms.tcp_params.awb_gains_r))\n saw_error = True\n try:\n blue_gain = float(self.value_blue.get())\n except:\n print(\"blue must be floating point value\")\n self.value_blue.set(str(self.tcp_comms.tcp_params.awb_gains_b))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.awb_gains_r = red_gain\n self.tcp_comms.tcp_params.awb_gains_b = blue_gain\n self.tcp_comms.send_awb_gains(red_gain, blue_gain)",
"def multiplication_worker(group_name):\n proxy = Proxy(\n group_name=group_name,\n component_type=\"multiply_worker\",\n expected_peers={\"master\": 1},\n )\n\n # Nonrecurring receive the message from the proxy.\n msg = proxy.receive_once()\n print(f\"{proxy.name} receive message from {msg.source}. the payload is {msg.body}.\")\n\n if msg.tag == \"job\":\n replied_payload = np.prod(msg.body)\n proxy.reply(message=msg, tag=\"multiply\", body=replied_payload)",
"def _send(self, msg, buffers=None):\n if self.comm is not None and (self.comm.kernel is not None if hasattr(self.comm, \"kernel\") else True):\n self.comm.send(data=msg, buffers=buffers)",
"def _request_power_buffer(self, log_address=None, callback=None):\n if log_address == None:\n log_address = self._last_log_address\n if log_address != None:\n if bool(self.power_history):\n # Only request last 2 power buffer logs\n self.stick.send(CirclePowerBufferRequest(self.mac, log_address - 1),)\n self.stick.send(\n CirclePowerBufferRequest(self.mac, log_address), callback,\n )\n else:\n # Collect power history info of today and yesterday\n # Each request contains 4 hours except last request\n for req_log_address in range(log_address - 13, log_address):\n self.stick.send(\n CirclePowerBufferRequest(self.mac, req_log_address),\n )\n self.stick.send(\n CirclePowerBufferRequest(self.mac, log_address), callback,\n )",
"def on_send_order(self, data, request):\n self.update_rate_limit(request)",
"def _send(self, action: List[np.ndarray]) -> None:",
"def _send_msg(self, msg):\n self._kernel.comm.send(msg)",
"def testSendShutterTo1(self):\n self.v.message_factory.gopro_set_request_encode.return_value = 3\n self.mgr.sendGoProCommand(mavutil.mavlink.GOPRO_COMMAND_SHUTTER, (1, 0, 0, 0))\n\n self.v.message_factory.gopro_set_request_encode.assert_called_with(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,\n mavutil.mavlink.GOPRO_COMMAND_SHUTTER, (1, 0, 0, 0))\n self.mgr.queueMsg.assert_called_with(3)",
"def receiveMessage(self, currentTime, msg):\n super().receiveMessage(currentTime, msg)\n if self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':\n bid, _, ask, _ = self.getKnownBidAsk(self.symbol)\n if bid and ask:\n self.mid_list.append((bid + ask) / 2)\n if len(self.mid_list) > self.window1: self.avg_win1_list.append(pd.Series(self.mid_list).ewm(span=self.window1).mean().values[-1].round(2))\n if len(self.mid_list) > self.window2: self.avg_win2_list.append(pd.Series(self.mid_list).ewm(span=self.window2).mean().values[-1].round(2))\n if len(self.avg_win1_list) > 0 and len(self.avg_win2_list) > 0:\n if self.avg_win1_list[-1] >= self.avg_win2_list[-1]:\n # Check that we have enough cash to place the order\n if self.holdings['CASH'] >= (self.size * ask):\n self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=True, limit_price=ask)\n else:\n if self.symbol in self.holdings and self.holdings[self.symbol] > 0:\n self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=False, limit_price=bid)\n self.setWakeup(currentTime + self.getWakeFrequency())\n self.state = 'AWAITING_WAKEUP'"
]
| [
"0.5807723",
"0.55108577",
"0.5509182",
"0.5509182",
"0.5228989",
"0.51324904",
"0.50371724",
"0.4978056",
"0.49448347",
"0.49367115",
"0.48951223",
"0.4852278",
"0.4847397",
"0.48421168",
"0.4836499",
"0.48327184",
"0.47988936",
"0.47799703",
"0.4773954",
"0.47554228",
"0.4742315",
"0.4737559",
"0.47250342",
"0.47224727",
"0.4720819",
"0.47056562",
"0.47037706",
"0.46954995",
"0.46863312",
"0.46619546"
]
| 0.63926464 | 0 |
Notify all receivers of received LA/SmoothedPosition response. | async def notify_response_receivers(self, position: int) -> None:
await asyncio.gather(*[
receiver.on_linear_actuator_smoothed_position(position)
for receiver in self.response_receivers
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass",
"def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)",
"async def _notifyUpdate(self):\n for observer in self.__observers:\n await observer.updateSSE()",
"def get_response_notifier(self, receiver):\n return receiver.on_linear_actuator_smoothed_position_snap_multiplier",
"def _notify():\n for observer in Bots._observers:\n observer.update(Bots.BOT_UPDATE)",
"def notify(self):\n for observer in self.observers:\n observer(self.obj)",
"def get_response_notifier(self, receiver):\n return receiver.on_linear_actuator_smoothed_position_activity_threshold",
"def get_response_notifier(self, receiver):\n return receiver.on_linear_actuator_smoothed_position_range_high",
"def get_response_notifier(self, receiver):\n return receiver.on_linear_actuator_smoothed_position_range_low",
"def notify(self) -> None:\n for s in self.subscribers:\n s()",
"def waypoints_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': waypoints received')\n self.base_waypoints = msg.waypoints",
"def _broadcast(self, msg: str) -> None:\n from jesse.routes import router\n\n for r in router.routes:\n # skip self\n if r.strategy.id == self.id:\n continue\n\n if msg == 'route-open-position':\n r.strategy.on_route_open_position(self)\n elif msg == 'route-close-position':\n r.strategy.on_route_close_position(self)\n elif msg == 'route-increased-position':\n r.strategy.on_route_increased_position(self)\n elif msg == 'route-reduced-position':\n r.strategy.on_route_reduced_position(self)\n elif msg == 'route-canceled':\n r.strategy.on_route_canceled(self)\n\n r.strategy._detect_and_handle_entry_and_exit_modifications()",
"def notify_watchers(self):\n log.debug(\"Notifying watchers\")\n for watcher in self._watchers:\n watcher(self._params)",
"def _ros_location_callback(self, msg: NavSatFix):\n self._telegram_updater.bot.send_location(self._telegram_chat_id, location=Location(msg.longitude, msg.latitude))",
"def send_roi_coords(self):\n new_roi_coords = [group.get_roi_coords() for group in self.roi_groups]\n self.signal_status_message.emit('Updated ROI coords.: {}'.format(new_roi_coords))\n self.signal_roi_coords.emit(new_roi_coords)",
"def notifyPlayers(self) -> None:\n # TODO: Used for external communication to a front-end module.\n pass",
"def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)",
"def notify_all(self):\n for voter in self.registered_voters:\n voter.notify(self, None, None, None, final_call=1)\n Legislation.open_legislation.remove(self)",
"def notify(self) -> None:\n logging.info(\"Subject: Notifying observers...\")\n for observer in self._observers:\n observer.update(self.current)",
"def notify(self, ref_output=None, moves_made=None):\n pass",
"def notifyObservers(self):",
"def _notify_observers(self):\n for observer in self.observers:\n observer.notify(self.game_state)",
"def __update_observers(self):\n for observer in self.__observers: \n # print(\"hello\")\n observer.update(self)",
"def waypoints_cb(self, msg):\n t = time.time()\n waypoints = msg.waypoints\n num_wp = len(waypoints)\n\n if self.base_waypoints and self.next_waypoint is not None:\n # Normally we assume that waypoint list doesn't change (or, at least, not\n # in the position where the car is located). If that happens, just handle it.\n if not self.is_same_waypoint(self.base_waypoints[self.next_waypoint],\n waypoints[self.next_waypoint]):\n self.next_waypoint = None # We can't assume previous knowledge of waypoint\n self.base_waypoints = None # Just for debugging. Will be updated later\n rospy.logwarn(\"Base waypoint list changed\")\n else:\n # No change. We could probably return here.\n pass\n\n \"\"\"\n # -- Uncomment for debugging\n # Stamp waypoint index in PoseStamped and TwistStamped headers of internal messages\n for idx in range(len(waypoints)):\n waypoints[idx].pose.header.seq = idx\n waypoints[idx].twist.header.seq = idx\n \"\"\"\n\n self.base_wp_orig_v = [self.get_waypoint_velocity(waypoints, idx) for idx in range(num_wp)]\n\n if debugging and not self.base_waypoints:\n dist = self.distance(waypoints, 0, num_wp-1)\n rospy.loginfo(\"Received: %d waypoints, %.1f m, %.1f m/wp in t=%f\", num_wp, dist, dist/num_wp, time.time()-t)\n\n self.base_waypoints = waypoints\n\n if self.unsubscribe_base_wp:\n self.base_wp_sub.unregister()",
"def _process_ping_response(self, message):\n self.set_available(True, True)\n if self.in_RSSI != message.in_RSSI.value:\n self.in_RSSI = message.in_RSSI.value\n self.do_callback(SENSOR_RSSI_IN[\"id\"])\n if self.out_RSSI != message.out_RSSI.value:\n self.out_RSSI = message.out_RSSI.value\n self.do_callback(SENSOR_RSSI_OUT[\"id\"])\n if self.ping_ms != message.ping_ms.value:\n self.ping_ms = message.ping_ms.value\n self.do_callback(SENSOR_PING[\"id\"])",
"def receiveBroadcastOfDownloadProgress(self, messagesProcessed):\n self.emit(SIGNAL('updateProgressBar(PyQt_PyObject)'), messagesProcessed)",
"def notify_all(self, event: GameEvent):\n for listener in self._listeners:\n listener.notify(event)",
"def register_response_receivers(self, *response_receivers):\n for response_receiver in response_receivers:\n self.translator.message_receivers.append(response_receiver)",
"def waypoints_cb(self, waypoints):\n # This callback should be called only once, with the list of waypoints not yet initialised.\n assert self.waypoints is None\n\n for wp in waypoints.waypoints:\n wp.twist.twist.linear.x = 9.\n\n self.waypoints = waypoints.waypoints # No need to guarantee mutual exclusion in accessing this data member\n\n # Now that the waypoints describing the track have been received, it is time to subscribe to pose updates.\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.DBW_enabled_cb)",
"async def on_positions_replaced(self, positions: List[MetatraderPosition]):\n self._positions = positions"
]
| [
"0.6062182",
"0.59106153",
"0.58623815",
"0.5594468",
"0.55179775",
"0.55153215",
"0.54873025",
"0.5484033",
"0.54707086",
"0.54130167",
"0.5287817",
"0.5217821",
"0.5215302",
"0.52003986",
"0.5177278",
"0.51471597",
"0.5112523",
"0.508691",
"0.5063324",
"0.50475454",
"0.49947444",
"0.49814677",
"0.49782538",
"0.4975686",
"0.49746656",
"0.4962922",
"0.49576047",
"0.49371555",
"0.49194998",
"0.49020085"
]
| 0.7360916 | 0 |
Return Genomic Deletion Range Classifier instance. | def classifier_instance(self):
return GenomicDeletionRangeClassifier() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def classifier_instance(self):\n return CodingDNADeletionClassifier()",
"def validator_instance(self):\n return CodingDNADeletion(*self.params)",
"def genomic_deletion():\n params = {\n \"id\": \"normalize.variation:NC_000003.12%3Ag.10146527_10146528del\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.hvwBZON5KzQGQazIMpeUu_dmyJ-xN8EV\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.hvwBZON5KzQGQazIMpeUu_dmyJ-xN8EV\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.Os_JtDBBPeryeCLpxOCwzvEBThWGmWtr\",\n \"interval\": {\n \"end\": {\"value\": 10146528, \"type\": \"Number\"},\n \"start\": {\"value\": 10146524, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"CT\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0000159\",\n \"vrs_ref_allele_seq\": \"CTCT\"\n }\n return VariationDescriptor(**params)",
"def fixture_name(self):\n return \"genomic_deletion_range\"",
"def pop_range(domain=None):\n libnvtx_pop_range(Domain(domain).handle)",
"def DeleteRange(self, r):\n self.__context.builder.DocumentDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end)\n left = self._blip_data.content[:r.start]\n right = self._blip_data.content[r.end + 1:]\n self._blip_data.content = left + right",
"def classifier_instance(self):\n return CodingDNASubstitutionClassifier()",
"def create_range(range_class):\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n return Range.objects.create(\n name=range_class.name,\n proxy_class=_class_path(range_class))",
"def subject(self) -> global___Range:",
"def remove_chr_from_chrom_annotation(pr_ranges: pr.PyRanges) -> pr.PyRanges:\n df = pr_ranges.df\n df['Chromosome'] = df['Chromosome'].str.replace('chr', '')\n return pr.PyRanges(df)",
"def getRange(self):\n return self.range",
"def __init__(self, ranges=None, *args, **kwargs):\n self.ranges = ranges\n super(DiscreteGeneticAlgorithm, self).__init__(*args, **kwargs)",
"def range_(self):\n return self.bset.range_",
"def define_deletions(genome, num):\n start = []\n end = []\n for n in range(num):\n start_pos, end_pos = get_del_pos(genome)\n # add deletion Variants to genome list\n var = Variant(\"deletion\", start_pos, end_pos, start_pos-end_pos)\n genome.add_variant(var)\n # add to unavail list\n for j in range(start_pos, end_pos):\n genome.unavail_pos.append(j)",
"def DeleteRange(self, rangeText, silent=False):\n startTaxon, stopTaxon, startColumn, stopColumn = self.ParseIndex(rangeText)\n if (self.translated == True):\n startColumn = startColumn * 3\n stopColumn = (stopColumn * 3) + 2\n if (startTaxon >= 0): #Make sure we had a valid range\n changeLength = 0\n deleteTaxon = False\n if ((startColumn == 0) & (stopColumn == len(self.alignment[0]) - 1)):\n deleteTaxon = True\n if ((startTaxon > 0) | (stopTaxon < len(self.alignment) - 1)):\n changeLength = (stopColumn - startColumn) + 1\n taxon = 0\n newSequences = []\n for Sequence in self.alignment:\n if (taxon in range(startTaxon, stopTaxon + 1)):\n if (not deleteTaxon):\n if (startColumn > 0):\n Sequence.seq = Sequence.seq[:startColumn] + Sequence.seq[stopColumn + 1:]\n else:\n Sequence.seq = Sequence.seq[stopColumn + 1:]\n if (changeLength):\n Sequence.seq = Sequence.seq + Seq('-' * changeLength)\n newSequences.append(Sequence)\n else:\n newSequences.append(Sequence)\n taxon += 1\n self.alignment = MultipleSeqAlignment(newSequences)\n if (not silent):\n self.Show(self.displayedColumn)\n self.BackupAlignment()",
"def delete_ga_classifiers(self,\n population: ClassifiersList,\n match_set: ClassifiersList,\n child_no: int,\n randomfunc=random):\n del_no = self.overall_numerosity() + child_no - self.cfg.theta_as\n if del_no <= 0:\n # There is still room for more classifiers\n return\n\n # print(\"GA: requested to delete: %d classifiers\", del_no)\n for _ in range(0, del_no):\n self.delete_a_classifier(\n match_set, population, randomfunc=randomfunc)",
"def get_relevance_model_cls(self):\n return ClassificationModel",
"def getDeletion(self, *args):\n return _libsbml.Submodel_getDeletion(self, *args)",
"def interval_class(self):\n return self._interval_class",
"def add_deletion_in_range(design: sc.DNADesign, helix: int, start: int, end: int, deletion_offset: int):\n candidate_offsets = []\n for candidate_deletion_offset in range(start, end):\n if valid_deletion_offset(design, helix, candidate_deletion_offset):\n candidate_offsets.append(candidate_deletion_offset)\n if len(candidate_offsets) == 0:\n raise ValueError(f\"no pair of Substrands found on Helix {helix} \"\n f\"overlapping interval [{start},{end})\")\n if deletion_offset < 0:\n # pick offset furthest from edges of interval\n candidate_offsets.sort(key=lambda offset: min(offset - start, end - offset))\n deletion_absolute_offset = candidate_offsets[0]\n else:\n deletion_absolute_offset = start + deletion_offset\n design.add_deletion(helix, deletion_absolute_offset)",
"def pop(self):\n return _uhd_swig.range_vector_t_pop(self)",
"def coding_dna_deletion(erbb2_context):\n params = {\n \"id\": \"normalize.variation:NM_004448.3%3Ac.2264_2278delTGAGGGAAAACACAT\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.NUCURWYivhjC4oyBtzgJZ27SaaMY08Q7\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.NUCURWYivhjC4oyBtzgJZ27SaaMY08Q7\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.3uPWAjsdzd8MbAqw8DV46eBLK8tQRyEs\",\n \"interval\": {\n \"end\": {\"value\": 2453, \"type\": \"Number\"},\n \"start\": {\"value\": 2437, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.y9b4LVMiCXpZxOg9Xt1NwRtssA03MwWM\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"T\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"transcript\",\n \"structural_type\": \"SO:0000159\",\n \"vrs_ref_allele_seq\": \"TTGAGGGAAAACACAT\",\n \"gene_context\": erbb2_context\n }\n return VariationDescriptor(**params)",
"def get_splice_donor(self):\n if self.strand == \"+\":\n return self.bounds[0]\n elif self.strand == \"-\":\n return self.bounds[1]",
"def range (self):\n return self._range",
"def range (self):\n return self._range",
"def __init__(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()",
"def range(self):\n return self.range_array",
"def __new__(cls, contig, start, end, name='', score='', strand='',\n block_ids=None, superblock_ids=None,\n coverage='', completeness=''):\n return super(BaseInterval, cls).__new__(\n cls,\n contig,\n start,\n end,\n name,\n score,\n strand,\n (block_ids or []), # default to empty lists\n (superblock_ids or []), # do\n coverage,\n completeness\n )",
"def range_field(self):\n return self.db.range_field",
"def discard(self, rng: Rangelike) -> None:\n # be lazy and do O(n^2) erasure\n if isinstance(rng, RangeSet):\n temp = self.copy()\n for r in rng:\n temp.discard(r)\n self._ranges = temp._ranges\n return\n # elif _is_iterable_non_string(rng):\n # raise ValueError(\"argument is iterable and not range-like. Use .difference_update() instead\")\n # make sure rng is a Range\n rng = Range(rng)\n # remove rng from our ranges until we no longer need to\n current_node = self._ranges.first\n while current_node:\n new_range = current_node.value.difference(rng)\n if not new_range or new_range.isempty():\n # first node is entirely consumed by the range to remove. So remove it.\n self._ranges.pop_node(current_node)\n elif isinstance(new_range, RangeSet):\n # replace current value with lower, and add higher just afterwards.\n # It can't possibly overlap with the next range, because they are disjoint.\n current_node.value = new_range._ranges.first.value\n self._ranges.insert_after(current_node, new_range._ranges.last.value)\n # in this case, we also know that we just hit the top of the discarding range.\n # therefore, we can short-circuit.\n break\n else:\n # replace just this element, which was cut off\n if new_range > current_node.value:\n # we're only computing the difference of one contiguous range.\n # if all we've done is cut off the bottom part of this range, then\n # we must have reached the top of the discarding range.\n # therefore, we can short-circuit.\n current_node.value = new_range\n break\n else:\n # otherwise, we just change this element (maybe replace it with itself) and keep going.\n current_node.value = new_range\n current_node = current_node.next"
]
| [
"0.7024641",
"0.6220088",
"0.568311",
"0.5404298",
"0.5216875",
"0.50686204",
"0.49060804",
"0.48364788",
"0.47991565",
"0.47258618",
"0.47212866",
"0.4713959",
"0.46968925",
"0.46862343",
"0.46761173",
"0.46747065",
"0.46739888",
"0.46273258",
"0.45934516",
"0.45488095",
"0.4538248",
"0.4522043",
"0.4493071",
"0.44848147",
"0.44848147",
"0.4474904",
"0.44311458",
"0.4429764",
"0.4416481",
"0.43952176"
]
| 0.87302405 | 0 |
Return Genomic Deletion Range fixture name. | def fixture_name(self):
return "genomic_deletion_range" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fixture_name(self):\n return \"coding_dna_deletion\"",
"def fixture_name(self):\n return \"coding_dna_insertion\"",
"def fixture_name(self):\n return \"genomic_silent_mutation\"",
"def fixture_name(self):\n return \"coding_dna_substitution\"",
"def fixture_name(self):\n return 'amino_acid_insertion'",
"def wemo_entity_suffix_fixture():\n return \"\"",
"def getName(self):\n return _libsbml.Deletion_getName(self)",
"def get_name_for_id(self):\n return self._id if self._id is not None else self.fixture",
"def genomic_deletion():\n params = {\n \"id\": \"normalize.variation:NC_000003.12%3Ag.10146527_10146528del\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.hvwBZON5KzQGQazIMpeUu_dmyJ-xN8EV\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.hvwBZON5KzQGQazIMpeUu_dmyJ-xN8EV\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.Os_JtDBBPeryeCLpxOCwzvEBThWGmWtr\",\n \"interval\": {\n \"end\": {\"value\": 10146528, \"type\": \"Number\"},\n \"start\": {\"value\": 10146524, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"CT\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0000159\",\n \"vrs_ref_allele_seq\": \"CTCT\"\n }\n return VariationDescriptor(**params)",
"def region_name(self):\n return self.random_element(self._regions)[1]",
"def delete_trigger_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.DELETE_TRIGGER_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_DELETE_TRIGGER_PREFIX + self._old_table.name\n else:\n return constant.DELETE_TRIGGER_PREFIX + constant.GENERIC_TABLE_NAME",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def fixture_make_unique_name():\n def _make_unique_name(prefix):\n return f\"{prefix}{time.time_ns()}\"\n return _make_unique_name",
"def classifier_instance(self):\n return GenomicDeletionRangeClassifier()",
"def get_gene_deletion_string(genename: str):\n base_str = r'$\\it{gene}$$\\Delta$'\n new_str = base_str.replace('gene', genename)\n return new_str",
"def getSuffix(self):\r\n return self.chrom",
"def get_step_fixture_name(name, type_, encoding=None):\n return \"pytestbdd_{type}_{name}\".format(\n type=type_, name=force_encode(name, **(dict(encoding=encoding) if encoding else {}))\n )",
"def _identifier_suffix(self):\r\n return ''",
"def range(self) -> str:\n return f\"{self.name}!A:F\"",
"def define_deletions(genome, num):\n start = []\n end = []\n for n in range(num):\n start_pos, end_pos = get_del_pos(genome)\n # add deletion Variants to genome list\n var = Variant(\"deletion\", start_pos, end_pos, start_pos-end_pos)\n genome.add_variant(var)\n # add to unavail list\n for j in range(start_pos, end_pos):\n genome.unavail_pos.append(j)",
"def tname(self) -> str:",
"def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")",
"def setName(self, *args):\n return _libsbml.Deletion_setName(self, *args)",
"def get_suffix(self):\n return '%s%d' % (self.disk.devletters(), self.get_index() + 1)",
"def unique_dataset_name(prefix: str = \"selenium-dataset\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'",
"def getName(self):\n return _libsbml.UnitDefinition_getName(self)",
"def _testcase_name(testcase):\n name = os.path.splitext(os.path.basename(testcase))[0]\n name = name.replace('-', '_')\n name = 'test_{name}'.format(name=name)\n\n assert name.isidentifier()\n\n return name",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:"
]
| [
"0.76704973",
"0.68228257",
"0.6700993",
"0.6554753",
"0.6247812",
"0.5513192",
"0.54988897",
"0.5480645",
"0.5433036",
"0.5414399",
"0.52840185",
"0.5280451",
"0.5257719",
"0.5250501",
"0.5206071",
"0.5159251",
"0.5125642",
"0.50086826",
"0.5006948",
"0.49726692",
"0.49377516",
"0.49317268",
"0.4929031",
"0.49050415",
"0.48844928",
"0.48639545",
"0.48296693",
"0.48230162",
"0.48230162",
"0.48230162"
]
| 0.91267174 | 0 |
Attempt to find a host with an incomplete tag (no key). Expects a 400 response. | def test_get_host_with_invalid_tag_no_key(mq_create_three_specific_hosts, api_get):
url = build_hosts_url(query="?tags=namespace/=Value")
response_status, response_data = api_get(url)
assert response_status == 400 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_tag_part_too_long(tag_query, part_name, mq_create_three_specific_hosts, api_get):\n\n url = build_hosts_url(query=f\"?tags={tag_query}\")\n response_status, response_data = api_get(url)\n\n assert_error_response(\n response_data, expected_status=400, expected_detail=f\"{part_name} is longer than 255 characters\"\n )",
"def _host_unreachable(host):\n try:\n hostname, aliaslist, addrlist = socket.gethostbyname_ex(host)\n if not all(addr in PROD02_ADDRS for addr in addrlist):\n return Problem(host, \"DNS does not point to prod02 ingress\", ProblemType.WRONG_DNS)\n except socket.gaierror as e:\n return Problem(host, str(e), ProblemType.UNKNOWN_HOST)\n return None",
"def test_tags_content_search_invalid_tag(self):\n\n global NON_EXISTENT_TAG\n\n po = self.catalog.load_pageobject('TagsPage')\n\n self.browser.proxy_client.new_har(\"page\")\n po.goto_page()\n har_entry = self.browser.page_load_details()\n\n start_url = po.current_url()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_content([NON_EXISTENT_TAG])\n har_entry = self.browser.page_load_details()\n\n end_url = po.current_url()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is True, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" searching for content with the tag '%s'\" % (NON_EXISTENT_TAG) \\\n + \" did not return an error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))",
"def test_tags_tag_search_invalid_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n global NON_EXISTENT_TAG\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(NON_EXISTENT_TAG)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code\" % (NON_EXISTENT_TAG) \\\n + \"on the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))",
"def test_host_header_no_port_in_uri(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)",
"def test_host_header_no_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:80/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)",
"def test_host_header_mismatch_empty(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://[email protected]/ HTTP/1.1\\r\\nHost: \\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def test_010_describe_by_invalid_pid(self):\n client = test_client.TestClient(context.node[\"baseurl\"])\n # The exception is caused by the body being empty since describe() uses a\n # HEAD request.\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, \"_invalid_pid_\")",
"def TEST_fetch_host_info( hostname ):\n if os.path.exists(\"/tmp/test-poll-host-pubkey.pub\"):\n with open(\"/tmp/test-poll-host-pubkey.pub\", \"r\") as f:\n pubk = f.read()\n host_info = {}\n host_info['hostname'] = hostname\n host_info['public_key'] = pubk.strip()\n return host_info \n\n raise Exception(\"Missing /tmp/test-poll-host-pubkey.pub\")",
"def _transport_key_not_found():\n pecan.abort(404, u._('Not Found. Transport Key not found.'))",
"def _host_exists(self, host_name):\n hosts = self.host_obj.search_by_name(host_name)\n\n if len(hosts) > 0:\n for host in hosts:\n hostname = host['match']\n if host_name == hostname:\n return hostname\n return hostname\n LOG.debug(\"no host found for:\" + host_name)\n return None",
"def urlvoid_check(name, api_key):\n if not is_fqdn(name):\n return None\n\n url = 'http://api.urlvoid.com/api1000/{key}/host/{name}'.format(key=api_key, name=name)\n response = requests.get(url)\n tree = ET.fromstring(response.text)\n if tree.find('./detections/engines'):\n return [e.text for e in tree.find('./detections/engines')]\n else:\n return None",
"def dnslookup(url) -> 'text': \n try:\n hn = socket.gethostbyaddr(url)[0] \n except socket.error as msg: \n hn = 'nohost'\n return hn",
"def find_by_status(self, host, state):",
"def test_get_host(self):\n pass",
"def opencloud_fetch_host_info( hostname ):\n raise Exception(\"Opencloud support not implemented\")",
"def get_ip_by_unknown(client, host):\n\n if not is_valid_ip(host):\n # If it is not an ip, assume it's a container name:\n host = get_ip_by_container_name(client, host)\n return host",
"def test_get_ip_tags_invalid_ip(client, database):\n\n invalid_ip = \"http://127.0.0.1:5000/ip-tags/10.1.2.3000\"\n response = client.get(invalid_ip)\n response_data = response.get_json()\n\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert (\n response_data[\"error\"]\n == \"400 Bad Request: Address 10.1.2.3000 does not have IPv4 format\"\n )",
"def test_retrieve_not_found(self):\n\n # get a valid digest\n content = \"\"\"\\xe1\\xbc\\x84\\xce\\xbd\\xce\\xb4\\xcf\\x81\\xce\\xb1\n \\xce\\xbc\\xce\\xbf\\xce\\xb9\n \\xe1\\xbc\\x94\\xce\\xbd\\xce\\xbd\\xce\\xb5\\xcf\\x80\\xce\\xb5\"\"\"\n namespace = 'default'\n collection = generate_collection(namespace, [content])\n preupload_status = self.call_api(\n 'preupload', self.message_to_dict(collection), 200)\n message = preupload_status.json.get(u'items', [{}])[0]\n\n # get the digest\n request = preupload_status_to_request(message, content)\n embedded = validate(\n request.upload_ticket, handlers_endpoints_v1.UPLOAD_MESSAGES[0])\n\n # don't upload data; try to retrieve\n retrieve_request = handlers_endpoints_v1.RetrieveRequest(\n digest=embedded['d'], namespace=handlers_endpoints_v1.Namespace())\n with self.call_should_fail('404'):\n self.call_api('retrieve', self.message_to_dict(retrieve_request), 200)",
"def test_add_hostname(self):\n hostname = 'test123.com'\n info = self.api.add_hostname(hostname, tags=['asd'])\n self.assertEqual(info['value'], hostname)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def get_from_host(cls, host, silent=False):\n if cls.search([], count=True) == 1:\n return cls.search([])[0]\n try:\n website, = cls.search([('name', '=', host)])\n except ValueError:\n if not silent:\n raise WebsiteNotFound()\n else:\n return website",
"def test_host_header_with_old_proto(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.0\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: Host header field in protocol prior to HTTP/1.1\",\n )",
"def test_host_header_mismatch(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://[email protected]/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)",
"def test_default_host_http_required(self):\n client = self.base_scenario(\n frang_config=\"\", requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"]\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)",
"def test_missingName(self):\n servers = {\n ('1.1.2.3', 53): {\n (b'foo.example.com', A): {\n 'rCode': ENAME,\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress(b'foo.example.com')\n return self.assertFailure(d, DNSNameError)",
"def testGetHostConfig_notExist(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n host = pool.GetHostConfig('not_exist')\n self.assertIsNone(host)",
"def testGetHostConfig_notExist(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n host = pool.GetHostConfig('not_exist')\n self.assertIsNone(host)",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def handle_hostname(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<ipnr>')\n return\n try:\n hostname = socket.gethostbyaddr(item)\n ievent.reply(hostname[0])\n except:\n ievent.reply(\"can't match \" + str(item))",
"def test_index_hostid_ok(self):\n self.check_response('/attributes?h=1',\n ('data-url=\"/attributes/tabledata.json\"',\n 'params[\\'h\\'] = \\'1\\';'))"
]
| [
"0.5940153",
"0.5782371",
"0.56358194",
"0.5556439",
"0.55262023",
"0.5500007",
"0.54433006",
"0.5438519",
"0.54059374",
"0.5344553",
"0.5337218",
"0.5327904",
"0.52971035",
"0.526064",
"0.52405095",
"0.5223708",
"0.52061826",
"0.519632",
"0.5188607",
"0.51555604",
"0.5137124",
"0.5113854",
"0.51039404",
"0.50862694",
"0.50847965",
"0.50794005",
"0.5067217",
"0.5057125",
"0.5039144",
"0.5035776"
]
| 0.6690849 | 0 |
Send a request to find hosts with a string tag where the length of the namespace exceeds the 255 character limit | def test_get_host_tag_part_too_long(tag_query, part_name, mq_create_three_specific_hosts, api_get):
url = build_hosts_url(query=f"?tags={tag_query}")
response_status, response_data = api_get(url)
assert_error_response(
response_data, expected_status=400, expected_detail=f"{part_name} is longer than 255 characters"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_host_with_invalid_tag_no_key(mq_create_three_specific_hosts, api_get):\n url = build_hosts_url(query=\"?tags=namespace/=Value\")\n response_status, response_data = api_get(url)\n\n assert response_status == 400",
"def getRequestHostname():",
"def test_add_hostname(self):\n hostname = 'test123.com'\n info = self.api.add_hostname(hostname, tags=['asd'])\n self.assertEqual(info['value'], hostname)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def get_ns_descriptors(self, token, _filter=None, host=None, port=None): \n if host is None:\n base_path = self._base_path.format(self._host, self._port)\n else:\n base_path = self._base_path.format(host, port)\n\n query_path = ''\n if _filter:\n query_path = '?_admin.type=' + _filter\n\n _endpoint = \"{0}/nsd/v1/ns_descriptors_content{1}\".format(base_path, query_path)\n result = {'error': True, 'data': ''}\n headers = {\"Content-Type\": \"application/yaml\", \"accept\": \"application/json\",\n 'Authorization': 'Bearer {}'.format(token)}\n\n try:\n r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)\n except Exception as e:\n result['data'] = str(e)\n return result\n\n if r.status_code == requests.codes.ok:\n result['error'] = False\n\n result['data'] = r.text\n return json.dumps(result)",
"def hostname(name: str = \"\") -> str:\n ...",
"def test_networking_project_network_tag_get(self):\n pass",
"def get_hostname_suggestion(LayerId=None):\n pass",
"def getaddrinfo(host: str, port: int) -> List:\n ...",
"def test_read_net_namespace(self):\n pass",
"def get_ip(tag,env=None,eip=False):\n api_url = 'http://api.rahulinux.io/ip?host={0}&env={1}&eip={2}'\n try:\n resp = requests.get(api_url.format(tag,env,eip))\n except requests.exceptions.RequestException as e:\n return e\n if len(resp.text) >= 30:\n return resp.text.split()\n return [ resp.text ]",
"def ns(tags):\n return '/'.join(['*[local-name()=\"%s\"]' % t if t not in ['*', '..', '.'] else t\n for t in tags.split('/') if t])",
"def __getmessage__():\n\tmsg = \\\n\t\t 'M-SEARCH * HTTP/1.1\\r\\n' \\\n\t\t 'HOST:239.255.255.250:1900\\r\\n' \\\n\t\t 'ST:upnp:rootdevice\\r\\n' \\\n\t\t 'MX:2\\r\\n' \\\n\t\t 'MAN:\"ssdp:discover\"\\r\\n' \\\n\t\t '\\r\\n'\n\n\treturn msg",
"def getHost():",
"def getHost():",
"def custom_dns_resolver(hostname, type='A'):\n nameservers = globals.config.service.initial_dns\n custom_resolver = dns.resolver.Resolver()\n custom_resolver.nameservers = nameservers\n answer = custom_resolver.query(hostname, type)\n\n return str(random.choice(answer))",
"def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)",
"def test_list_net_namespace(self):\n pass",
"def parse_digtxt(querystr,resultset):\n response = pydig.query(querystr, 'txt')\n for elem in response[0].split():\n if 'include:' in elem:\n resultset = parse_digtxt(elem[8:], resultset)\n else:\n if 'ip4' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n if 'ip6' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n return resultset",
"def test_search_nips(client):\n subjects, request_id = client.search_nips(\n [\"3245174504\", \"1854510877\", \"7250018312\"]\n )\n\n assert len(subjects) == 3",
"def host_urls_command():\n # 1. Get input host and limit from Demisto\n host = demisto.args().get('host')\n limit = demisto.args().get('limit')\n # 2. Get the host report from SlashNext API\n response = host_urls(host=host, limit=limit)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n snx_ioc_cont_list = [] # type: List[Dict[str, str]]\n dbot_score_cont_list = [] # type: List[Dict[str, str]]\n url_cont_list = [] # type: List[Dict[str, str]]\n snx_ec_cont_list = [] # type: List[Dict[str, str]]\n for url_data in response.get('urlDataList'):\n if url_data.get('threatData').get('verdict').startswith('Unrated') is False:\n snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data, is_scan=True)\n snx_ioc_cont_list.extend(snx_ioc_cont)\n dbot_score_cont_list.extend(dbot_score_cont)\n url_cont_list.extend(url_cont)\n snx_ec_cont_list.append(snx_ioc_cont[0])\n\n ec = {} # type: Dict[str, List[Dict[str, str]]]\n if response.get('urlDataList')[0].get('threatData').get('verdict').startswith('Unrated') is False:\n ec = {\n 'SlashNext.URL(val.Value === obj.Value)': snx_ec_cont_list,\n 'DBotScore': dbot_score_cont_list,\n 'URL': url_cont_list\n }\n\n host = host.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Host URLs\\n' \\\n '##### host = {}'.format(host.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont_list,\n ['Value',\n 'Type',\n 'Verdict',\n 'ScanID',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont_list)",
"def _resolve_any_to_text(name, ns, dom):\n ret = []\n cmdline = (\"dig +noadditional +noquestion +nocmd \"\n \"+nostats +nocomment %s any @%s | grep ^%s\"\n % (name, ns, name))\n for line in os.popen(cmdline, \"r\"):\n line = re.sub(r'\\s+', ' ', line).strip()\n line = re.sub(r'\\.%s. ' % (dom), ' ', line)\n line = re.sub(r'^%s. ' % (dom), '@ ', line)\n line = \"%-30s %6s %3s %6s %s\" % tuple(re.split(r'\\s+', line, 4))\n ret.append(line)\n return ret",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def test_longer_string():\n response = echo_client('GET test/test/test HTTP/1.2')\n assert '505' in response",
"def handle_hostname(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<ipnr>')\n return\n try:\n hostname = socket.gethostbyaddr(item)\n ievent.reply(hostname[0])\n except:\n ievent.reply(\"can't match \" + str(item))",
"def parseHostList( ipstring ):\r\n\r\n # ideally, we should be able to handle these cases:\r\n # w.x.y.z, .x.y.z, .y.z, .z\r\n # w.x.y.a-b, .x.y.a-b, .x.a-b, .a-b\r\n # w.x.y.z-a.b.c.d, w.x.y-a.b.c, w.x-a.b, w-a\r\n # we also need to be able to parse CIDR ranges. Urgh. w.x.y.z/0\r\n \r\n # ...but for the sake of simplicity we'll implement a subset, consisting of these cases:\r\n # 1. w.x.y.z\r\n # 2. w.x.y.z1-zN\r\n # 3. .z1-.zN\r\n\r\n currentNetwork = '0.0.0'\r\n groups = ipstring.split(',') \r\n iplist = []\r\n for i in groups:\r\n\r\n octets = i.split('.')\r\n if len(octets) == 4: # cases 1 and 2\r\n currentNetwork = \"%s.%s.%s\" % (octets[0],octets[1],octets[2])\r\n iprange = getRange(octets[3])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n\r\n elif len(octets) == 2: # case 3\r\n network = currentNetwork\r\n iprange = getRange(octets[1])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n \r\n else:\r\n print 'syntax error in specifying host list!'\r\n sys.exit(1)\r\n \r\n iplist += ips\r\n\r\n return uniq(iplist) # get rid of repeats\r",
"def test_sanitized_hostname(self):\n value = \" ../ ../some/dubious/hostname \"\n response = clean.hostname(value)\n assert response == \"somedubioushostname\"",
"def test_udp_query():\n assert dnsck_query(\"8.8.8.8\", \"google.com\", \"a\", 1) == 0",
"def qhost():\n command = '%s -xml -q' % QHOST_PATH\n result_xml = subprocess.check_output([command], env=ENV, shell=True)\n hosts_element = xml.etree.ElementTree.fromstring(result_xml)\n hosts = []\n for host_element in hosts_element:\n if host_element.get('name') == 'global':\n continue\n host = {\n 'name': host_element.get('name')\n }\n queues = {}\n for host_value in host_element:\n if host_value.tag == 'hostvalue':\n host[host_value.get('name')] = host_value.text\n elif host_value.tag == 'queue':\n queue_name = host_value.get('name')\n queue = {}\n for queue_value in host_value:\n queue[queue_value.get('name')] = queue_value.text\n queues[queue_name] = queue\n host['queues'] = queues\n hosts.append(host)\n return hosts",
"async def discover(self, timeout: int):",
"def build_ssdp_search_packet(\n ssdp_target: AddressTupleVXType, ssdp_mx: int, ssdp_st: str\n) -> bytes:\n return (\n \"M-SEARCH * HTTP/1.1\\r\\n\"\n \"HOST:{target}\\r\\n\"\n 'MAN:\"ssdp:discover\"\\r\\n'\n \"MX:{mx}\\r\\n\"\n \"ST:{st}\\r\\n\"\n \"\\r\\n\".format(\n target=get_host_port_string(ssdp_target), mx=ssdp_mx, st=ssdp_st\n ).encode()\n )"
]
| [
"0.5158016",
"0.5090252",
"0.5040747",
"0.4784739",
"0.47647437",
"0.47189265",
"0.46949816",
"0.46308947",
"0.46128097",
"0.45641142",
"0.45574144",
"0.45475337",
"0.45263064",
"0.45263064",
"0.4514373",
"0.45076632",
"0.44830647",
"0.4478172",
"0.4459085",
"0.44373482",
"0.44331256",
"0.44327965",
"0.4411499",
"0.44086888",
"0.44025806",
"0.4396792",
"0.43935877",
"0.43929055",
"0.43880144",
"0.43639955"
]
| 0.60618293 | 0 |
The number of full days since last checkin. We consider the current day to start from 5AM local time. Note that this can be more than 24 hours ago! | def days_since_last_checkin(self):
# TODO use local timezone
checkin_date = (self.last_checkin - datetime.timedelta(hours=5)).date()
today = datetime.date.today()
return (today - checkin_date).days | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_number_days(self):\r\n return 1",
"def get_fine_due(self):\n fine = 0\n ndays = (dt.datetime.now() - self._checkout_date).days\n ndays_over = ndays - self.loantime\n if ndays_over > 0:\n fine += (ndays_over * self.finerate)\n return fine",
"def day(self):\n return 0",
"def day(self):\n return 0",
"def last_seen_days(self):\n return self.last_seen.days",
"def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)",
"def joined_days(self):\n return (timezone.now() - self.user.date_joined).days",
"def _clock_day(self):\n return int(self._shifted_time / 86400)",
"def elapsed_days(self) -> int:\n return (datetime.today() - self.release_datetime).days",
"def _unit_day(self):\n return (self.time_base * 60.0) * 24.0",
"def days(self):\n ends_at = created_at = datetime.datetime.now().replace(tzinfo=utc)\n if self.created_at:\n created_at = self.created_at\n if self.ends_at:\n ends_at = self.ends_at\n return (ends_at - created_at).days",
"def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0",
"def compute_real_days(self):\n if (self.end_date > date.today()):\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, date.today())\n else:\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, self.end_date)",
"def age(self):\n then = self.ship_date\n if self.status == 'delivered':\n now = self.event_time.date()\n else:\n now = datetime.datetime.now().date()\n delta = now - then\n return delta.days",
"def days_registered(self):\n days_registered = (datetime.utcnow() - self.date_joined).days\n if not days_registered:\n return 1\n return days_registered",
"def unit_day(self):\n return (self.time_base * 60.0) * 24.0",
"def pullGateCountToday():\n now = datetime.now()\n # catch 23 hour when date changes\n start_date = now - timedelta(hours=1)\n start_date = start_date.strftime(\"%Y-%m-%d\")\n end_date = now + timedelta(days=1)\n end_date = end_date.strftime(\"%Y-%m-%d\")\n return pullGateCountDateRange(start_date, end_date)",
"def days_since_start():\n initial = initial_time('time_stamp.txt')\n actual = time.localtime(time.time())\n if initial[0] == actual[0]:\n return actual[7] - initial[7]\n else:\n if calendar.isleap(initial[0]):\n return (366 - initial[7]) + actual[7]\n else:\n return (365 - initial[7]) + actual[7]",
"def hindu_day_count(cls, date):\n return date - cls.EPOCH",
"def calculate_days(time):\n return int(time / 86400)",
"def get_current_day() -> int:\n return datetime.now().day",
"def thirty_days_ago():\n return date.today() - timedelta(days=30)",
"def current_seconds_worked(user):\n if not user.profile.clock_in_time:\n return 0\n now = pytz.timezone('America/Los_Angeles').localize(datetime.now())\n time_diff = now - user.profile.clock_in_time\n return time_diff.total_seconds()",
"def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period",
"def calculate_days(self):\n tweet_time = self.data['created_at']\n birthday = self.data['user']['created_at']\n my_dates = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6, \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10,\n \"Nov\": 11, \"Dec\": 12}\n # This could have easily been cast into one of the numerous datetime function's immediately, however\n # it was causing a major slowdown to the program and so the below was a quick fix.\n ######################################################################\n # NOTICE: IF SOMETHING BREAKS THIS IS MOST LIKELY TO BE WHAT IT IS #\n ######################################################################\n tweet_time2 = [my_dates[tweet_time[4:7]], int(tweet_time[8:10]), int(tweet_time[26:])]\n birthday2 = [my_dates[birthday[4:7]], int(birthday[8:10]), int(birthday[26:])]\n first = date(tweet_time2[2], tweet_time2[0], tweet_time2[1])\n second = date(birthday2[2], birthday2[0], birthday2[1])\n final = first - second\n days = final.days\n follows = self.data['user']['followers_count']\n favorites = self.data['user']['favourites_count']\n statuses = self.data['user']['statuses_count']\n favpd = favorites/days\n folpd = follows/days\n statpd = statuses/days\n return {\"days\": final.days, \"folpd\": folpd, \"favpd\": favpd, \"statpd\": statpd}",
"def what_night_is_it():\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight",
"def brasilia_day():\n return (dt.datetime.utcnow() + dt.timedelta(hours=-3)).replace(hour=0, minute=0, second=0, microsecond=0)",
"def get_today_stat(cls):\n return cls.get_specified_days_stat(0)",
"def remaining_trial_days(self):\n try:\n return self.count_days_from_now(self.trial_ended_at)\n except AttributeError:\n return 0",
"def days(self):\n return int(self.hours / 24)"
]
| [
"0.6554062",
"0.63184106",
"0.62983453",
"0.62983453",
"0.62096196",
"0.6207035",
"0.61662465",
"0.6056306",
"0.6037407",
"0.60328513",
"0.60032517",
"0.5990085",
"0.59769547",
"0.593517",
"0.5919269",
"0.5907213",
"0.5886326",
"0.58565485",
"0.5838369",
"0.583007",
"0.58180374",
"0.5810705",
"0.57853824",
"0.578503",
"0.5784664",
"0.5772053",
"0.5752595",
"0.5752253",
"0.5727861",
"0.5697507"
]
| 0.76280934 | 0 |
Sets the owner of this link. | def set_owner(self, owner: Optional["STACObject_Type"]) -> "Link":
self.owner = owner
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_owner(self, owner):\n self.__owner = owner",
"def set_owner(self, owner):\n self.settings[\"owner\"] = owner",
"def owner(self, owner: str):\n\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner_id(self, owner_id):\n self._owner_id = owner_id",
"def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)",
"def owner_id(self, owner_id):\n\n self._owner_id = owner_id",
"def owner_reference(self, owner_reference):\n\n self._owner_reference = owner_reference",
"def is_owner(self, is_owner):\n\n self._is_owner = is_owner",
"def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id",
"def scope_owner(self, scope_owner):\n\n self._scope_owner = scope_owner",
"def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner",
"def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner",
"def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id",
"def possessed_by(self, other):\r\n self.owner = other",
"def owner(self) -> str:\n return self._owner",
"def set_owner_name(self, data, **kwargs):\n try:\n git_url = GitURL.parse(data[\"git_url\"])\n except UnicodeError as e:\n raise ValidationError(\"`git_url` contains unsupported characters\") from e\n except ConfigurationError as e:\n raise ValidationError(\"Invalid `git_url`\") from e\n\n if git_url.owner is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"owner\"] = git_url.owner\n\n if git_url.name is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"name\"] = git_url.name\n data[\"slug\"] = normalize_to_ascii(data[\"name\"])\n\n return data",
"def business_owner(self, business_owner):\n\n self._business_owner = business_owner",
"def technical_owner(self, technical_owner):\n\n self._technical_owner = technical_owner",
"def owner_type(self, owner_type):\n\n self._owner_type = owner_type",
"def owner_id(self) -> str:\n return self.__owner_id",
"def pre_save(self, obj):\n obj.owner = self.request.user",
"def owner(self):\n return self._owner"
]
| [
"0.8302126",
"0.8135389",
"0.8064421",
"0.79439116",
"0.79439116",
"0.79439116",
"0.79439116",
"0.7895545",
"0.7895545",
"0.7895545",
"0.7895545",
"0.7442883",
"0.7366591",
"0.7358244",
"0.72455347",
"0.6930826",
"0.6841902",
"0.6657067",
"0.6593225",
"0.6468359",
"0.640672",
"0.63263345",
"0.6211192",
"0.6204912",
"0.6189089",
"0.61352134",
"0.61348814",
"0.6127851",
"0.60984254",
"0.60971767"
]
| 0.85328895 | 0 |
Optional title for this link. If not provided during instantiation, this will attempt to get the title from the STAC object that the link references. | def title(self) -> Optional[str]:
if self._title is not None:
return self._title
if self._target_object is not None and isinstance(
self._target_object, pystac.Catalog
):
return self._target_object.title
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_title(self) -> Optional[str]:\n return self.title",
"def title(self) -> Optional[str]:\n return self.get(\"/Title\")",
"def Title(self, default={}):\n return HEP.TitleObject(self.data.get('title', default))",
"def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"",
"def resource_link_title(self):\n return self.request.POST.get(\"resource_link_title\", self.resource_link_id)",
"def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")",
"def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")",
"def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")",
"def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")",
"def title(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"title\")",
"def title(self) -> \"str\":\n return self._attrs.get(\"title\")",
"def title(self) -> \"str\":\n return self._attrs.get(\"title\")",
"def title(self) -> \"str\":\n return self._attrs.get(\"title\")",
"def title(self) -> \"str\":\n return self._attrs.get(\"title\")",
"def Title(self, default=None):\n return self.data.get('title', default)",
"def title(self):\n return self.get(\"title\")",
"def title(self):\n return self.properties.get('Title', None)",
"def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n return title",
"def title(self):\n return self.get(self._names[\"title\"])",
"def title(self):\n return self.get(self._names[\"title\"])",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def get_title(self):\n return self.title",
"def safe_title(self):\n try:\n return self.title\n except ObjectDoesNotExist:\n return None",
"def get_title(self):\n\n return self.title",
"def get_title(self):\n return self._title",
"def get_title(self):\n return self._title",
"def get_title(self):\n return self._title",
"def getTitle(self, item):\n return item.Title() or item.getId()",
"def title(self, title: \"str\"):\n self._attrs[\"title\"] = title"
]
| [
"0.69105786",
"0.6895118",
"0.6473908",
"0.64691746",
"0.64505994",
"0.6439746",
"0.6439746",
"0.6439746",
"0.6439746",
"0.6439746",
"0.6379838",
"0.6379838",
"0.6379838",
"0.6379838",
"0.63518524",
"0.63235426",
"0.63089734",
"0.63060486",
"0.62965906",
"0.62965906",
"0.62822044",
"0.62822044",
"0.62822044",
"0.6280743",
"0.62384546",
"0.62342906",
"0.62342906",
"0.62342906",
"0.62224996",
"0.621329"
]
| 0.7591315 | 0 |
Returns the HREF for this link. If the href is None, this will throw an exception. Use get_href if there may not be an href. | def href(self) -> str:
result = self.get_href()
if result is None:
raise ValueError(f"{self} does not have an HREF set.")
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_href(self, transform_href: bool = True) -> Optional[str]:\n # get the self href\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if (\n transform_href\n and href\n and is_absolute_href(href)\n and self.owner\n and self.owner.get_root()\n ):\n root = self.owner.get_root()\n rel_links = [\n *HIERARCHICAL_LINKS,\n *pystac.EXTENSION_HOOKS.get_extended_object_links(self.owner),\n ]\n # if a hierarchical link with an owner and root, and relative catalog\n if root and root.is_relative() and self.rel in rel_links:\n owner_href = self.owner.get_self_href()\n if owner_href is not None:\n href = make_relative_href(href, owner_href)\n\n return href",
"def href(self):\n return self._href",
"def href(self):\n return self._href",
"def absolute_href(self) -> str:\n result = self.get_absolute_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def self_href(cls, href: HREF) -> \"Link\":\n href_str = str(os.fspath(href))\n return cls(pystac.RelType.SELF, href_str, media_type=pystac.MediaType.JSON)",
"def get_href_url(self, href):\n verb = \"GET\"\n if(isinstance(href,str)):\n url = urljoiner(self.baseurl, [href])\n elif(isinstance(href,dict) and 'href' in href): # check if dictionary and has key\n url = urljoiner(self.baseurl, [href['href']])\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.get(url, headers=self.headers)\n self.handle_error_message(r)\n return r.json()",
"def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url",
"def get_absolute_href(self) -> Optional[str]:\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if href is not None and self.owner is not None:\n href = make_absolute_href(href, self.owner.get_self_href())\n\n return href",
"def Link(self, default=None):\n return self.data.get('links', {}).get('self', default)",
"def href(self, value, request) -> Optional[str]:\n if value is None:\n return None\n elif isinstance(value, str):\n return value\n else:\n return value.href(request)",
"def getLink(self):\n return self.link",
"def get_href(text, base_url=None):\n m = re.search(r'href\\s*=\\s*[\"\\']?([^\"\\'> ]+)[\"\\'> ]', text, re.I)\n if not m:\n return None\n link = m.group(1).strip()\n if base_url and not link.lower().startswith(\"http\"):\n import urlparse\n link = urlparse.urljoin(base_url, link)\n return link",
"def link(self):\n\n return self._get_field(\"link\")",
"def web_link(self):\n if \"webLink\" in self._prop_dict:\n return self._prop_dict[\"webLink\"]\n else:\n return None",
"def self_link(self):\n return self._json['coredata'].get('link', [])[0].get('@href')",
"def get_link_by_reference(self, ref: Reference) -> str:\n logger.debug(f'Getting link for reference {ref} in API {self.name}')\n self.check_reference(ref)\n anchor = self.generate_anchor_by_reference(ref)\n return self.gen_full_url(anchor)",
"def _get_link(li):\n try:\n a = li.find(\"a\")\n link = a[\"href\"]\n except Exception:\n return None\n return link",
"def get_link_by_reference(self, ref: Reference) -> str:\n\n logger.debug(f'Getting link for reference {ref} in API {self.name}')\n self.check_reference(ref)\n anchor = self.get_anchor_by_reference(ref) # may throw ReferenceNotFoundError\n return self.gen_full_url(anchor)",
"def get_link_by_reference(self, ref: Reference) -> str:\n\n logger.debug(f'Getting link for reference {ref} in API {self.name}')\n self.check_reference(ref)\n anchor = self.get_anchor_by_reference(ref) # may throw ReferenceNotFoundError\n return self.gen_full_url(anchor)",
"def get_link_by_reference(self, ref: Reference) -> str:\n\n logger.debug(f'Getting link for reference {ref} in API {self.name}')\n self.check_reference(ref)\n anchor = self.get_anchor_by_reference(ref) # may throw ReferenceNotFoundError\n return self.gen_full_url(anchor)",
"def link_url(self):\n # planet_assoc = self.planet_assocs.join(PlanetAssociation.planet.of_type(LinkPlanet)).first()\n\n for planet_assoc in self.planet_assocs:\n if planet_assoc.planet.kind == \"link\":\n return planet_assoc.planet.url\n return None",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self):\n return self._link",
"def link(self):\n return self._link",
"def link(self):\n return self._link",
"def get_href(asset_details):\n Script._validate_type(asset_details, u'asset_details', object, True)\n Script._validate_type_of_details(asset_details, DATA_ASSETS_DETAILS_TYPE)\n\n return WMLResource._get_required_element_from_dict(asset_details, u'asset_details', [u'metadata', u'href'])",
"def get_href_or_none(soup, css_selector):\n link = soup.select_one(css_selector)\n return link['href'] if link else None",
"def theLinky(self):\n theLink = self.absolute_url()\n return theLink",
"def GetTransactionHref(self):\n if self.transactionHref is None:\n self._GetAurumAccount()\n return self.transactionHref"
]
| [
"0.68136275",
"0.68054163",
"0.68054163",
"0.67377317",
"0.66265225",
"0.63145745",
"0.6179258",
"0.61082935",
"0.60186666",
"0.596101",
"0.592977",
"0.5924774",
"0.58844876",
"0.58479047",
"0.580537",
"0.57960415",
"0.5775185",
"0.5771198",
"0.5771198",
"0.5771198",
"0.57571363",
"0.56120545",
"0.56120545",
"0.5505585",
"0.5505585",
"0.5505585",
"0.54297644",
"0.54038906",
"0.53853774",
"0.53825676"
]
| 0.76138645 | 0 |
Returns the absolute HREF for this link. If the href is None, this will throw an exception. Use get_absolute_href if there may not be an href set. | def absolute_href(self) -> str:
result = self.get_absolute_href()
if result is None:
raise ValueError(f"{self} does not have an HREF set.")
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_href(self) -> Optional[str]:\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if href is not None and self.owner is not None:\n href = make_absolute_href(href, self.owner.get_self_href())\n\n return href",
"def href(self) -> str:\n result = self.get_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def get_href(self, transform_href: bool = True) -> Optional[str]:\n # get the self href\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if (\n transform_href\n and href\n and is_absolute_href(href)\n and self.owner\n and self.owner.get_root()\n ):\n root = self.owner.get_root()\n rel_links = [\n *HIERARCHICAL_LINKS,\n *pystac.EXTENSION_HOOKS.get_extended_object_links(self.owner),\n ]\n # if a hierarchical link with an owner and root, and relative catalog\n if root and root.is_relative() and self.rel in rel_links:\n owner_href = self.owner.get_self_href()\n if owner_href is not None:\n href = make_relative_href(href, owner_href)\n\n return href",
"def href(self):\n return self._href",
"def href(self):\n return self._href",
"def self_href(cls, href: HREF) -> \"Link\":\n href_str = str(os.fspath(href))\n return cls(pystac.RelType.SELF, href_str, media_type=pystac.MediaType.JSON)",
"def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url",
"def href(self, value, request) -> Optional[str]:\n if value is None:\n return None\n elif isinstance(value, str):\n return value\n else:\n return value.href(request)",
"def get_href(text, base_url=None):\n m = re.search(r'href\\s*=\\s*[\"\\']?([^\"\\'> ]+)[\"\\'> ]', text, re.I)\n if not m:\n return None\n link = m.group(1).strip()\n if base_url and not link.lower().startswith(\"http\"):\n import urlparse\n link = urlparse.urljoin(base_url, link)\n return link",
"def get_absolute_url(self) -> str:\n return self.cagnotte.get_absolute_url()",
"def theLinky(self):\n theLink = self.absolute_url()\n return theLink",
"def get_absolute_url(relative_url, base_url):\n return abs(relative_url.attributes.get('href',''), base=base_url.redirect or base_url.string)",
"def get_absolute_url(self):\n # TODO not implemented yet\n return self.slug",
"def get_href(asset_details):\n Script._validate_type(asset_details, u'asset_details', object, True)\n Script._validate_type_of_details(asset_details, DATA_ASSETS_DETAILS_TYPE)\n\n return WMLResource._get_required_element_from_dict(asset_details, u'asset_details', [u'metadata', u'href'])",
"def get_canonical_link(self):\n if self.article.final_url:\n kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'}\n meta = self.parser.getElementsByTag(self.article.doc, **kwargs)\n if meta is not None and len(meta) > 0:\n href = self.parser.getAttribute(meta[0], 'href')\n if href:\n href = href.strip()\n o = urlparse(href)\n if not o.hostname:\n z = urlparse(self.article.final_url)\n domain = '%s://%s' % (z.scheme, z.hostname)\n href = urljoin(domain, href)\n return href\n return self.article.final_url",
"def get_absolute_uri(self, uri):\n url_parts = urllib.parse.urlparse(uri)\n if url_parts.scheme not in urllib.parse.uses_relative \\\n or url_parts.path.startswith('/') \\\n or self.parser.base_uri is None:\n return uri\n return urllib.parse.urljoin(self.parser.base_uri, uri)",
"def get_absolute_url(self) -> str:\n return self.proposition.get_absolute_url()",
"def get_absolute_url(rel_url):\n protocol = \"http\" if settings.DEBUG else \"https\"\n domain = Site.objects.get_current().domain\n return f\"{protocol}://{domain}{rel_url}\"",
"def get_href_url(self, href):\n verb = \"GET\"\n if(isinstance(href,str)):\n url = urljoiner(self.baseurl, [href])\n elif(isinstance(href,dict) and 'href' in href): # check if dictionary and has key\n url = urljoiner(self.baseurl, [href['href']])\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.get(url, headers=self.headers)\n self.handle_error_message(r)\n return r.json()",
"def Link(self, default=None):\n return self.data.get('links', {}).get('self', default)",
"def extract_url_from_anchor_tag(text):\n pattern = re.compile(r'(?<=href=\").*?(?=\")')\n matches = pattern.findall(text)\n return matches[0] if matches else ''",
"def self_link(self):\n return self._json['coredata'].get('link', [])[0].get('@href')",
"def get_href(self, model_definition_details):\n if 'asset_id' in model_definition_details['metadata']:\n return WMLResource._get_required_element_from_dict(model_definition_details, u'model_definition_details', [u'metadata', u'asset_id'])\n else:\n ModelDefinition._validate_type(model_definition_details, u'model__definition_details', object, True)\n # ModelDefinition._validate_type_of_details(model_definition_details, MODEL_DEFINITION_DETAILS_TYPE)\n\n return WMLResource._get_required_element_from_dict(model_definition_details, u'model_definition_details',\n [u'metadata', u'href'])",
"def get_absolute_url(self):\n return reverse('link_detail', args=[str(self.id)])",
"def GetTransactionHref(self):\n if self.transactionHref is None:\n self._GetAurumAccount()\n return self.transactionHref",
"def get_absolute_url(self):\n\n file_url = settings.MEDIA_URL + str(self.file_link.url)\n filelist_url = self.file_list.get_absolute_url() if self.file_list else \"\"\n contentmodel_url = super(File, self).get_absolute_url()\n\n # otherwise return the url for its list of files or its content model url\n return (file_url or filelist_url or contentmodel_url or \"\")",
"def build_absolute_url(self, path_or_url):\n return urllib.parse.urljoin(self.parsed_url.geturl(), path_or_url)",
"def click(self, href=u''):\n if href:\n if isinstance(href, URL):\n clicked = href\n else:\n # TODO: This error message is not completely accurate,\n # as URL objects are now also valid, but Twisted's\n # test suite (wrongly) relies on this exact message.\n _textcheck('relative URL', href)\n clicked = URL.from_text(href)\n if clicked.absolute:\n return clicked\n else:\n clicked = self\n\n query = clicked.query\n if clicked.scheme and not clicked.rooted:\n # Schemes with relative paths are not well-defined. RFC 3986 calls\n # them a \"loophole in prior specifications\" that should be avoided,\n # or supported only for backwards compatibility.\n raise NotImplementedError('absolute URI with rootless path: %r'\n % (href,))\n else:\n if clicked.rooted:\n path = clicked.path\n elif clicked.path:\n path = self.path[:-1] + clicked.path\n else:\n path = self.path\n if not query:\n query = self.query\n return self.replace(scheme=clicked.scheme or self.scheme,\n host=clicked.host or self.host,\n port=clicked.port or self.port,\n path=_resolve_dot_segments(path),\n query=query,\n fragment=clicked.fragment)",
"def get_absolute_url(path):\n if is_absolute_url(path):\n return path\n site = settings.SITES['front']\n return build_url(path, scheme=site['scheme'], domain=site['domain'])",
"def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)"
]
| [
"0.775969",
"0.74204093",
"0.72394633",
"0.6734149",
"0.6734149",
"0.6159152",
"0.59924763",
"0.58902055",
"0.587239",
"0.5865415",
"0.5855938",
"0.5798646",
"0.576012",
"0.5723536",
"0.55905426",
"0.55897635",
"0.5544071",
"0.55341566",
"0.54994625",
"0.54854596",
"0.54666847",
"0.545551",
"0.5449928",
"0.5449373",
"0.54429525",
"0.53971714",
"0.5333668",
"0.53211707",
"0.53037965",
"0.5291603"
]
| 0.82808983 | 0 |
Gets the absolute href for this link, if possible. | def get_absolute_href(self) -> Optional[str]:
if self._target_object:
href = self._target_object.get_self_href()
else:
href = self._target_href
if href is not None and self.owner is not None:
href = make_absolute_href(href, self.owner.get_self_href())
return href | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def absolute_href(self) -> str:\n result = self.get_absolute_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def href(self) -> str:\n result = self.get_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def href(self):\n return self._href",
"def href(self):\n return self._href",
"def theLinky(self):\n theLink = self.absolute_url()\n return theLink",
"def get_href(self, transform_href: bool = True) -> Optional[str]:\n # get the self href\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if (\n transform_href\n and href\n and is_absolute_href(href)\n and self.owner\n and self.owner.get_root()\n ):\n root = self.owner.get_root()\n rel_links = [\n *HIERARCHICAL_LINKS,\n *pystac.EXTENSION_HOOKS.get_extended_object_links(self.owner),\n ]\n # if a hierarchical link with an owner and root, and relative catalog\n if root and root.is_relative() and self.rel in rel_links:\n owner_href = self.owner.get_self_href()\n if owner_href is not None:\n href = make_relative_href(href, owner_href)\n\n return href",
"def get_absolute_url(self) -> str:\n return self.cagnotte.get_absolute_url()",
"def get_absolute_url(self):\n # TODO not implemented yet\n return self.slug",
"def get_absolute_url(relative_url, base_url):\n return abs(relative_url.attributes.get('href',''), base=base_url.redirect or base_url.string)",
"def url(self):\n return (urljoin(self.lodgeit.address, self.relative_url)\n if self.relative_url else None)",
"def get_absolute_url(self) -> str:\n return self.proposition.get_absolute_url()",
"def get_canonical_link(self):\n if self.article.final_url:\n kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'}\n meta = self.parser.getElementsByTag(self.article.doc, **kwargs)\n if meta is not None and len(meta) > 0:\n href = self.parser.getAttribute(meta[0], 'href')\n if href:\n href = href.strip()\n o = urlparse(href)\n if not o.hostname:\n z = urlparse(self.article.final_url)\n domain = '%s://%s' % (z.scheme, z.hostname)\n href = urljoin(domain, href)\n return href\n return self.article.final_url",
"def self_link(self):\n return self._json['coredata'].get('link', [])[0].get('@href')",
"def get_href(text, base_url=None):\n m = re.search(r'href\\s*=\\s*[\"\\']?([^\"\\'> ]+)[\"\\'> ]', text, re.I)\n if not m:\n return None\n link = m.group(1).strip()\n if base_url and not link.lower().startswith(\"http\"):\n import urlparse\n link = urlparse.urljoin(base_url, link)\n return link",
"def get_absolute_url(rel_url):\n protocol = \"http\" if settings.DEBUG else \"https\"\n domain = Site.objects.get_current().domain\n return f\"{protocol}://{domain}{rel_url}\"",
"def build_absolute_url(self, path_or_url):\n return urllib.parse.urljoin(self.parsed_url.geturl(), path_or_url)",
"def getLink(self):\n return self.link",
"def _get_url(self, absolute):",
"def _get_full_url(self, link, url):\n from webcrawler.settings import process_link_value\n path = urlparse.urljoin(url, link)\n path = process_link_value(path)\n return path",
"def get_absolute_url(self):\n\n file_url = settings.MEDIA_URL + str(self.file_link.url)\n filelist_url = self.file_list.get_absolute_url() if self.file_list else \"\"\n contentmodel_url = super(File, self).get_absolute_url()\n\n # otherwise return the url for its list of files or its content model url\n return (file_url or filelist_url or contentmodel_url or \"\")",
"def get_absolute_url(self):\n return reverse('link_detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return ('')",
"def link(self):\n\n return self._get_field(\"link\")",
"def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url",
"def get_absolute_url(self):\n return get_front_end_url(self)",
"def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url",
"def get_absolute_url(self):\n return reverse('', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('', args=[str(self.id)])",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")"
]
| [
"0.8552441",
"0.77883655",
"0.7635637",
"0.7635637",
"0.7434001",
"0.73831236",
"0.72302043",
"0.6954633",
"0.68366444",
"0.6803096",
"0.67107046",
"0.6695295",
"0.66920805",
"0.66411704",
"0.66239035",
"0.66154504",
"0.6570545",
"0.6557817",
"0.6527888",
"0.65234864",
"0.6501156",
"0.64941907",
"0.6483267",
"0.6453677",
"0.6450061",
"0.64375126",
"0.6414109",
"0.6414109",
"0.6385565",
"0.6385565"
]
| 0.79687524 | 1 |
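A minimal usage sketch of get_absolute_href, assuming a pystac installation whose Link API matches the record above; the catalog id, self href, and item path are illustrative.

```python
import pystac

# Owner catalog with a known self href; get_absolute_href resolves against it.
catalog = pystac.Catalog(id="demo", description="demo catalog")
catalog.set_self_href("/data/catalog/catalog.json")

link = pystac.Link(rel="item", target="./items/a.json")
catalog.add_link(link)  # add_link also records the catalog as the link's owner

# Relative target href + owner self href -> absolute href.
print(link.get_absolute_href())  # expected: /data/catalog/items/a.json
```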
The target of the link. If the link is unresolved, or the link is to something that is not a STACObject, the target is an HREF. If resolved, the target is a STACObject. | def target(self) -> Union[str, "STACObject_Type"]:
if self._target_object:
return self._target_object
elif self._target_href:
return self._target_href
else:
raise ValueError("No target defined for link.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_target_str(self) -> Optional[str]:\n if self._target_href:\n return self._target_href\n elif self._target_object:\n return self._target_object.get_self_href()\n else:\n return None",
"def has_target_href(self) -> bool:\n return self._target_href is not None",
"def target(self):\n if self._security_class == \"lnk_file\":\n return self._target\n else:\n return None",
"def target(self, target: Union[str, \"STACObject_Type\"]) -> None:\n if isinstance(target, str):\n self._target_href = target\n self._target_object = None\n else:\n self._target_href = None\n self._target_object = target",
"def handle_a(self, tag, attrs):\n ad = dict(attrs)\n if 'href' in ad.keys() \\\n and ad['href'].startswith('http:') \\\n and 'target' not in ad.keys():\n self.errmsg(\"External link with no target attribute\")",
"def targetURL(self):\n target = self.getTargetObject()\n if target:\n return target.absolute_url()\n return '#'",
"def getLinkTarget(filename):\n is_link = False\n while os.path.exists(filename) and os.path.islink(filename):\n link_target = os.readlink(filename)\n\n filename = os.path.join(os.path.dirname(filename), link_target)\n is_link = True\n\n return is_link, filename",
"def resolve_stac_object(self, root: Optional[\"Catalog_Type\"] = None) -> \"Link\":\n if self._target_object:\n pass\n elif self._target_href:\n target_href = self._target_href\n\n # If it's a relative link, base it off the parent.\n if not is_absolute_href(target_href):\n if self.owner is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n \"without owner or start_href.\".format(target_href)\n )\n start_href = self.owner.get_self_href()\n\n if start_href is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n 'without owner \"self\" link set.'.format(target_href)\n )\n\n target_href = make_absolute_href(target_href, start_href)\n obj = None\n\n stac_io: Optional[pystac.StacIO] = None\n\n if root is not None:\n obj = root._resolved_objects.get_by_href(target_href)\n stac_io = root._stac_io\n\n if obj is None:\n\n if stac_io is None:\n if self.owner is not None:\n if isinstance(self.owner, pystac.Catalog):\n stac_io = self.owner._stac_io\n if stac_io is None:\n stac_io = pystac.StacIO.default()\n\n obj = stac_io.read_stac_object(target_href, root=root)\n obj.set_self_href(target_href)\n if root is not None:\n obj = root._resolved_objects.get_or_cache(obj)\n obj.set_root(root)\n self._target_object = obj\n else:\n raise ValueError(\"Cannot resolve STAC object without a target\")\n\n if (\n self.owner\n and self.rel in [pystac.RelType.CHILD, pystac.RelType.ITEM]\n and isinstance(self.owner, pystac.Catalog)\n ):\n assert self._target_object\n self._target_object.set_parent(self.owner)\n\n return self",
"def target(self):\n return self._properties.get('target')",
"def symlink_target(pth):\n\n if os.path.islink(pth):\n return os.readlink(pth)\n return pth",
"def get_target_url(self):\n return self.TARGET_URL",
"def target(self) -> Optional[str]:\n return pulumi.get(self, \"target\")",
"def getTarget(self):\n return self._target",
"def _targetof(node):\r\n if node is None: return None\r\n return node.target",
"def getTarget(self):\n return self.Target",
"def _get_target(self):\n target = None\n lnw = self.wrapped_handler_ref()\n if lnw is not None:\n target_ref = getattr(lnw, \"object\", None)\n if target_ref is not None:\n target = target_ref()\n return target",
"def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e",
"def link(self):\n return self.container['link']",
"def targetUrl(self):\n domain = self.site.get('DOMAIN', None)\n if not domain:\n return u''\n return SiteProcessUtils.getUrlFromPath(self.site, domain, self.targetPath)",
"def getTarget(self):\n\n return self._target",
"def UseRouteDistinguisherAsRouteTarget(self):\n return self._get_attribute('useRouteDistinguisherAsRouteTarget')",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def link(self):\n return self._link",
"def link(self):\n return self._link",
"def link(self):\n return self._link",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self):\n\n return self._get_field(\"link\")",
"def link_url(self):\n # planet_assoc = self.planet_assocs.join(PlanetAssociation.planet.of_type(LinkPlanet)).first()\n\n for planet_assoc in self.planet_assocs:\n if planet_assoc.planet.kind == \"link\":\n return planet_assoc.planet.url\n return None"
]
| [
"0.67976296",
"0.62307954",
"0.6215397",
"0.61617273",
"0.5862698",
"0.565353",
"0.5597784",
"0.55615544",
"0.5497127",
"0.54542977",
"0.5454122",
"0.54058546",
"0.53850055",
"0.5383484",
"0.5362352",
"0.5348752",
"0.53102714",
"0.53102714",
"0.5302297",
"0.5296698",
"0.5288905",
"0.5256877",
"0.52383494",
"0.5230274",
"0.5230274",
"0.5230274",
"0.52269983",
"0.52269983",
"0.52248734",
"0.51489526"
]
| 0.795444 | 0 |
Sets this link's target to a string or a STAC object. | def target(self, target: Union[str, "STACObject_Type"]) -> None:
if isinstance(target, str):
self._target_href = target
self._target_object = None
else:
self._target_href = None
self._target_object = target | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self) -> Union[str, \"STACObject_Type\"]:\n if self._target_object:\n return self._target_object\n elif self._target_href:\n return self._target_href\n else:\n raise ValueError(\"No target defined for link.\")",
"def setTarget(self, target):\n\n self._target = target",
"def SetTarget(self, entity):\n\t\tself.target = entity",
"def target(self, target):\n\n self._target = target",
"def target(self, value):\n self._target = value",
"def target_name(self, target_name):\n\n self._target_name = target_name",
"def target_name(self, target_name):\n\n self._target_name = target_name",
"def set_target(self, newtarget):\n if newtarget is None:\n self._side_properties['target'] = None\n return\n \n # -- Input Test -- #\n if AstroTarget not in newtarget.__class__.__mro__:\n raise TypeError(\"'newtarget' should be (or inherite of) an AstroTarget\")\n \n # -- Seems Ok -- #\n self._side_properties[\"target\"] = newtarget.copy()",
"def setCurrentTarget(self):\n if self.isAssault:\n self.setAssaultTarget()\n else:\n self.setWarshipTarget()",
"def target_description(self, target_description):\n\n self._target_description = target_description",
"def setTarget(t):\n global targetFolder\n if t[-1] != '/':\n t += '/'\n targetFolder = t",
"def set_target(self, target, useAvoidance=False, verbose=False):\n self.logger.info(\"Deprecated function set_target called. Please call head_direction.\")\n self.head_direction(self, target, useAvoidance, verbose)",
"def target_id(self, target_id: str):\n\n self._target_id = target_id",
"def get_target_str(self) -> Optional[str]:\n if self._target_href:\n return self._target_href\n elif self._target_object:\n return self._target_object.get_self_href()\n else:\n return None",
"def target_id(self, target_id):\n\n self._target_id = target_id",
"def setTarget(self, *args):\n return _osgAnimation.Channel_setTarget(self, *args)",
"def repo_link_set(self, repo_id, link_type, target):\n self.send(repo_id, 'repo_link_set', link_type, target)",
"def target(self, target):\n self.__target = float(target)",
"def target_resource(self, target_resource):\n self._target_resource = target_resource",
"def set_target(request):\n connfd = None\n ret = 'Success'\n try:\n connfd = urllib2.urlopen(request)\n except urllib2.HTTPError, e:\n ret = 'HTTPError: ' + str(e.code)\n except urllib2.URLError, e:\n ret = 'URLError: ' + str(e.reason)\n except httplib.HTTPException, e:\n ret = 'HTTPException'\n except Exception:\n ret = 'Invalid Target -- Make sure the complete URL is provided'\n return connfd, ret",
"def target_lang(self, target_lang):\n\n self._target_lang = target_lang",
"def from_the(self, target: Target) -> \"SelectByText\":\n self.target = target\n return self",
"def set_target(self, target):\n # parse target objects\n res = []\n targets = target.split(',')\n for item in targets:\n res.append(item)\n self.target = res\n \n # create conversion table for new index\n self.conversion = {}\n for i, cat in enumerate(self.target):\n self.conversion[cat] = f'{i}'",
"def set_as_target(remote):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_SetAsTarget();\n remote.runCommand(cmd)",
"def target_contact(self, target_contact):\n \n self._target_contact = target_contact",
"def __setattr__(self, name, value):\n if name == 'source' or name == 'destination':\n # produce \"canonical\" form of a source / destination\n # FIXME: we need to handle arbitrary netmasks here\n if value is not None and value.endswith('/32'):\n value = value[:-3]\n elif name == 'goto' or name == 'jump':\n if value is not None and not isinstance(value, Target):\n value = Target(value)\n elif name == 'matches':\n if not isinstance(value, list):\n raise Exception(\"matches attribute requires a list\")\n self.__dict__[name] = value",
"def target_instance(self, target_instance):\n self._target_instance = target_instance",
"def set_value ( self, object, value ):\n target, name = self.target_name( object )\n setattr( target, name, value )"
]
| [
"0.71120214",
"0.71120214",
"0.71052563",
"0.69285864",
"0.677933",
"0.6764078",
"0.6680332",
"0.6353015",
"0.6353015",
"0.6341229",
"0.6152728",
"0.60386664",
"0.6020701",
"0.60081893",
"0.5942475",
"0.59326124",
"0.58607703",
"0.5849536",
"0.58451325",
"0.5837806",
"0.58233184",
"0.58098114",
"0.5805775",
"0.57821834",
"0.57116705",
"0.57011837",
"0.56744176",
"0.5604846",
"0.556946",
"0.55546993"
]
| 0.81399906 | 0 |
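A minimal sketch of the target getter and setter documented in the two records above, assuming pystac is installed; the hrefs and ids are illustrative.

```python
import pystac

link = pystac.Link(rel="child", target="./child/catalog.json")
print(link.target)            # unresolved, so the getter returns the raw href string

child = pystac.Catalog(id="child", description="child catalog")
link.target = child           # assigning an object clears the stored href
print(link.target is child)   # True: the getter now returns the STAC object itself
```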
Returns this link's target as a string. If a string href was provided, returns that. If not, tries to resolve the self link of the target object. | def get_target_str(self) -> Optional[str]:
if self._target_href:
return self._target_href
elif self._target_object:
return self._target_object.get_self_href()
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target(self) -> Union[str, \"STACObject_Type\"]:\n if self._target_object:\n return self._target_object\n elif self._target_href:\n return self._target_href\n else:\n raise ValueError(\"No target defined for link.\")",
"def href(self) -> str:\n result = self.get_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def get_absolute_href(self) -> Optional[str]:\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if href is not None and self.owner is not None:\n href = make_absolute_href(href, self.owner.get_self_href())\n\n return href",
"def get_href(self, transform_href: bool = True) -> Optional[str]:\n # get the self href\n if self._target_object:\n href = self._target_object.get_self_href()\n else:\n href = self._target_href\n\n if (\n transform_href\n and href\n and is_absolute_href(href)\n and self.owner\n and self.owner.get_root()\n ):\n root = self.owner.get_root()\n rel_links = [\n *HIERARCHICAL_LINKS,\n *pystac.EXTENSION_HOOKS.get_extended_object_links(self.owner),\n ]\n # if a hierarchical link with an owner and root, and relative catalog\n if root and root.is_relative() and self.rel in rel_links:\n owner_href = self.owner.get_self_href()\n if owner_href is not None:\n href = make_relative_href(href, owner_href)\n\n return href",
"def targetURL(self):\n target = self.getTargetObject()\n if target:\n return target.absolute_url()\n return '#'",
"def get_target_url(self):\n return self.TARGET_URL",
"def absolute_href(self) -> str:\n result = self.get_absolute_href()\n if result is None:\n raise ValueError(f\"{self} does not have an HREF set.\")\n return result",
"def get_selfLink(self):\n if 'selfLink' in self.target_pool_config:\n return self.target_pool_config['selfLink']",
"def target(self) -> Optional[str]:\n return pulumi.get(self, \"target\")",
"def targetUrl(self):\n domain = self.site.get('DOMAIN', None)\n if not domain:\n return u''\n return SiteProcessUtils.getUrlFromPath(self.site, domain, self.targetPath)",
"def href(self):\n return self._href",
"def href(self):\n return self._href",
"def self_href(cls, href: HREF) -> \"Link\":\n href_str = str(os.fspath(href))\n return cls(pystac.RelType.SELF, href_str, media_type=pystac.MediaType.JSON)",
"def has_target_href(self) -> bool:\n return self._target_href is not None",
"def target(self, target: Union[str, \"STACObject_Type\"]) -> None:\n if isinstance(target, str):\n self._target_href = target\n self._target_object = None\n else:\n self._target_href = None\n self._target_object = target",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def self_link(self) -> str:\n return pulumi.get(self, \"self_link\")",
"def target(self):\n if self._security_class == \"lnk_file\":\n return self._target\n else:\n return None",
"def getTarget(self):\n\n return self._target",
"def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url",
"def self_link(self):\n return self._json['coredata'].get('link', [])[0].get('@href')",
"def symlink_target(pth):\n\n if os.path.islink(pth):\n return os.readlink(pth)\n return pth",
"def getTarget(self):\n return self._target"
]
| [
"0.6613602",
"0.6349546",
"0.6326438",
"0.6221752",
"0.6183839",
"0.58277327",
"0.5809976",
"0.5801703",
"0.5685269",
"0.5653803",
"0.561428",
"0.561428",
"0.546091",
"0.5437358",
"0.5431587",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.540748",
"0.5406207",
"0.5397461",
"0.5393511",
"0.53799474",
"0.5349006",
"0.5338452"
]
| 0.8104228 | 0 |
Returns true if this link has a string href in its target information. | def has_target_href(self) -> bool:
return self._target_href is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_href_valid(self, link):\n url = str(link['href'])\n # if it doesn't lead to a wiki page\n if not url.startswith(\"/wiki/\"):\n return False\n\n wikipedia_classes = [\"external_text\", \"mw-disambig\", \"infobox-data\"]\n # if the href has a class\n if link.get(\"class\") is not None:\n link_class = \"_\".join(link.get(\"class\"))\n # if the class is an external text class, or a disambiguation link\n if any(wiki_class in link_class for wiki_class in wikipedia_classes):\n return False\n\n if 'wikimedia' in url or 'wiktionary' in url:\n return False\n wikipedia_keywords = [\"Help\", \"Category\", \"Wikipedia\", \"Template\", \"File\", \"Talk\", \"Special\", \"Portal\"]\n if any(keyword + ':' in url for keyword in wikipedia_keywords):\n return False\n if '#' in url:\n return False\n # if the page is a file\n if re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z]$\", url) or re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z]$\", url):\n return False\n\n # if the href is enclosed in brackets\n if WikiPage.is_substring_enclosed_in_brackets(link, link.parent.parent):\n return False\n\n wikipedia_not_needed_tags = ['small', 'sup', 'i']\n if link.parent.name in wikipedia_not_needed_tags:\n return False\n\n # if the href shows two different spellings. like in: https://en.wikipedia.org/wiki/Carbon_fibers\n # Carbon fibers ~or~ carbon fibres - here or is the href.\n\n if link.contents == [\"or\"]:\n return False\n\n parents_classes = [p.get(\"class\") for p in link.parents if p.get(\"class\") is not None]\n parents_classes = [str(\"_\".join(p)) for p in parents_classes]\n parents_ids = [p.get(\"id\") for p in link.parents if p.get(\"id\") is not None]\n\n # 'toc' - the Contents menu class\n # 'mw-editsection' - the Edit section\n # 'thumbcaption' - a Photo Caption\n # 'hlist' - a list like in: https://en.wikipedia.org/wiki/January\n wikipedia_classes_to_ignore = [\"thumbcaption\", \"infobox\", \"navigation-not-searchable\", \"sidebar\", \"box-text\",\n \"toc\", \"mw-editsection\", \"thumb\", \"hlist\", \"navbox\"]\n\n for p_class in parents_classes:\n\n if any(class_to_ignore in p_class for class_to_ignore in wikipedia_classes_to_ignore):\n return False\n\n # if it is a coordinates href\n if \"coordinates\" in parents_ids:\n return False\n\n '''\n Update 13.04.2021:\n ------------------\n Someone edited the \"Epistemology\" page. and changed the first link <a>branches<a/>.\n Instead of pointing to the page \"Branches of science\", it was changed to point to \"Outline of philosophy\".\n Which creates a loop. I chose to ignore it manually, and instead click on the next link.\n ( which happens to be Philosophy :) )\n This changed also caused some of the \"paths\" in the PDF files,\n generated before that date to be slightly outdated. But the concept stays the same :)\n \n Update 08.05.2021:\n ------------------\n they fixed it since :)\n \"Epistemology\" -> branches of philosophy : \"https://en.wikipedia.org/wiki/Outline_of_philosophy\" ->\n -> Philosophy.\n \n #if \"Outline_of_philosophy\" in url:\n # return False\n '''\n\n return True",
"def islink(self):\n return os.path.islink(self.path)",
"def handle_a(self, tag, attrs):\n ad = dict(attrs)\n if 'href' in ad.keys() \\\n and ad['href'].startswith('http:') \\\n and 'target' not in ad.keys():\n self.errmsg(\"External link with no target attribute\")",
"def is_link(s):\n return (len(s) == 2 and is_link(s[1])) or s == empty",
"def islink(self, path):\n return os.path.islink(path)",
"def check_for_url_in_text(self, string):\r\n has_link = False\r\n\r\n # Find all links in the string.\r\n links = re.findall(r'(https?://\\S+)', string)\r\n if len(links)>0:\r\n has_link = True\r\n\r\n # Autolink by wrapping links in anchor tags.\r\n for link in links:\r\n string = re.sub(link, self.generate_file_link_html_from_url(link, link), string)\r\n\r\n return has_link, string",
"def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))",
"def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))",
"def is_link(s):\n return s == empty or (len(s) == 2 and is_link(s[1]))",
"def is_link(self, url):\n return not self.is_page(url)",
"def target_url(self, url):\n url_parse = urlparse.urlparse(url)\n patten = re.compile(self.url_patten)\n if patten.match(url_parse.path):\n return True\n else:\n return False",
"def is_valid_tag(self, tag):\n\n if tag.has_attr('href') and len(tag['href']) > 0:\n href = tag['href']\n complete_href = self.session.complete_url(href)\n\n is_relative = self.url in complete_href\n is_visited = complete_href in self.visited_paths\n is_style_sheet = tag.name == \"link\"\n is_jumpTo = \"#\" in href\n is_mailTo = \"mailto\" in href\n is_js = \"javascript:\" in href\n return is_relative and \\\n not (is_visited or is_style_sheet or is_jumpTo or is_mailTo or is_js)\n else:\n return False",
"def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True",
"def hasSuffixLink(self):\n return False if self.suffix_link is None else True",
"def is_link(token):\n\n pattern = r'ht{2}p(s|)\\:\\/\\/(w{3}.|)[\\w]+\\.[\\w]+\\/[\\w\\d]+'\n return re.match(pattern, token)",
"def islink(path):\n return get_instance(path).islink(path)",
"def check_href(href):\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False",
"def should_render_as_link(self):\n if self.active and not self.render_link_for_active:\n return False\n return bool(self.url)",
"def IsTarget(self, target_name):\n return target_name in self.GetTargets()",
"def isRelURL(self, url):\n (scheme, netloc) = urlparse(url)[0:2]\n return not scheme and not netloc",
"def check_link(self, link):\n false_links = [\"wikipedia:\", \"w:\", \"wikitionary:\", \"wikt:\", \"wikinews:\",\n \"n:\", \"wikibooks:\", \"b:\", \"wikiquote:\", \"q:\", \"wikisource:\",\n \"s:\", \"wikispecies:\", \"species:\", \"wikiversity\", \"v:\", \n \"wikivoyage:\", \"voy:\", \"wikimedia:\", \"foundation:\", \"wmf:\", \n \"commonds:\", \"c:\", \"chapter:\", \"metawikipedia:\", \"meta:\", \n \"m:\", \"incubator:\", \"outreach:\", \"mw:\", \"mediazilla:\", \n \"bugzilla:\", \"testwiki:\", \"wikitech:\", \"wikidata:\", \"d:\",\n \"phabricator:\", \"phab:\", \"talk:\", \"user talk:\", \"file:\", \n \"user:\", \"template:\", \"category:\", \"file talk:\", \n \"category talk:\", \"image:\", \"media:\", \"special:\", \n \"help:\", \"portal:\", \"portal talk:\", \"\\#\"]\n is_bad = any(false_link in link.lower() for false_link in false_links)\n if is_bad or link[0] == \":\":\n return False\n else:\n return True",
"def __verify(self, href):\n # change main url to avoid mistakes with http ou https\n main = self.main_url.replace('https://', '').replace('http://', '')\n forbiden = {\"#\", 'None'} # forbidden possible urls\n if (href is None) or (href in forbiden):\n return False\n for item in ['tel:', 'mailto:', 'javascript:']:\n if item in href: # verify if is a link to telephone, e-mail or javascript\n return False\n if main in href and (\"/checkout/cart/add\" in href or \"/checkout/#/cart\" in href):\n return False # prevents a purchase from being made\n elif main in href or (main not in href and href[:4] != \"http\"):\n return True # possible case of a valid link\n else:\n return False # any other link is not valid",
"def is_url_requirement(ireq):\n return bool(ireq.original_link)",
"def getLinkTarget(filename):\n is_link = False\n while os.path.exists(filename) and os.path.islink(filename):\n link_target = os.readlink(filename)\n\n filename = os.path.join(os.path.dirname(filename), link_target)\n is_link = True\n\n return is_link, filename",
"def isLinkName(word):\r\n return wikiLink.match(word)",
"def _is_url(string):\n return \"http\" in string",
"def has_target(self):\n return self._has_target",
"def is_safe_url(target: str) -> bool:\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc",
"def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0",
"def is_safe_url(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and \\\n ref_url.netloc == test_url.netloc"
]
| [
"0.67790794",
"0.6705688",
"0.6634559",
"0.65864813",
"0.6529386",
"0.64960945",
"0.648174",
"0.648174",
"0.648174",
"0.64595854",
"0.64516133",
"0.6385341",
"0.6367129",
"0.6193168",
"0.6110131",
"0.6107418",
"0.60843617",
"0.60497266",
"0.60383934",
"0.59954834",
"0.59822106",
"0.5963318",
"0.594472",
"0.5936288",
"0.59225357",
"0.5911556",
"0.5908639",
"0.5857308",
"0.5838993",
"0.581657"
]
| 0.79885954 | 0 |
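A short sketch combining get_target_str and has_target_href from the two records above, assuming pystac is installed; the hrefs used here are illustrative.

```python
import pystac

link = pystac.Link(rel="collection", target="./collection.json")
print(link.has_target_href())   # True: only a string href is stored
print(link.get_target_str())    # "./collection.json"

resolved = pystac.Catalog(id="c", description="resolved target")
resolved.set_self_href("/data/c/catalog.json")
link.target = resolved
print(link.has_target_href())   # False: the href was replaced by the object
print(link.get_target_str())    # falls back to the object's self href
```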
Resolves a STAC object from the HREF of this link, if the link is not already resolved. | def resolve_stac_object(self, root: Optional["Catalog_Type"] = None) -> "Link":
if self._target_object:
pass
elif self._target_href:
target_href = self._target_href
# If it's a relative link, base it off the parent.
if not is_absolute_href(target_href):
if self.owner is None:
raise pystac.STACError(
"Relative path {} encountered "
"without owner or start_href.".format(target_href)
)
start_href = self.owner.get_self_href()
if start_href is None:
raise pystac.STACError(
"Relative path {} encountered "
'without owner "self" link set.'.format(target_href)
)
target_href = make_absolute_href(target_href, start_href)
obj = None
stac_io: Optional[pystac.StacIO] = None
if root is not None:
obj = root._resolved_objects.get_by_href(target_href)
stac_io = root._stac_io
if obj is None:
if stac_io is None:
if self.owner is not None:
if isinstance(self.owner, pystac.Catalog):
stac_io = self.owner._stac_io
if stac_io is None:
stac_io = pystac.StacIO.default()
obj = stac_io.read_stac_object(target_href, root=root)
obj.set_self_href(target_href)
if root is not None:
obj = root._resolved_objects.get_or_cache(obj)
obj.set_root(root)
self._target_object = obj
else:
raise ValueError("Cannot resolve STAC object without a target")
if (
self.owner
and self.rel in [pystac.RelType.CHILD, pystac.RelType.ITEM]
and isinstance(self.owner, pystac.Catalog)
):
assert self._target_object
self._target_object.set_parent(self.owner)
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve(s):\n # if the target graph isn't immediately available, we want to load it.\n if not s.try_to_point():\n g = s.store.graph_from_url( s.url )\n s.store.resolve_single_ref(s)",
"def _resolve(self, csub, c, direction):\n\t# By default, no way to resolve\n\treturn None",
"def resolve(self, doc_uri, obj):\n self.cache = {doc_uri: obj}\n session = requests.Session()\n session.mount(\"file://\", requests_file.FileAdapter())\n session.mount(\"resource://\", requests_resource.ResourceAdapter())\n with session:\n return self._resolve(type(obj)(), obj, session)",
"def maybe_resolve(object, resolve):\n if isinstance(object, dict) and object.get('$ref'):\n return resolve(object['$ref'])\n return object",
"def get_resource_from_uri(self, href):\n if not href.is_absolute():\n # resolve relative to the service root\n href = href.resolve(self.service_root)\n # check the canonical roots\n if not self.service_root.get_canonical_root().match(\n href.get_canonical_root()):\n # This isn't even for us\n return None\n request = core.ODataURI(href, self.path_prefix)\n return self.get_resource(request)[0]",
"def resolve(self, doc_uri, obj):\n self.cache = {doc_uri: obj}\n return self._resolve(doc_uri, obj, obj)",
"def resolve(self, resource, resourceType = None):\n pass;",
"def from_href(cls, href, **kw):\r\n for package_type in cls._REGISTRY:\r\n try:\r\n return package_type(href, **kw)\r\n except package_type.InvalidLink:\r\n continue",
"def resolveAlias(self, alias):",
"def parse_first_link(self):\n for link in self.links:\n if self.check_link(link):\n return self.clean_link(link)\n return None",
"def __expandURL(self, link):\n try:\n return requests.get(link).url\n except Exception:\n return link",
"def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc",
"def _resolve(self):\n pass",
"def resolve(self, address):",
"def resolve_resource(url):\n try:\n resource = soundcloud.get('/resolve', url=url)\n except HTTPError as e:\n if e.response.status_code == 404:\n return None\n else:\n raise\n \n return resource",
"def get_item_with_href(self, href):\n for item in self.get_items():\n if item.get_name() == href:\n return item\n\n return None",
"def lookupLink(cls, session, link, model, recordID):\n checkURL = Link.httpRegexSub(link.get('url', None))\n return session.query(cls)\\\n .join(model.__tablename__)\\\n .filter(model.id == recordID)\\\n .filter(cls.url == checkURL)\\\n .one_or_none()",
"def _expand_ref(self, element): \n if element.tag == 'xref':\n target = element.attrib.get('target', '')\n format = element.attrib.get('format', self.defaults['xref_format'])\n item = self._getItemByAnchor(target)\n if not self.indexmode:\n if not item:\n xml2rfc.log.warn(\"Can't resolve xref target %s\" % target)\n else:\n item.used = True\n # Create xref from index lookup\n if not item:\n text = '[' + target + ']'\n elif format == 'none':\n text = ''\n elif format == 'counter':\n text = item.counter\n elif format == 'title':\n text = item.title.strip() if item.title else ''\n else:\n # Default\n text = item.autoName\n\n # following the V3 HTML -\n # If you specify text, that is what you get.\n if element.text:\n text = element.text.rstrip()\n \n a = E.A(href='#' + target)\n a.attrib[\"class\"] = \"xref\"\n a.text = text\n if element.tail:\n a.tail = element.tail\n \n return [a]\n\n elif element.tag == 'eref':\n target = element.attrib.get('target', '')\n if element.text:\n a = E.A(element.text, href=target)\n a.tail = element.tail\n return [a]\n else:\n sp1 = E.SPAN('<')\n a = E.A(target, href=target)\n sp2 = E.SPAN('>')\n sp2.tail = element.tail\n return [sp1, a, sp2]\n elif element.tag == 'cref':\n self.cref_counter += 1\n anchor = element.attrib.get('anchor', None)\n if anchor is None:\n anchor = 'CREF' + str(self.cref_counter)\n a = E.A('[' + anchor + ']', id=anchor)\n a.attrib['class'] = 'info'\n source = element.attrib.get('source', '')\n if source:\n source = source + \": \"\n b = E.SPAN(source + element.text)\n b.attrib['class'] = 'info'\n a.append( b )\n self._indexCref(self.cref_counter, anchor)\n if element.tail:\n a.tail = element.tail\n return [a]\n elif element.tag == 'iref':\n return self._add_iref_to_index(element)\n elif element.tag == 'spanx':\n style = element.attrib.get('style', self.defaults['spanx_style'])\n text = ''\n if element.text:\n text = element.text\n elem = None\n if style == 'strong':\n elem = E.STRONG(text)\n elif style == 'verb':\n elem = E.SAMP(text)\n else:\n # Default to style=emph\n elem = E.EM(text)\n if element.tail:\n elem.tail = element.tail\n return [elem]",
"def resolve(self, anchors):\n\n for anchor in anchors:\n if self.node[DuAttrRefid] in anchor.ids():\n self.toAnchor = anchor\n break",
"def _dereference(self, ref_url, obj_path, recursions):\n # In order to start dereferencing anything in the referenced URL, we have\n # to read and parse it, of course.\n contents = _url.fetch_url(ref_url, self.__reference_cache, self.__encoding)\n\n # In this inner parser's specification, we can now look for the referenced\n # object.\n value = contents\n if len(obj_path) != 0:\n from prance.util.path import path_get\n try:\n value = path_get(value, obj_path)\n except (KeyError, IndexError, TypeError) as ex:\n raise _url.ResolutionError('Cannot resolve reference \"%s\": %s'\n % (ref_url.geturl(), str(ex)))\n\n # Deep copy value; we don't want to create recursive structures\n import copy\n value = copy.deepcopy(value)\n\n # Now resolve partial specs\n value = self._resolve_partial(ref_url, value, recursions)\n\n # That's it!\n return value",
"def resolve_to(self, v: syntax.Construct, resolution):\n if not hasattr(v, \"resolution\"):\n v.resolution = resolution",
"def resolve_reference(self, key, filetype=None):\n return self.__resolve_reference(\n key,\n self.get_metainfo().get(key), # TODO: use Java method with metaKey instead of getting metainfo here\n checked_filetype(filetype)\n )",
"def resolveURI(self, URI):\n ret = libxml2mod.xmlACatalogResolveURI(self._o, URI)\n return ret",
"async def resolve(self, location: ResourceLocation) -> ResourceType:\n # Resolve the (possibly relative) resource location into an absolute one.\n physical_location = self.location_resolver(location)\n # If this resource is already cached, return it.\n if (cached := self.cache.get(physical_location)) is not None:\n return cached\n # Otherwise, reload the resource.\n return await self._reload_resource(physical_location)",
"def resolve(self, address):\n address_map = self._address_map_from_spec_path(address.spec_path)\n if address not in address_map:\n self._raise_incorrect_address_error(address.spec_path, address.target_name, address_map)\n else:\n return address_map[address]",
"def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())",
"def read(self, href):\r\n\t\treturn self._read(href)",
"def read(self, href):\r\n\t\treturn self._read(href)",
"def resolve(self, container: object) -> object:\n raise NotImplementedError()",
"def get_link(prefix: str, identifier: str, use_bioregistry_io: bool = True) -> Optional[str]:\n providers = get_providers(prefix, identifier)\n for key in LINK_PRIORITY:\n if not use_bioregistry_io and key == \"bioregistry\":\n continue\n if key not in providers:\n continue\n rv = providers[key]\n if rv is not None:\n return rv\n return None"
]
| [
"0.559725",
"0.5572399",
"0.55626106",
"0.55213284",
"0.5492655",
"0.54890937",
"0.5466336",
"0.5449987",
"0.5390744",
"0.5251315",
"0.5218634",
"0.5183477",
"0.5123284",
"0.5101816",
"0.50858814",
"0.5025732",
"0.5021247",
"0.50134563",
"0.49961463",
"0.49439776",
"0.49255773",
"0.4923055",
"0.4833566",
"0.48167557",
"0.47585863",
"0.47279802",
"0.47255006",
"0.47255006",
"0.47122917",
"0.47095647"
]
| 0.6704159 | 0 |
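A sketch of the already-resolved fast path of resolve_stac_object, assuming pystac is installed; resolving a plain href would additionally need a readable STAC file and, for relative hrefs, an owner with a self link, as the record above shows.

```python
import pystac

child = pystac.Catalog(id="child", description="child catalog")
link = pystac.Link(rel="child", target=child)

result = link.resolve_stac_object()  # target object already set, so this is a no-op
print(result is link)                # True: the method returns the link itself
print(link.target is child)          # True: the resolved object is unchanged
```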
Determines if the link's target is a resolved STACObject. | def is_resolved(self) -> bool:
return self._target_object is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def target(self) -> Union[str, \"STACObject_Type\"]:\n if self._target_object:\n return self._target_object\n elif self._target_href:\n return self._target_href\n else:\n raise ValueError(\"No target defined for link.\")",
"def has_target_href(self) -> bool:\n return self._target_href is not None",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def has_target(self):\n return self.target is not None",
"def IsTarget(self, target_name):\n return target_name in self.GetTargets()",
"def has_target(self):\n return self._has_target",
"def _is_acquisition_link(link):\n return any(\n [rel for rel in link.rels if rel in LinkRelations.CIRCULATION_ALLOWED]\n )",
"def is_gentarget(self, target):\r\n raise NotImplementedError",
"def should_link(self, item):\r\n return item.__class__ in self.class_map.keys()",
"def is_ode_noad_link(self):\n if self.project_name in IDENTIFIERS:\n return True\n else:\n return False",
"def browse_target(self):\n return self.type in ('a', 's')",
"def _IsLink(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(\n file_attribute_flags & pyfsntfs.file_attribute_flags.REPARSE_POINT)",
"def resolve_stac_object(self, root: Optional[\"Catalog_Type\"] = None) -> \"Link\":\n if self._target_object:\n pass\n elif self._target_href:\n target_href = self._target_href\n\n # If it's a relative link, base it off the parent.\n if not is_absolute_href(target_href):\n if self.owner is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n \"without owner or start_href.\".format(target_href)\n )\n start_href = self.owner.get_self_href()\n\n if start_href is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n 'without owner \"self\" link set.'.format(target_href)\n )\n\n target_href = make_absolute_href(target_href, start_href)\n obj = None\n\n stac_io: Optional[pystac.StacIO] = None\n\n if root is not None:\n obj = root._resolved_objects.get_by_href(target_href)\n stac_io = root._stac_io\n\n if obj is None:\n\n if stac_io is None:\n if self.owner is not None:\n if isinstance(self.owner, pystac.Catalog):\n stac_io = self.owner._stac_io\n if stac_io is None:\n stac_io = pystac.StacIO.default()\n\n obj = stac_io.read_stac_object(target_href, root=root)\n obj.set_self_href(target_href)\n if root is not None:\n obj = root._resolved_objects.get_or_cache(obj)\n obj.set_root(root)\n self._target_object = obj\n else:\n raise ValueError(\"Cannot resolve STAC object without a target\")\n\n if (\n self.owner\n and self.rel in [pystac.RelType.CHILD, pystac.RelType.ITEM]\n and isinstance(self.owner, pystac.Catalog)\n ):\n assert self._target_object\n self._target_object.set_parent(self.owner)\n\n return self",
"def is_concrete(self):\r\n targets = list(self.resolve())\r\n return len(targets) == 1 and targets[0] == self",
"def is_re_analysis_link_present(self):\n return self.is_element_present(self.re_analysis_locator)",
"def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0",
"def isReference(node):\n return bool(isinstance(node, nodes.Referential)\n and node.get(DuAttrRefid, None))",
"def _is_open_access_link_(link_data, circulation_data):\n open_access_link = (\n link_data.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD and link_data.href\n )\n\n if open_access_link:\n return True\n\n # Try to deduce if the ast_link is open-access, even if it doesn't explicitly say it is\n rights_uri = link_data.rights_uri or circulation_data.default_rights_uri\n open_access_rights_link = (\n link_data.media_type in Representation.BOOK_MEDIA_TYPES\n and link_data.href\n and rights_uri in RightsStatus.OPEN_ACCESS\n )\n\n return open_access_rights_link",
"def at_target(self):\n return self.location == self.target_location",
"def is_anchored(self):\n return self.anchor is not None",
"def is_linked(self): \n return self.ichair_id is not None",
"def is_cross_onap_link(self, logical_link):\n for relationship in logical_link[\"relationship-list\"][\"relationship\"]:\n if relationship[\"related-to\"] == \"ext-aai-network\":\n return True\n return False",
"def isLinkCheckReq(self):\n return self.cid == LINKCHECKREQ",
"def try_to_point(s):\n target_graph = s.store.get_if_already_have( s.url )\n if target_graph==None:\n s.pointing_at = None\n return False\n start_pt = s.start_pt or \"START\"\n if sortof_type_str_of(start_pt) == \"STR\":\n reach = target_graph.flags[ start_pt ]\n elif sortof_type_str_of(start_pt) == \"INT\":\n reach = target_graph.nodes[start_pt]\n else:\n raise \"I can't figure out what s.start_pt is: %s\" % str(start_pt)\n if s.path == None or s.path == []:\n s.pointing_at = reach\n return True\n\n # for now, we'll just not worry about indexing beyond reference nodes.\n # this'll work just fine,\n # if you're only indexing within the graph\n for index in s.path:\n try:\n reach = reach[index]\n except TypeError:\n s.pointing_at = None\n return False\n s.pointing_at = reach\n return True",
"def islink(self):\n return os.path.islink(self.path)",
"def is_target_in(self, newtarget):\n from .utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n # Test if WCS \n if not self.has_wcs():\n print(\"WARNING: because there is no wcs solution, \"+\\\n \"I can't test the inclusion of the new astrotarget\")\n return True\n \n return self.wcs.coordsAreInImage(*newtarget.radec)",
"def islink(path):\n return get_instance(path).islink(path)",
"def should_render_as_link(self):\n if self.active and not self.render_link_for_active:\n return False\n return bool(self.url)",
"def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)",
"def needToLink(self):\n return _osgAnimation.AnimationManagerBase_needToLink(self)"
]
| [
"0.62964565",
"0.6135031",
"0.6005933",
"0.5843953",
"0.58416283",
"0.5790394",
"0.577382",
"0.5659708",
"0.56222975",
"0.55950385",
"0.5592904",
"0.5583178",
"0.5562626",
"0.554284",
"0.5501144",
"0.54868263",
"0.5481935",
"0.5480033",
"0.5472723",
"0.54365206",
"0.54336035",
"0.5375086",
"0.5365536",
"0.53538895",
"0.53500795",
"0.5314498",
"0.53053266",
"0.53050214",
"0.52723104",
"0.5255587"
]
| 0.66906935 | 0 |
Clones this link. This makes a copy of all link information, but does not clone a STACObject if one is the target of this link. | def clone(self) -> "Link":
cls = self.__class__
return cls(
rel=self.rel,
target=self.target,
media_type=self.media_type,
title=self.title,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def clone(self):\n raise NotImplementedError",
"def clone(self):",
"def __copy__(self):\r\n other = self.__class__(\r\n linkers=[copy(l) for l in self.linkers],\r\n wrapper=self.wrapper)\r\n return other",
"def copy(self):\r\n\t\tobj = DecaLink()\r\n\t\tfor k in self.__dict__.keys():\r\n\t\t\tobj.__setattr__(k, self.__getattribute__(k))\r\n\t\treturn obj",
"def clone(self) -> Any:\n return cp.copy(self)",
"def clone(self):\n from copy import deepcopy\n return deepcopy(self)",
"def clone(self):\n return None",
"def clone(self):\n return shallow_clone(self)",
"def clone(self):\n new_list = LinkedList()\n\n # Checking for empty list\n if self.head is None:\n return new_list\n\n # Copying head\n new_list.add_first(self.head.value)\n\n # Copying remaining nodes\n tail = new_list.head\n node = self.head.next_\n while node is not None:\n tail.next_ = Node(node.value)\n node = node.next_\n tail = tail.next_\n\n return new_list",
"def clone(self):\n return self.__class__(self.name, *self)",
"def clone(self):\n return _libsbml.Deletion_clone(self)",
"def clone(self):\n return _libsbml.Compartment_clone(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def clone(self):\n return deepcopy(self)",
"def copy_link(self):\n try:\n Clipboard.copy(self.url)\n except:\n self.ids.link.text=self.link_message",
"def clone(self):\n return copy.deepcopy(self)",
"def clone(self):\n return _libsbml.Member_clone(self)",
"def clone(self):\n return _libsbml.SBase_clone(self)",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def clone(self):\n return _libsbml.CompartmentReference_clone(self)",
"def clone(self):\r\n #return copy(self)\r\n cp = self.__class__(self.type, None, None, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp",
"def GetClone(self, *args, **kwargs):\n pass",
"def clone(self):\n return self.copy()",
"def clone(self):\r\n cp = self.__class__(self.type, self.data, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp",
"def clone(self):\n return self"
]
| [
"0.6272683",
"0.6001762",
"0.5967772",
"0.5881224",
"0.58453804",
"0.58337474",
"0.5790788",
"0.5785189",
"0.5755317",
"0.5731128",
"0.57152885",
"0.56804925",
"0.56655896",
"0.56652945",
"0.56652945",
"0.56652945",
"0.56652945",
"0.5663458",
"0.5652629",
"0.5647763",
"0.56377286",
"0.56342334",
"0.56342334",
"0.56342334",
"0.5629512",
"0.5606275",
"0.5581022",
"0.5573139",
"0.55589926",
"0.5540569"
]
| 0.61682236 | 1 |
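A sketch of clone as documented above, assuming pystac is installed; note the cloned link still points at the same STAC object.

```python
import pystac

child = pystac.Catalog(id="child", description="child catalog")
link = pystac.Link(rel="child", target=child, title="a child")

copied = link.clone()
print(copied is link)           # False: a new Link carrying the same rel/title/type
print(copied.target is child)   # True: the target STAC object is not deep-copied
```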
Deserializes a Link from a dict. | def from_dict(cls, d: Dict[str, Any]) -> "Link":
d = copy(d)
rel = d.pop("rel")
href = d.pop("href")
media_type = d.pop("type", None)
title = d.pop("title", None)
extra_fields = None
if any(d):
extra_fields = d
return cls(
rel=rel,
target=href,
media_type=media_type,
title=title,
extra_fields=extra_fields,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dict(cls, dikt: dict) -> 'Links':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'ExtraLink':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, _dict: Dict) -> 'NextHref':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n return cls(**args)",
"def _decode_link(self, link):\n\n if link.HasField(\"bucket\"):\n bucket = link.bucket\n else:\n bucket = None\n if link.HasField(\"key\"):\n key = link.key\n else:\n key = None\n if link.HasField(\"tag\"):\n tag = link.tag\n else:\n tag = None\n\n return (bucket, key, tag)",
"def from_dict(cls, _dict: Dict) -> 'FirstHref':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n return cls(**args)",
"def from_dict(cls, dikt) -> 'Edge':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Sitemap':\n return util.deserialize_model(dikt, cls)",
"def link_decode(key: str, link: str) -> str:\n\n parts = link.split('/')\n _id = parts[-1]\n dec_id = decode(key, _id)\n dec_link = '/'.join(parts[:-1]) + '/' + dec_id\n return dec_link",
"def read(data):\n return Link(**data)",
"def deserialize_object(d):\n pass",
"def from_dictionary(cls,\r\n dictionary):\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n links = dictionary.get('links')\r\n email_config = dictionary.get('emailConfig')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(links,\r\n email_config,\r\n dictionary)",
"def from_dict(cls, _dict: Dict) -> 'OfferingSpeed':\n args = {}\n if 'link_speed' in _dict:\n args['link_speed'] = _dict.get('link_speed')\n else:\n raise ValueError('Required property \\'link_speed\\' not present in OfferingSpeed JSON')\n return cls(**args)",
"def from_dict(cls, dikt) -> 'ConnectionEndPoint':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, _dict: Dict) -> 'Port':\n args = {}\n if 'direct_link_count' in _dict:\n args['direct_link_count'] = _dict.get('direct_link_count')\n else:\n raise ValueError('Required property \\'direct_link_count\\' not present in Port JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in Port JSON')\n if 'label' in _dict:\n args['label'] = _dict.get('label')\n else:\n raise ValueError('Required property \\'label\\' not present in Port JSON')\n if 'location_display_name' in _dict:\n args['location_display_name'] = _dict.get('location_display_name')\n else:\n raise ValueError('Required property \\'location_display_name\\' not present in Port JSON')\n if 'location_name' in _dict:\n args['location_name'] = _dict.get('location_name')\n else:\n raise ValueError('Required property \\'location_name\\' not present in Port JSON')\n if 'provider_name' in _dict:\n args['provider_name'] = _dict.get('provider_name')\n else:\n raise ValueError('Required property \\'provider_name\\' not present in Port JSON')\n if 'supported_link_speeds' in _dict:\n args['supported_link_speeds'] = _dict.get('supported_link_speeds')\n else:\n raise ValueError('Required property \\'supported_link_speeds\\' not present in Port JSON')\n return cls(**args)",
"def from_dict(cls, dikt) -> 'OrgApacheFelixHttpProperties':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'InlineResponse200Properties':\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'CatalogDataProductAttributeMediaGalleryEntryInterface':\n return deserialize_model(dikt, cls)",
"def from_json(cls, b):\n return cls.from_dict(json.loads(b))",
"def from_dict(cls, dikt) -> 'Body':\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'InlineResponse20011':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'NewsList':\n return deserialize_model(dikt, cls)",
"def _decode(self, input_dict):\n pass",
"def from_dict(cls, dikt) -> 'Body':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dct):\n dct['address'] = Address(**dct['address'])\n return cls(**dct)",
"def get_as_link(self, key):\n\n s = self.get(key)\n data = s.get_json()\n data.pop(\"property\", \"\")\n return LinkSetting(self, data)",
"def from_dict(cls, dikt) -> 'ShareStatusRead':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, _dict: Dict) -> 'PortsPaginatedCollectionNext':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in PortsPaginatedCollectionNext JSON')\n if 'start' in _dict:\n args['start'] = _dict.get('start')\n return cls(**args)",
"def from_dict(cls, _dict: Dict) -> 'PortsPaginatedCollectionFirst':\n args = {}\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in PortsPaginatedCollectionFirst JSON')\n return cls(**args)",
"def from_dict(self, d: dict):\n wntr.network.io.from_dict(d, append=self)",
"def from_dict(cls, dikt) -> \"InlineResponse201\":\n return util.deserialize_model(dikt, cls)"
]
| [
"0.75336295",
"0.7124407",
"0.62645185",
"0.6063125",
"0.6002239",
"0.5913116",
"0.5884565",
"0.5822485",
"0.5573647",
"0.556385",
"0.55602205",
"0.54055965",
"0.5388422",
"0.53881705",
"0.5371569",
"0.5370352",
"0.53601426",
"0.53588325",
"0.5337932",
"0.53161967",
"0.5304831",
"0.52876407",
"0.5284926",
"0.52776605",
"0.5242892",
"0.5225091",
"0.5213006",
"0.5205536",
"0.520364",
"0.5179417"
]
| 0.71413106 | 1 |
Creates a link to a root Catalog or Collection. | def root(cls, c: "Catalog_Type") -> "Link":
return cls(pystac.RelType.ROOT, c, media_type=pystac.MediaType.JSON) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initCatalog():\n t = \"SINGLE_LINKED\"\n catalog = model.newCatalog(t)\n return catalog",
"def _get_root_record(self) -> Link:\n rel = \"root\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def created(event):\n root = event.object\n registry = event.registry\n root.sdi_title = 'Simple Book Catalog'\n service = root['catalogs']\n service.add_catalog('books', update_indexes=True)\n books = registry.content.create('BookFolder', name='books', title='Books')\n root['books'] = books",
"def get_collection_link(db_id, collection_id):\n\n # Return a link to the relevant CosmosDB Container/Document Collection\n return \"dbs/\" + db_id + \"/colls/\" + collection_id",
"def parent(cls, c: \"Catalog_Type\") -> \"Link\":\n return cls(pystac.RelType.PARENT, c, media_type=pystac.MediaType.JSON)",
"def collection(cls, c: \"Collection_Type\") -> \"Link\":\n return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON)",
"def _get_self_collection(self) -> Link:\n rel = \"self\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def child(cls, c: \"Catalog_Type\", title: Optional[str] = None) -> \"Link\":\n return cls(\n pystac.RelType.CHILD, c, title=title, media_type=pystac.MediaType.JSON\n )",
"def _create_link(self):\n expression = str(self.ui.expression.text())\n pc = parse.ParsedCommand(expression, self._labels)\n label = str(self.ui.new_label.text()) or 'new component'\n new_id = core.data.ComponentID(label)\n link = parse.ParsedComponentLink(new_id, pc)\n return link",
"def resolve_stac_object(self, root: Optional[\"Catalog_Type\"] = None) -> \"Link\":\n if self._target_object:\n pass\n elif self._target_href:\n target_href = self._target_href\n\n # If it's a relative link, base it off the parent.\n if not is_absolute_href(target_href):\n if self.owner is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n \"without owner or start_href.\".format(target_href)\n )\n start_href = self.owner.get_self_href()\n\n if start_href is None:\n raise pystac.STACError(\n \"Relative path {} encountered \"\n 'without owner \"self\" link set.'.format(target_href)\n )\n\n target_href = make_absolute_href(target_href, start_href)\n obj = None\n\n stac_io: Optional[pystac.StacIO] = None\n\n if root is not None:\n obj = root._resolved_objects.get_by_href(target_href)\n stac_io = root._stac_io\n\n if obj is None:\n\n if stac_io is None:\n if self.owner is not None:\n if isinstance(self.owner, pystac.Catalog):\n stac_io = self.owner._stac_io\n if stac_io is None:\n stac_io = pystac.StacIO.default()\n\n obj = stac_io.read_stac_object(target_href, root=root)\n obj.set_self_href(target_href)\n if root is not None:\n obj = root._resolved_objects.get_or_cache(obj)\n obj.set_root(root)\n self._target_object = obj\n else:\n raise ValueError(\"Cannot resolve STAC object without a target\")\n\n if (\n self.owner\n and self.rel in [pystac.RelType.CHILD, pystac.RelType.ITEM]\n and isinstance(self.owner, pystac.Catalog)\n ):\n assert self._target_object\n self._target_object.set_parent(self.owner)\n\n return self",
"def addCollectionNode():\n return render_template(\"addCollectionNode.html\")",
"def create_collection_command(destination: str, thumbnail: str) -> None:\n collection = stac.create_collection(thumbnail_url=thumbnail)\n\n collection.set_self_href(destination)\n\n collection.save_object()",
"def add_root_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.add_root_bin\n self._get_provider_session('catalog_hierarchy_design_session').add_root_catalog(*args, **kwargs)",
"def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): Maps target to a list of linknames.\n self._link_map = {}",
"def link(self, link):\r\n return links.Link(self, link)",
"def _get_href_link(self, request, identifier, collection_name):\n prefix = self._update_masakari_link_prefix(request.application_url)\n return url_join(prefix,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def get_root_rest_url(self):\n # Gets the default url-name in the same way as django rest framework\n # does in relations.HyperlinkedModelSerializer\n root_instance = self.get_root_rest_element()\n rest_url = '%s-detail' % type(root_instance)._meta.object_name.lower()\n return reverse(rest_url, args=[str(root_instance.pk)])",
"def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text",
"def compartment(self):\n return \"_links\"",
"def _create_links_and_track(self, page_name, category_list):\n env = self.state.document.settings.env\n if not hasattr(env, \"categories\"):\n env.categories = {}\n\n link_rst = \"\"\n ncategs = 0\n for categ_name in category_list:\n #categ_name is the full category name - register that\n category = self.register_category(categ_name, env)\n category.pages.add(PageRef(page_name, env.docname))\n\n #now step up a step up each time the category hierarchy\n parent_category = categ_name\n while True:\n if r\"\\\\\" in parent_category:\n categs = parent_category.split(r\"\\\\\")\n else:\n break\n # remove the last item\n subcat = Category(categ_name, env.docname) #create the category with the full name\n subcat.name=categs.pop() # and then replace it with the last token of the name\n parent_category = r\"\\\\\".join(categs)\n\n #register the parent category\n parent = self.register_category(parent_category, env)\n parent.subcategories.add(subcat)\n\n # endwhile\n\n #category should be the last subcategory by this point\n link_rst += \"`%s <%s>`_ | \" % (categ_name, category.link(env.docname))\n ncategs += 1\n # endfor\n\n link_rst = \"**%s**: \" + link_rst.rstrip(\" | \") # remove final separator\n if ncategs == 1:\n link_rst = link_rst % \"Category\"\n else:\n link_rst = link_rst % \"Categories\"\n #endif\n\n return link_rst",
"def canonical(\n cls,\n item_or_collection: Union[\"Item_Type\", \"Collection_Type\"],\n title: Optional[str] = None,\n ) -> \"Link\":\n return cls(\n pystac.RelType.CANONICAL,\n item_or_collection,\n title=title,\n media_type=pystac.MediaType.JSON,\n )",
"def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath",
"def catalog_alias_create(self, args):\n try:\n if args.id:\n alias = self.server.connect_ermrest_alias(args.id)\n try:\n if alias.retrieve():\n print(\"Catalog alias already exists\")\n return\n except requests.HTTPError as e:\n if e.response.status_code == 404:\n pass\n else:\n raise\n owner = args.owner if args.owner else None\n alias = self.server.create_ermrest_alias(args.id, owner, args.alias_target)\n if not args.quiet:\n print(\"Created new catalog alias %s with the following configuration:\\n\" % alias.alias_id)\n pp(alias.retrieve())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog alias already exists\", e)\n else:\n raise",
"def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()",
"def CreateResourceInCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc, collection=col)\n print 'Created:', doc.title.text, doc.resource_id.text",
"def make_link_node(rawtext, app, type, slug, options):\r\n\r\n try:\r\n base = app.config.github_project_url\r\n if not base:\r\n raise AttributeError\r\n if not base.endswith('/'):\r\n base += '/'\r\n except AttributeError, err:\r\n raise ValueError('github_project_url configuration value is not set (%s)' % str(err))\r\n\r\n ref = base + type + '/' + slug + '/'\r\n set_classes(options)\r\n prefix = \"#\"\r\n if type == 'pull':\r\n prefix = \"PR \" + prefix\r\n node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,\r\n **options)\r\n return node",
"def BuildCatalogPath(self, p_item):\n #create a path for a GlossElement object\n if p_item.meta_type == METATYPE_ALISSELEMENT:\n return '%s/%s' % (p_item.center_parent, p_item.id)\n #create a path for a Google object\n elif p_item.meta_type == METATYPE_ALISSGOOGLE:\n return '%s/%s/%s' % (p_item.center_parent, p_item.elem_parent, p_item.id)\n #create a path for a GooglePage object\n elif p_item.meta_type == METATYPE_ALISSPAGE:\n return '%s/%s/%s/%s' % (p_item.center_parent, p_item.elem_parent, p_item.google_parent, p_item.id)\n #create a path for a ALiSSCenter object\n elif p_item.meta_type == METATYPE_ALISSCENTER:\n return '/'.join(p_item.getPhysicalPath())",
"def link(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.LINK_TO_TEMPLATE,\n EntityType.ROLE.value, None)",
"def catalog_create(self, args):\n try:\n if args.id and self.server.connect_ermrest(args.id).exists():\n print(\"Catalog already exists\")\n return\n owner = args.owner if args.owner else None\n catalog = self.server.create_ermrest_catalog(args.id, owner)\n if args.auto_configure:\n model = catalog.getCatalogModel()\n model.configure_baseline_catalog(**args.configure_args)\n if not args.quiet:\n print(\"Created new catalog %s with the following default configuration:\\n\" % catalog.catalog_id)\n pp(catalog.get('/').json())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog already exists\", e)\n else:\n raise e",
"def _create_links_and_track(self, page_name, category_list):\n env = self.state.document.settings.env\n if not hasattr(env, \"categories\"):\n env.categories = {}\n\n link_rst = \"\"\n ncategs = 0\n for item in category_list:\n has_subcat = False\n if r\"\\\\\" in item: \n categs = item.split(r\"\\\\\")\n has_subcat = True\n else:\n categs = [item]\n # endif\n\n print \n for index, categ_name in enumerate(categs):\n if categ_name not in env.categories:\n category = Category(categ_name)\n env.categories[categ_name] = category\n else:\n category = env.categories[categ_name]\n #endif\n category.pages.append(PageRef(page_name))\n if has_subcat and index > 0:\n category.subcategories.append(PageRef(categ_name))\n #endif\n link_rst += \":ref:`%s` | \" % categ_name\n ncategs += 1\n # endfor\n # endfor\n\n link_rst = \"`%s: <categories.html>`_ \" + link_rst\n if ncategs == 1:\n link_rst = link_rst % \"Category\"\n else:\n link_rst = link_rst % \"Categories\"\n #endif\n\n return link_rst"
]
| [
"0.638612",
"0.58883375",
"0.58257294",
"0.5813237",
"0.58120126",
"0.5705625",
"0.5569404",
"0.5393234",
"0.5327661",
"0.5320878",
"0.52987134",
"0.52738225",
"0.52723676",
"0.52303886",
"0.52119464",
"0.52098554",
"0.515138",
"0.51208967",
"0.51130015",
"0.5111391",
"0.5110331",
"0.5082915",
"0.5060926",
"0.5035115",
"0.5028568",
"0.50227994",
"0.5002081",
"0.5000584",
"0.4994592",
"0.49841878"
]
| 0.69907457 | 0 |
Creates a link to a parent Catalog or Collection. | def parent(cls, c: "Catalog_Type") -> "Link":
return cls(pystac.RelType.PARENT, c, media_type=pystac.MediaType.JSON) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_parent_record(self) -> Link:\n rel = \"parent\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def _get_absolute_url(self, parent_slug=None):\n return reverse(\n \"catalogue:category\",\n kwargs={\n \"category_slug\": self.get_full_slug(parent_slug=parent_slug),\n \"pk\": self.pk,\n },\n )",
"def create_urlpath(self, parent, slug):\r\n return URLPath.create_article(parent, slug, title=slug)",
"def link(cls, traceparent: str, attributes: Optional[Attributes] = None) -> None:\n cls.link_from_headers({\"traceparent\": traceparent}, attributes)",
"def add_parent(sender, instance, **kwargs):\n if not kwargs['created']:\n return\n for att in ['term', 'term_secondary', 'context']:\n if getattr(instance, att) is None:\n continue\n parent = getattr(instance, att).item_id\n child = instance.item_id\n ItemRelation.objects.get_or_create(\n parent_id=parent,\n child_id=child,\n visible=True,\n active=instance.active,\n )",
"def child(cls, c: \"Catalog_Type\", title: Optional[str] = None) -> \"Link\":\n return cls(\n pystac.RelType.CHILD, c, title=title, media_type=pystac.MediaType.JSON\n )",
"def parent(self, nid):\n self._parent = nid",
"def add_parent(self, parent, *args, **kwargs):\n return parent.add_child(self, **kwargs)",
"def parent(self):\n return self._collection.parent(self)",
"def connect_to(self, parent: TableModel[Any]) -> None:\n if self.connector:\n raise Exception(\"Attempting to connect an already connected sub-table instance\")\n\n # Confirm that the source table has a relation to the parent table\n # that is now claiming us as a sub-table\n if parent.id_field not in self.model.table_fields:\n raise ValueError(\n f\"Can not use {self.model.table} as a sub-table of {parent.table}, \"\n f\"as it has no foreign key to {parent.table}\"\n )\n\n self.connector = parent.id_field\n self.model.foreigners[parent.id_field] = (parent.id_field, parent)\n self.validate()",
"def addlink(self, parent=None, child=None):\n parent = self.getnodenamed(parent) # verify pointer.\n child = self.getnodenamed(child) # verify pointer.\n\n # (node_bn* parent, node_bn* child)\n cnetica.AddLink_bn.argtypes = [c_void_p, c_void_p]\n cnetica.AddLink_bn.restype = c_int\n return cnetica.AddLink_bn(parent, child) # link_index",
"def parent(self, parent):\n\n self._parent = parent",
"def parent(self, parent):\n\n self._parent = parent",
"def parent(self, parent):\n\n self._parent = parent",
"def parent(self, parent):\n\n self._parent = parent",
"def get_parentID(self):\n parent_path = self.collection.container\n if self.collection.is_root:\n parent_path = \"/\"\n parent = Collection.find(parent_path)\n return parent.uuid",
"def link(self, req, ino, newparent, newname):\r\n self.reply_err(req, EROFS)",
"def getParent():",
"def parent(self, parent: AbstractPaths):\r\n self._parent = parent",
"def parent_asin(self, parent_asin):\n\n self._parent_asin = parent_asin",
"def connectToParent(self, *args):\n return _libsbml.SBase_connectToParent(self, *args)",
"def attach(self, name: str) -> ContainerReference:\n self.parent = ContainerReference(name)\n return self.parent",
"def get_parentID(self):\n parent = Collection.find(self.resource.container)\n return parent.uuid",
"def parent_id(self, new_id: str) -> None:\n self._db_data.parent_id = new_id",
"def parent(self, _):\n raise AttributeError('The technical root cannot have a parent.')",
"def parent(self, parent_object, limit_parent_language=True):\n return self.all().parent(parent_object, limit_parent_language)",
"def set_parent(self, parent):\n self.parent = parent",
"def set_parent(self, parent):\n self.parent = parent",
"def get_collection_link(db_id, collection_id):\n\n # Return a link to the relevant CosmosDB Container/Document Collection\n return \"dbs/\" + db_id + \"/colls/\" + collection_id",
"def cambiar_parent(self):\r\n self.client.parent = self"
]
| [
"0.67542535",
"0.6109863",
"0.6103144",
"0.6049322",
"0.6006557",
"0.5838816",
"0.5682152",
"0.5543328",
"0.5493133",
"0.5490017",
"0.5449701",
"0.54340756",
"0.54340756",
"0.54340756",
"0.54340756",
"0.5416398",
"0.541076",
"0.5406102",
"0.5401539",
"0.5392546",
"0.53919244",
"0.5379279",
"0.5337644",
"0.53356045",
"0.53247666",
"0.53127426",
"0.52850324",
"0.52850324",
"0.52808446",
"0.52775085"
]
| 0.7330132 | 0 |
Creates a link to an item's Collection. | def collection(cls, c: "Collection_Type") -> "Link":
return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_collection_link(db_id, collection_id):\n\n # Return a link to the relevant CosmosDB Container/Document Collection\n return \"dbs/\" + db_id + \"/colls/\" + collection_id",
"def _link_items(self):\n pass",
"def canonical(\n cls,\n item_or_collection: Union[\"Item_Type\", \"Collection_Type\"],\n title: Optional[str] = None,\n ) -> \"Link\":\n return cls(\n pystac.RelType.CANONICAL,\n item_or_collection,\n title=title,\n media_type=pystac.MediaType.JSON,\n )",
"def _get_self_collection(self) -> Link:\n rel = \"self\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def get_collection_url(self, **kwargs):\n\n url = self._generate_url(url_type='collection', **kwargs)\n return url",
"def __init__(self, collection):\n self.collection = collection",
"def addCollectionNode():\n return render_template(\"addCollectionNode.html\")",
"def addCollection():\n return render_template(\"addCollection.html\")",
"def _get_href_link(self, request, identifier, collection_name):\n prefix = self._update_masakari_link_prefix(request.application_url)\n return url_join(prefix,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def load_references(self, collections, item):",
"def create_collection(title):\n result = api_request('post', api_url('collections'), json={\"title\": title})\n return _collection_from_response(result)",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def add_to_collection(collection_id):\n\tpost_json = request.get_json()\n\tcollection = models.Collection.query.get(collection_id)\n\tif not collection:\n\t\tabort(400)\n\n\tif post_json['content_type'] == \"html\":\n\t\tarticle = models.Article.query.get(post_json['article_id'])\n\t\tif not article:\n\t\t\tabort(400)\n\t\titem = models.CollectionArticle(\n\t\t\tcollection_id = collection.id,\n\t\t\tarticle_id = article.id,\n\t\t\torder = collection.get_num_items()\n\t\t)\n\telif post_json['content_type'] == \"image\":\n\t\timage = models.Image.query.get(post_json['image_id'])\n\t\tif not image:\n\t\t\tabort(400)\n\t\titem = models.CollectionImage(\n\t\t\tcollection_id = collection.id,\n\t\t\timage_id = image.id,\n\t\t\torder = collection.get_num_items()\n\t\t)\n\tdb.session.add(item)\n\tdb.session.commit()\n\treturn jsonify({'message': 'Success'}), 201",
"def create_collection_command(destination: str, thumbnail: str) -> None:\n collection = stac.create_collection(thumbnail_url=thumbnail)\n\n collection.set_self_href(destination)\n\n collection.save_object()",
"def link(self, link):\r\n return links.Link(self, link)",
"def create_collections(self):\n\n ''''''",
"def item(cls, item: \"Item_Type\", title: Optional[str] = None) -> \"Link\":\n return cls(\n pystac.RelType.ITEM, item, title=title, media_type=pystac.MediaType.JSON\n )",
"def add_collection(db_name, collection_name):\n db = client[db_name]\n collection = db[collection_name]\n return collection",
"def post_collection():\n\tpost_json = request.get_json()\n\tif not post_json:\n\t\tabort(400)\n\ttitle = post_json['title']\n\tdescription = post_json['description']\n\tcategory = post_json['category']\n\tuser_id = post_json['user_id']\n\n\tif None in [title, description, category, user_id]:\n\t\tabort(400)\n\n\tcollection = models.Collection(\n\t\tuser_id = user_id,\n\t\ttitle = title,\n\t\tdescription = description,\n\t\tcategory = category,\n\t\tpublished = False,\n\t\tpublish_date = None,\n\t\tthumbnail = None,\n\t)\n\tdb.session.add(collection)\n\tdb.session.commit()\n\treturn jsonify({'collection_id':collection.id}), 201",
"def _get_bookmark_link(self, request, identifier, collection_name):\n base_url = remove_trailing_version_from_href(request.application_url)\n base_url = self._update_masakari_link_prefix(base_url)\n return url_join(base_url,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def test_create_collection(self):\n pass",
"def create_collection(collection):\n return db[collection]",
"def url(self):\n\n if self.identifier and self.identifier != \"\":\n return self.collection.url + self.identifier + \"/\"\n else:\n return self.collection.url",
"def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass",
"def collection_attach(self, name, version_id):\n try:\n return CastleCollection(name, self, version_id)\n except:\n raise",
"def __init__(self,\n collection_summary=None,\n collection_parent=None,\n collection_item=None):\n\n # Initialize members of the class\n self.collection_summary = collection_summary\n self.collection_parent = collection_parent\n self.collection_item = collection_item",
"def get_site_collection(self, request):\n\n objects = self.get()\n\n groups = [\n ('topics', request.translate(_(\"Topics\"))),\n ('news', request.translate(_(\"Latest news\"))),\n ('imagesets', request.translate(_(\"Photo Albums\"))),\n ('forms', request.translate(_(\"Forms\"))),\n ('directories', request.translate(_(\"Directories\"))),\n ('resources', request.translate(_(\"Resources\"))),\n ]\n\n links = []\n\n for id, label in groups:\n for obj in objects[id]:\n # in addition to the default url/name pairings we use a group\n # label which will be used as optgroup label\n links.append({\n 'group': label,\n 'name': obj.title,\n 'url': request.link(obj)\n })\n\n return links",
"def toolbar_link(url, title):\n return LazyToolbarItem(\"staff_toolbar.items.Link\", url=url, title=title)",
"def item_href(self, item_href):\n\n self._item_href = item_href",
"def create_item_page():\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template('add_item.html', catagories=catagories, values={})"
]
| [
"0.66433454",
"0.64570546",
"0.6361431",
"0.61382896",
"0.609448",
"0.5907729",
"0.587232",
"0.5871424",
"0.5854887",
"0.57617444",
"0.5746278",
"0.5737202",
"0.56804323",
"0.5651097",
"0.5628368",
"0.5599761",
"0.5560198",
"0.55528027",
"0.5549098",
"0.5530474",
"0.5461734",
"0.5432869",
"0.54256153",
"0.5412185",
"0.5390474",
"0.53644973",
"0.53349334",
"0.5328712",
"0.53117675",
"0.5310081"
]
| 0.6910698 | 0 |
Creates a link to a child Catalog or Collection. | def child(cls, c: "Catalog_Type", title: Optional[str] = None) -> "Link":
return cls(
pystac.RelType.CHILD, c, title=title, media_type=pystac.MediaType.JSON
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parent(cls, c: \"Catalog_Type\") -> \"Link\":\n return cls(pystac.RelType.PARENT, c, media_type=pystac.MediaType.JSON)",
"def addlink(self, parent=None, child=None):\n parent = self.getnodenamed(parent) # verify pointer.\n child = self.getnodenamed(child) # verify pointer.\n\n # (node_bn* parent, node_bn* child)\n cnetica.AddLink_bn.argtypes = [c_void_p, c_void_p]\n cnetica.AddLink_bn.restype = c_int\n return cnetica.AddLink_bn(parent, child) # link_index",
"def add_child(self, cls, id=None, collection=\"channels\", prefix=\"ch_\", attr_name=\"\", **kwargs):\n child = cls(self, id, **kwargs)\n collection_data = getattr(self, collection, {})\n if isinstance(collection_data, CommonBase.BaseChannelCreator):\n collection_data = {}\n # Create channel interface if prefix or name is present\n if (prefix or attr_name) and id is not None:\n if not collection_data:\n # Add a grouplist to the parent.\n setattr(self, collection, collection_data)\n collection_data[id] = child\n child._collection = collection\n if attr_name:\n setattr(self, attr_name, child)\n child._name = attr_name\n else:\n setattr(self, f\"{prefix}{id}\", child)\n child._name = f\"{prefix}{id}\"\n elif attr_name and id is None:\n # If attribute name is passed with no channel id\n # set the child to the attribute name.\n setattr(self, attr_name, child)\n child._name = attr_name\n else:\n if collection_data:\n raise ValueError(f\"An attribute '{collection}' already exists.\")\n setattr(self, collection, child)\n child._name = collection\n return child",
"def collection(cls, c: \"Collection_Type\") -> \"Link\":\n return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON)",
"def add_link(self,link,verbose=False):\n label, child = link\n self.outgoing.add((label,child))\n child.incoming.add((label,self))\n if verbose: print('added', label, self.nodeid, child.nodeid)",
"def get_collection_link(db_id, collection_id):\n\n # Return a link to the relevant CosmosDB Container/Document Collection\n return \"dbs/\" + db_id + \"/colls/\" + collection_id",
"def link_child(self, parent, child):\n nodelist = self.get_nodes()\n parent_index = nodelist.index(parent)\n child_index = nodelist.index(child)\n\n self.__nodes[parent_index].append(child_index)",
"def initCatalog():\n t = \"SINGLE_LINKED\"\n catalog = model.newCatalog(t)\n return catalog",
"def create_urlpath(self, parent, slug):\r\n return URLPath.create_article(parent, slug, title=slug)",
"def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()",
"def add_child_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.add_child_bin\n self._get_provider_session('catalog_hierarchy_design_session').add_child_catalog(*args, **kwargs)",
"def _create_link(self):\n expression = str(self.ui.expression.text())\n pc = parse.ParsedCommand(expression, self._labels)\n label = str(self.ui.new_label.text()) or 'new component'\n new_id = core.data.ComponentID(label)\n link = parse.ParsedComponentLink(new_id, pc)\n return link",
"def link(cls, traceparent: str, attributes: Optional[Attributes] = None) -> None:\n cls.link_from_headers({\"traceparent\": traceparent}, attributes)",
"def root(cls, c: \"Catalog_Type\") -> \"Link\":\n return cls(pystac.RelType.ROOT, c, media_type=pystac.MediaType.JSON)",
"def create_child(self):\n raise NotImplementedError",
"def connectToChild(self):\n return _libsbml.GeneProductAssociation_connectToChild(self)",
"def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink",
"def link(self, link):\r\n return links.Link(self, link)",
"def link(self, req, ino, newparent, newname):\r\n self.reply_err(req, EROFS)",
"def created(event):\n root = event.object\n registry = event.registry\n root.sdi_title = 'Simple Book Catalog'\n service = root['catalogs']\n service.add_catalog('books', update_indexes=True)\n books = registry.content.create('BookFolder', name='books', title='Books')\n root['books'] = books",
"def create_collection_command(destination: str, thumbnail: str) -> None:\n collection = stac.create_collection(thumbnail_url=thumbnail)\n\n collection.set_self_href(destination)\n\n collection.save_object()",
"def add_parent(sender, instance, **kwargs):\n if not kwargs['created']:\n return\n for att in ['term', 'term_secondary', 'context']:\n if getattr(instance, att) is None:\n continue\n parent = getattr(instance, att).item_id\n child = instance.item_id\n ItemRelation.objects.get_or_create(\n parent_id=parent,\n child_id=child,\n visible=True,\n active=instance.active,\n )",
"def post_add_link(self):\n course = courses.Course(self)\n link = course.add_link()\n link.href = ''\n course.save()\n self.redirect(self.get_action_url(\n 'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))",
"def _get_absolute_url(self, parent_slug=None):\n return reverse(\n \"catalogue:category\",\n kwargs={\n \"category_slug\": self.get_full_slug(parent_slug=parent_slug),\n \"pk\": self.pk,\n },\n )",
"def _get_parent_record(self) -> Link:\n rel = \"parent\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))",
"def _link_items(self):\n pass",
"def _get_href_link(self, request, identifier, collection_name):\n prefix = self._update_masakari_link_prefix(request.application_url)\n return url_join(prefix,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)",
"def create_collection(self, collection_name, parent_collection_id=None, parent_collection_name=None, return_results=False):\n # Making sure we have the data we need\n if not parent_collection_id:\n if not parent_collection_name:\n print('Either the name of id of the parent collection must be provided.')\n if parent_collection_name == 'Root':\n parent_collection_id = None\n else:\n parent_collection_id = self.get_item_id('collection', parent_collection_name)\n\n res = self.post('/api/collection', json={'name':collection_name, 'parent_id':parent_collection_id, 'color':'#509EE3'})\n if return_results:\n return res"
]
| [
"0.62376",
"0.5781384",
"0.57675976",
"0.57353675",
"0.5678566",
"0.55408216",
"0.5517142",
"0.54925543",
"0.5476133",
"0.54684025",
"0.54334456",
"0.5425332",
"0.5413889",
"0.53119826",
"0.5287868",
"0.52431107",
"0.52239215",
"0.5204206",
"0.5199658",
"0.5180586",
"0.51772606",
"0.5169785",
"0.5144121",
"0.5077451",
"0.5070347",
"0.5055519",
"0.50374115",
"0.501073",
"0.49827173",
"0.49646068"
]
| 0.6712364 | 0 |
Creates a link to an Item. | def item(cls, item: "Item_Type", title: Optional[str] = None) -> "Link":
return cls(
pystac.RelType.ITEM, item, title=title, media_type=pystac.MediaType.JSON
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass",
"def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()",
"async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)",
"def add_item(self, item):\n if item.get_type() == ebooklib.ITEM_STYLE:\n self.add_link(href=item.get_name(), rel=\"stylesheet\", type=\"text/css\")\n\n if item.get_type() == ebooklib.ITEM_SCRIPT:\n self.add_link(src=item.get_name(), type=\"text/javascript\")",
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"def create_item(self, user: User, **kwargs) -> None:",
"def add_item(link, desc, title, date):\n item = rsslib.Item()\n item.link = link\n item.description = 'Keywords: ' + desc\n item.title = title\n item.pubDate = datetime.strptime(date, '%Y-%m-%d, %H:%M%p %Z')\n return item",
"def test_link(app): # pylint: disable=unused-argument,redefined-outer-name\n\n link = {'id': str(uuid4()), 'link': 'https://example.org/linkx', 'tags': ['test'], 'created': datetime.utcnow().isoformat()}\n dynamo.tables[TABLE_NAME].put_item(Item=link)\n yield link",
"def item_href(self, item_href):\n\n self._item_href = item_href",
"def wrap_spotify_link(item, text=''):\n\n # generate default text if no text has been given\n if not text:\n name = item['name']\n if item['type'] == 'playlist':\n user = SP.user(item['owner']['id'])['display_name']\n text = f'{name} by {user}'\n elif item['type'] == 'artist':\n text = name\n else:\n artist = item['artists'][0]['name']\n text = f'{name} by {artist}'\n\n link = item['external_urls']['spotify']\n return f'<a href=\"{link}\">{text}</a>'",
"def _link_items(self):\n pass",
"def toolbar_link(url, title):\n return LazyToolbarItem(\"staff_toolbar.items.Link\", url=url, title=title)",
"def new_link(self, key, link, default):\n\n s = self._new_link()\n s.key = key\n s.link = link\n s.default = default\n return s",
"def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)",
"def make(item:dict):\n main_image = item[\"item_json\"][\"Item\"][\"mediumImageUrls\"][0][\"imageUrl\"]\n new_main_image = rak_image_mainpulation.sz10(main_image_url=main_image)\n embedVar = discord.Embed(title=item[\"item_name\"], description=item[\"price\"], color=0x00ff00)\n embedVar.set_image(url=new_main_image)\n embedVar.add_field(name=\"Link\", value=item[\"item_url\"], inline=False)\n return embedVar",
"def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }",
"def add_item(self, item):\n if item.media_type == '':\n (has_guessed, media_type) = guess_type(item.get_name().lower())\n\n if has_guessed:\n if media_type is not None:\n item.media_type = media_type\n else:\n item.media_type = has_guessed\n else:\n item.media_type = 'application/octet-stream'\n\n if not item.get_id():\n # make chapter_, image_ and static_ configurable\n if isinstance(item, EpubHtml):\n item.id = 'chapter_%d' % self._id_html\n self._id_html += 1\n elif isinstance(item, EpubImage):\n item.id = 'image_%d' % self._id_image\n self._id_image += 1\n else:\n item.id = 'static_%d' % self._id_image\n self._id_image += 1\n\n item.book = self\n self.items.append(item)\n\n return item",
"def createItem(name, description, category_id, image, user_id):\n i = Item(name=name, description=description, category_id=category_id,\n image=image, user_id=user_id, pub_date=datetime.utcnow())\n db_session.add(i)\n db_session.commit()\n return i",
"def create_link(self, key, link, default):\n\n setting = self.new_link(key, link, default)\n setting.create()\n return setting",
"def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()",
"def link(self, link):\r\n return links.Link(self, link)",
"def add_item(self, name, url):\n self.insert(\"\", \"end\", values=(name, url, \"\"))\n # Add the item - backend\n s.updateItem({\"item\": name, \"url\": url, \"status\": \"\", \"pstatus\": \"\"})\n\n self.selection_clear()",
"def add_link(self, link):\n raise NotImplementedError",
"def post(self, item):\n\n db.session.add(item)\n\n return item",
"def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))",
"def createLink(self, source, destination):\n log(\"creating link\")\n\n if \"flix\" in source:\n return \"%s\" % +OSUtils.createLink(source, destination)\n return \"0\"",
"def createItem(name, category, price, user_id):\n try:\n description = wikipedia.summary(name)\n except wikipedia.exceptions.DisambiguationError as e:\n description = wikipedia.summary(name + \" \" + category.name)\n\n i = Item(name=name, description=description,\n category_id=category.id, price=price, user_id=user_id)\n session.add(i)\n session.commit()\n print 'Item \"' + name + '\" added.'\n return i",
"def createLink(self, downloadUrl, title):\n newUrl = downloadUrl.replace(\"details\", \"download\") \n return self.url + '/' + newUrl",
"def initCreateFeedItem(self, item):\r\n text_utils.shorten(item.descr, MAX_DESCR_LEN)\r\n return item"
]
| [
"0.71673506",
"0.68592036",
"0.66910374",
"0.6681152",
"0.6672842",
"0.66544336",
"0.648805",
"0.64858866",
"0.64009356",
"0.6378397",
"0.6374486",
"0.63420165",
"0.6303804",
"0.62359434",
"0.6182014",
"0.6174261",
"0.6141716",
"0.6140531",
"0.6136516",
"0.6130008",
"0.61296827",
"0.6126565",
"0.6017199",
"0.5999948",
"0.5971217",
"0.590665",
"0.58832604",
"0.5845265",
"0.5844966",
"0.58373725"
]
| 0.72301376 | 0 |
Creates a canonical link to an Item or Collection. | def canonical(
cls,
item_or_collection: Union["Item_Type", "Collection_Type"],
title: Optional[str] = None,
) -> "Link":
return cls(
pystac.RelType.CANONICAL,
item_or_collection,
title=title,
media_type=pystac.MediaType.JSON,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _link_items(self):\n pass",
"def _get_href_link(self, request, identifier, collection_name):\n prefix = self._update_masakari_link_prefix(request.application_url)\n return url_join(prefix,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def link(self, link):\r\n return links.Link(self, link)",
"def _get_self_collection(self) -> Link:\n rel = \"self\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)",
"def item(cls, item: \"Item_Type\", title: Optional[str] = None) -> \"Link\":\n return cls(\n pystac.RelType.ITEM, item, title=title, media_type=pystac.MediaType.JSON\n )",
"def collection(cls, c: \"Collection_Type\") -> \"Link\":\n return cls(pystac.RelType.COLLECTION, c, media_type=pystac.MediaType.JSON)",
"def get_canonical_link(self):\n if self.article.final_url:\n kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'}\n meta = self.parser.getElementsByTag(self.article.doc, **kwargs)\n if meta is not None and len(meta) > 0:\n href = self.parser.getAttribute(meta[0], 'href')\n if href:\n href = href.strip()\n o = urlparse(href)\n if not o.hostname:\n z = urlparse(self.article.final_url)\n domain = '%s://%s' % (z.scheme, z.hostname)\n href = urljoin(domain, href)\n return href\n return self.article.final_url",
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"def wrap_spotify_link(item, text=''):\n\n # generate default text if no text has been given\n if not text:\n name = item['name']\n if item['type'] == 'playlist':\n user = SP.user(item['owner']['id'])['display_name']\n text = f'{name} by {user}'\n elif item['type'] == 'artist':\n text = name\n else:\n artist = item['artists'][0]['name']\n text = f'{name} by {artist}'\n\n link = item['external_urls']['spotify']\n return f'<a href=\"{link}\">{text}</a>'",
"def _get_bookmark_link(self, request, identifier, collection_name):\n base_url = remove_trailing_version_from_href(request.application_url)\n base_url = self._update_masakari_link_prefix(base_url)\n return url_join(base_url,\n self._get_project_id(request),\n collection_name,\n str(identifier))",
"def link(self):\n return Link(connection=self)",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def getCanonicalLink(self, article):\n kwargs = {'tag':'link', 'attr':'rel', 'value':'canonical'}\n meta = Parser.getElementsByTag(article.doc, **kwargs)\n if meta is not None and len(meta) > 0:\n href = meta[0].attrib.get('href')\n if href:\n href = href.strip()\n o = urlparse(href)\n if not o.hostname:\n z = urlparse(article.finalUrl)\n domain = '%s://%s' % (z.scheme, z.hostname)\n href = urljoin(domain, href)\n return href\n return article.finalUrl",
"def linkTo(sharedProxyOrItem):\n if isinstance(sharedProxyOrItem, sharing.SharedProxy):\n userStore = sharing.itemFromProxy(sharedProxyOrItem).store\n else:\n userStore = sharedProxyOrItem.store\n appStore = isAppStore(userStore)\n if appStore:\n # This code-path should be fixed by #2703; PublicWeb is deprecated.\n from xmantissa.publicweb import PublicWeb\n substore = userStore.parent.getItemByID(userStore.idInParent)\n pw = userStore.parent.findUnique(PublicWeb, PublicWeb.application == substore)\n path = [pw.prefixURL.encode('ascii')]\n else:\n for lm in userbase.getLoginMethods(userStore):\n if lm.internal:\n path = ['users', lm.localpart.encode('ascii')]\n break\n else:\n raise RuntimeError(\n \"Shared item is in a user store with no\"\n \" internal username -- can't generate a link.\")\n if (sharedProxyOrItem.shareID == getDefaultShareID(userStore)):\n shareID = sharedProxyOrItem.shareID\n path.append('')\n else:\n shareID = None\n path.append(sharedProxyOrItem.shareID)\n return _ShareURL(shareID, scheme='', netloc='', pathsegs=path)",
"def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})",
"def get_canonical_link(self, article):\r\n if article.final_url:\r\n kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'}\r\n meta = self.parser.getElementsByTag(article.doc, **kwargs)\r\n if meta is not None and len(meta) > 0:\r\n href = self.parser.getAttribute(meta[0], 'href')\r\n if href:\r\n href = href.strip()\r\n o = urlparse(href)\r\n if not o.hostname:\r\n z = urlparse(article.final_url)\r\n domain = '%s://%s' % (z.scheme, z.hostname)\r\n href = urljoin(domain, href)\r\n return href\r\n return article.final_url",
"def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def get_publish_link(self):\n return self.get_link(PUBLISH_LINK_REL)",
"def get_collection_link(db_id, collection_id):\n\n # Return a link to the relevant CosmosDB Container/Document Collection\n return \"dbs/\" + db_id + \"/colls/\" + collection_id",
"def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def anchor_to_resource(resource, post_create_func=None, title=None):\n href = resource.get('href')\n resource = {\n \"description\": title or resource.text_content().encode('utf8'),\n \"name\": href.split('/')[-1],\n \"url\": href,\n \"format\": href[href.rfind(\".\")+1:].upper(),\n }\n if post_create_func:\n post_create_func(resource)\n return resource",
"def createLink(context, title, link, exclude_from_nav=False):\n oid = idnormalizer.normalize(title, 'es')\n if not hasattr(context, oid):\n context.invokeFactory('Link', id=oid, title=title, remoteUrl=link)\n link = context[oid]\n if exclude_from_nav:\n link.setExcludeFromNav(True)\n link.reindexObject()",
"def get_absolute_url(self):\n return reverse('authors', args=[str(self.id)])",
"def link(self, id):\r\n return links.RepoLink(self, id)",
"def get_absolute_url(self):\n\t\treturn reverse('author-detail', args=[str(self.id)])",
"def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''",
"def _getWikiLink(self, link):\n return reverse('wiki.document',\n kwargs={'document_slug': link.replace(' ', '+')})",
"def test_bug_22_at_plone_org(self):\n curl = re.compile('<link\\srel\\s*=\\s*\"canonical\"\\s+' \\\n '[^>]*href\\s*=\\s*\\\"([^\\\"]*)\\\"[^>]*>', re.S|re.M)\n # When adapter registered for the object - canoncal link present on the page\n self.assertNotEqual( queryAdapter(self.my_doc, ICanonicalLink), None)\n\n res = self.publish(path=self.mydoc_path, basic=self.basic_auth)\n self.assertNotEqual(curl.search(res.getBody()), None)\n\n # Now remove adapter from the registry -> this should :\n # - not break page on rendering;\n # - canonical link will be absent on the page\n gsm = getGlobalSiteManager()\n gsm.unregisterAdapter(DefaultCanonicalLinkAdapter, [ITraversable,],\n ICanonicalLink)\n self.assertEqual( queryAdapter(self.my_doc, ICanonicalLink), None)\n\n res = self.publish(path=self.mydoc_path, basic=self.basic_auth)\n self.assertEqual(curl.search(res.getBody()), None)\n\n # register adapter back in the global site manager\n gsm.registerAdapter(DefaultCanonicalLinkAdapter, [ITraversable,],\n ICanonicalLink)",
"def get_item_url(self, item):\n return self.get_absolute_url(item, 'detail')",
"async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })"
]
| [
"0.5986782",
"0.58662117",
"0.58599937",
"0.58512133",
"0.5758114",
"0.5712557",
"0.5686602",
"0.5646597",
"0.55707866",
"0.5521616",
"0.5483528",
"0.5483245",
"0.5466893",
"0.5444288",
"0.54267216",
"0.5385771",
"0.53692365",
"0.53664565",
"0.53472406",
"0.532653",
"0.5222759",
"0.52190185",
"0.52162206",
"0.51977795",
"0.5190844",
"0.5187061",
"0.51725197",
"0.5165838",
"0.5144513",
"0.51363117"
]
| 0.81255555 | 0 |
fetch the crime data | def test_fetch_crime(self):
assert isinstance(_tabular.fetch_crime_data(),
pd.DataFrame) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_data(self):",
"def _fetch_data(self):\n pass",
"def _fetch_ucr_data_single(year, which):\n try:\n yy = str(year)[2:]\n yy_pre = str(year - 1)[2:]\n\n if which == 'index':\n filename = f'CrimeData_{yy}_{yy_pre}.xlsx'\n elif which == 'domestic':\n filename = f'DomesticOffenses_{yy}_{yy_pre}.xlsx'\n elif which == 'hate':\n filename = f'HateCrime_{yy}_{yy_pre}.xlsx'\n elif which == 'school':\n filename = f'SchoolIncidents_{yy}_{yy_pre}.xlsx'\n\n url = f'http://www.isp.state.il.us/docs/cii/cii{yy}/ds/{filename}' \n df = pd.read_excel(url)\n select_cols = lambda x: not re.search('\\d', x)\n\n if which == 'school':\n df = df.rename(columns=lambda x: x[:-2].lower() if x[-2:] == str(year)[2:] else x.lower())\n df = df.loc[:, df.columns.map(select_cols)]\n else:\n def helper(df,year):\n rename_cols = lambda x: x[:-2].lower() if x[-2:] == str(year)[2:] else x.lower()\n \n return (\n df\n .rename(columns=rename_cols)\n .loc[:, df.rename(columns=rename_cols).columns.map(select_cols)]\n .assign(year=year)\n .iloc[:102, ]\n )\n\n df = pd.concat([helper(df, year), helper(df, year - 1)])\n\n return _transform_ucr_data(df, which)\n except XLRDError:\n raise ValueError(\"WARNING: Uniform Crime Report data is up to date.\")\n except:\n raise",
"def covid_fetch():\n #Sets the structure of the data retrieved from the API\n cases_and_deaths = {\n \"date\": \"date\",\n \"areaName\": \"areaName\",\n \"areaCode\": \"areaCode\",\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\n \"newDeathsByDeathDate\": \"newDeathsByDeathDate\",\n \"cumDeathsByDeathDate\": \"cumDeathsByDeathDate\"\n }\n #Sets the filter for the API using config.json\n covid_nation = ['areaType=nation']\n nation = 'areaName=' + str(config_fetcher(\"covid_region\"))\n covid_nation.append(nation)\n\n #Gets API latest data\n covid_api = Cov19API(\n filters = covid_nation,\n structure = cases_and_deaths,\n )\n #Gets data in form of dictionary\n covid_json = covid_api.get_json()\n #Gets timestamp for last update\n covid_timestamp = covid_api.last_update\n #Assign data to variables\n covid_data = covid_json['data'] #This formats the data as a list, while I want a dictionary, hence the next line.\n return covid_data",
"def _fetch_chri_data(year=None):\n try:\n if year is None:\n year = _get_max_year([9]) + 1\n\n database = 'AnnualPulls'\n tbl = 'Arrests'\n cols = 'ArrestYear, ArrestAge, EventORI'\n condition = f'ArrestYear = {year}'\n\n df = _fetch_from_ms_sql_server(database, tbl, cols, condition)\n return _transform_chri(df)\n except:\n raise",
"def get_cit_by_pmid(pmid=None, apikey=\"82bd3c40b84225a5981daff35e1c2097\"): #!!!!!!! find out why None for 24649328,24596747,24551446,12725084,\n \n url = f\"http://api.elsevier.com/content/search/scopus?query=PMID({pmid})&field=citedby-count\"\n sleep(0.3)\n r = requests.get(url,headers={\"X-ELS-APIKey\": \"82bd3c40b84225a5981daff35e1c2097\"})\n scopus_json = r.json()\n cit_count = extract_values(scopus_json,'citedby-count') \n\n if extract_values(scopus_json,\"opensearch:totalResults\")[0] == '0':\n print(\"need pmid\")\n return(None,None)\n\n else: \n if len(cit_count)==0:\n cit_count = 0\n else:\n cit_count = int(cit_count[0])\n return(cit_count,scopus_json)",
"def test_fetch_crime_sedf(self):\n assert isinstance(_vector.fetch_beach_access_data(f='arcgis'), \n pd.DataFrame)",
"def fetch_community_crime_data(dpath='/tmp/glm-tools'):\n if os.path.exists(dpath):\n shutil.rmtree(dpath)\n os.mkdir(dpath)\n\n fname = os.path.join(dpath, 'communities.csv')\n base_url = (\"http://archive.ics.uci.edu/ml/machine-learning-databases\")\n url = os.path.join(base_url, \"communities/communities.data\")\n urllib.urlretrieve(url, fname)\n\n # Read in the file\n df = pd.read_csv('/tmp/glm-tools/communities.csv', header=None)\n\n # Remove missing values\n df.replace('?', np.nan, inplace=True)\n df.dropna(inplace=True, axis=1)\n df.dropna(inplace=True, axis=0)\n df.reset_index(inplace=True, drop=True)\n\n # Extract predictors and target from data frame\n X = np.array(df[df.keys()[range(3, 102)]])\n y = np.array(df[127])\n\n return X, y",
"def get_citizens():\n response = table.scan()[\"Items\"]\n logger.info(\"All citizens returned\")\n return jsonify(response)",
"def fetchData(self, date):\n\n\t\tclimbers = Climber.objects.all()\n\t\troutes = Route.objects.all()\n\n\t\tif date:\n\t\t\tclimbers = climbers.filter(climb__date__lte=date.date())\n\t\t\troutes = routes.filter(climb__date__lte=date.date())\n\n\t\t# Exclude climbers who have not climbed anything\n\t\tclimbers = climbers.annotate(Count('climb')).filter(climb__count__gte=1)\n\t\troutes = routes.annotate(Count('climb')).filter(climb__count__gte=1)\n\n\t\t# Fetch!\n\t\tclimbers = list(climbers)\n\t\troutes = list(routes)\n\n\t\tmatches = []\n\n\t\t# Keep track of whether we have a counted climb\n\t\tfor route in routes:\n\t\t\troute.climbed = False\n\n\t\tfor climber in climbers:\n\t\t\t# Aggregate minimum and maximum route numbers for each color that\n\t\t\t# this climber has climbed\n\t\t\tmin_for_color = {}\n\t\t\tmax_for_color = {}\n\n\t\t\tfor color in Route.COLOR_POINTS.keys():\n\t\t\t\tclimbs = Climb.objects.filter(climber=climber, route__color=color)\n\t\t\t\tif date:\n\t\t\t\t\tclimbs = climbs.filter(date__lte=date.date())\n\n\t\t\t\tif not climbs.exists():\n\t\t\t\t\tcontinue\n\n\t\t\t\tres = climbs.aggregate(Max('route__number'), Min('route__number'))\n\t\t\t\tmin_for_color[color] = res['route__number__min']\n\t\t\t\tmax_for_color[color] = res['route__number__max']\n\n\t\t\tfor route in routes:\n\t\t\t\t# Anything below your category does not count.\n\t\t\t\tif route.color < climber.minColor():\n\t\t\t\t\tcontinue\n\n\t\t\t\t# If the route is in the climbers category or below, assume that the climber has only tried\n\t\t\t\t# the routes up to his maximum entry in that color.\n\t\t\t\t# Otherwise, assume that the climber has failed.\n\t\t\t\tabove_category = route.color != Route.PINK and route.color > climber.maxColor()\n\n\t\t\t\tif not above_category:\n\t\t\t\t\tif route.color not in max_for_color:\n\t\t\t\t\t\t# Climber hasn't climbed anything in that category, nothing to rate.\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif route.number > max_for_color[route.color] or route.number < min_for_color[route.color]:\n\t\t\t\t\t\t# Outside of climbers' climb history => not attempted\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tclimbs = Climb.objects.filter(climber=climber, route=route)\n\t\t\t\tif date:\n\t\t\t\t\tclimbs = climbs.filter(date__lte=date.date())\n\n\t\t\t\tclimbed = route.color < climber.minColor() or climbs.exists()\n\t\t\t\tmatches.append((climber, route, climbed))\n\n\t\t\t\troute.climbed = True\n\n\t\t# Filter out routes which still did not get climbs\n\t\t# Can happen if climbs are rejected by the logic above.\n\t\troutes = [ route for route in routes if route.climbed ]\n\t\tmatches = [ match for match in matches if match[1].climbed ]\n\n\t\treturn climbers, routes, matches",
"def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table",
"def query_all_crimes(zip_):\n q_str = f\"\"\"SELECT * FROM \"Crime\" WHERE zip={zip_} ;\"\"\"\n cursor.execute(q_str)\n return cursor.fetchall()",
"def crime_data_fbi(request):\n http = urllib3.PoolManager()\n\n # Getting the request arguments for city, distance , start date and end date \n\n #request_args = request.args\n #print('request_args')\n #ori=request_args['ori']\n #start_date=request_args['start_date']\n #end_date=request_args['end_date']\n #distance=request_args['distance']\n base_url=fbi_url(request)\n print(base_url)\n \n # New request url \n request_url = base_url\n print(request_url)\n \n print(\"i am inside the functino\")\n payload = http.request('GET',\n request_url,\n headers={\n 'Content-Type': 'application/json',\n 'x-api-key': creds\n },\n fields={\n 'API_KEY':creds\n }\n )\n\n #*** only changing it for testing ***\n #return request_url\n print(payload)\n return payload",
"def read(filename: str)-> List [CrimeStatistics]:\n #return [] #stub\n # Template from htDAP\n \n #loc contains all results read so far\n loc = [] #type List[CrimeStatistics]\n \n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n \n \n for row in reader:\n university = row[0].replace(\"4\", \"\")\n campus = parse_campus(row[1])\n enrollment = parse_int(row[2].replace(\",\", \"\"))\n violent_crimes = parse_int(row[3])\n property_crimes = parse_int(row[8])\n arson = parse_int(row[12])\n \n if valid(enrollment):\n cs = CrimeStatistics(university,\n campus,\n enrollment,\n violent_crimes,\n property_crimes,\n arson)\n \n loc.append(cs)\n return loc",
"def getcurso(curso):\n\n dataset = {\n \"curso\": [],\n \"materia\": [],\n \"professor\": [],\n \"horas\": [],\n \"ids\": []\n }\n request_data_get = cursos_collections.find({\"curso\": curso})\n\n for result in request_data_get:\n dataset['curso'].append(result[\"curso\"])\n dataset['materia'].append(result[\"materia\"])\n dataset['professor'].append(result[\"professor\"])\n dataset['horas'].append(result[\"horas\"])\n dataset['ids'].append(str(result[\"_id\"]))\n\n return dataset",
"async def fetch_cached_patrons():\n return await self.conn.fetch(\"SELECT userid, super FROM patreon.cache\")",
"def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)",
"def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')",
"def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')",
"def getCitationsData():\n # Follows https://github.com/simonw/irma-scrapers/issues/1\n citationsResponse = requests.get(\"https://api.github.com/repos/greenelab/covid19-review/git/trees/output\", headers=headers).json()\n treeEntry = [t for t in citationsResponse[\"tree\"] if t[\"path\"] == \"references.json\"][0] \n citations = json.loads(base64.b64decode(requests.get(treeEntry[\"url\"]).json()[\"content\"]))\n\n citationsDF = pd.DataFrame(citations)\n citationsDF[\"Covid19-review_paperLink\"] = citationsDF.id.apply(lambda x: \"https://greenelab.github.io/covid19-review/#ref-\" + x)\n citationsDF = citationsDF[[\"DOI\", \"title\", \"issued\", \"container-title\", \"URL\", \"Covid19-review_paperLink\"]]\n citationsDF.rename(columns={\"DOI\": \"doi\", \"issued\": \"date\", \"container-title\": \"publication\"}, inplace=True)\n\n # Convert date to string\n def dateStringFromDateParts(row):\n try:\n dateParts = row['date']['date-parts'][0]\n if len(dateParts) == 3:\n return \"-\".join([str(dateParts[1]), str(dateParts[2]), str(dateParts[0])])\n elif len(dateParts) == 2:\n return \"-\".join([str(dateParts[1]), str(dateParts[0])])\n elif len(dateParts) == 1:\n return str(dateParts[0])\n else:\n return\n except:\n return\n\n citationsDF.date = citationsDF.apply(dateStringFromDateParts, axis=1)\n\n citationsDF.set_index(\"doi\", inplace=True)\n return citationsDF",
"def scrape_crew(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(\"div\", id=\"fullcredits_content\")\n directors_and_writers = results.find_all(\n \"table\", class_=\"simpleTable simpleCreditsTable\"\n )\n cast = results.find(\"table\", class_=\"cast_list\")\n\n crew = []\n crew.append(directors_and_writers[0])\n crew.append(directors_and_writers[1])\n crew.append(cast)\n\n return crew",
"def get_data(self):\n\n self.cur.execute('SELECT year, sex, education, score from vocabulary_scores;')\n scores = dict()\n education = dict()\n count = dict()\n\n for row in self.cur :\n if row[0] in scores:\n if row[1] in scores[row[0]]:\n scores[row[0]][row[1]] += int(row[3])\n education[row[0]][row[1]] += int(row[2])\n count[row[0]][row[1]] += 1\n else:\n scores[row[0]][row[1]] = int(row[3])\n education[row[0]][row[1]] = int(row[2])\n count[row[0]][row[1]] = 1\n else:\n # scores[year] = {gender: score}\n scores[row[0]] = {row[1]: int(row[3])}\n education[row[0]] = {row[1]: int(row[2])}\n count[row[0]] = {row[1]: 1}\n\n scores, education = self.average_scores(scores, education, count)\n\n return scores, education",
"def load_covid_cases_data(date=None):\n if not date:\n date = datetime.today()\n data = requests.get(f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date.strftime(\"%m-%d-%Y\")}.csv')\n\n f = StringIO(data.text)\n reader = csv.DictReader(f, delimiter=',')\n results = {}\n for row in reader:\n fips = row.pop('FIPS', None)\n if fips:\n results[int(fips)] = row\n print(f\"{date.strftime('%m-%d-%Y')} has {len(results.keys())} results\")\n return results",
"def fetchSenateElection ():\n\n # Url obtained from\n # https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/PEJ5QU/XXQCIK&version=4.0\n # and then go down and look for the Download URL section. this link can be found there.\n #\n url=\"https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/PEJ5QU/XXQCIK\"\n\n # output file where the downloaded data will be saved\n electionDataFile = \"data/senate_election_data.tab\"\n\n # the download comes down as a tab delimited file, so \\t tells pandas.read_csv that the file is a\n # a tab delimited file.\n df = getUrlPd (url, electionDataFile, sep='\\t')\n\n # save the data to a sql db\n dbString = \"postgres://rhea@localhost/research\"\n\n pdToSql (df, \"state_senate\", dbString)\n\n return df",
"def fetch_data():\n data.fetch_data()\n data.start_updating()",
"def covid_realtime():\r\n url = \"https://phl.carto.com/api/v2/sql\"\r\n gdf = carto2gpd.get(url, \"covid_cases_by_zip\",fields=['zip_code', 'count', \"etl_timestamp\"])\r\n return gdf",
"def club_info(self, cid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/clubInfo.phtml?cid=' + cid, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n plist = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n plist.append('%s\\t%s\\t%s\\t%s\\t%s' % (\r\n i.find_all('td')[0].text, i.find_all('td')[1].text, i.find_all('td')[2].text, i.find_all('td')[3].text,\r\n i.find_all('td')[4].text))\r\n return soup.title.text, plist",
"def get_course(dept, num):\n \n # semester: 10 = Fall, 20 = Spring, 30 = Summer\n host = \"https://selfservice.mypurdue.purdue.edu/prod/bwckctlg.p_disp_course_detail\"\n query = \"?cat_term_in={term}&subj_code_in={dept}&crse_numb_in={num}\".format(term=\"201620\", dept=dept, num=num)\n urlfetch.set_default_fetch_deadline(600)\n result = urlfetch.fetch(host+query)\n \n if result.status_code == 200:\n tree = html.fromstring(result.content)\n text = tree[1][4][2].text_content() # get just the relevant text of the webpage \n\n # remove unicode non-breaking spaces to allow regexing\n text = text.replace(u'\\xa0',u' ')\n return text",
"def fetch_self(self):\r\n self.parsed_doc['names'] = self.fetch_candidate_name() \r\n self.parsed_doc['phones'] = self.fetch_phone_numbers() \r\n self.parsed_doc['emails'] = self.fetch_emails() \r\n self.parsed_doc['github'] = self.fetch_github() \r\n self.parsed_doc['linkedin'] = self.fetch_linkedin() \r\n self.parsed_doc['degrees'] = self.fetch_degrees() \r\n self.parsed_doc['skills'] = self.fetch_skills() \r\n self.parsed_doc['education'] = self.fetch_education() \r\n self.parsed_doc['languages'] = self.fetch_languages() \r\n self.parsed_doc['addresses'] = self.fetch_address() \r\n self.parsed_doc['raw_resume'] = self.stringtext",
"def get_data():\n pass"
]
| [
"0.6204969",
"0.59845346",
"0.58863574",
"0.5830197",
"0.58131176",
"0.5786138",
"0.570744",
"0.5705819",
"0.5674465",
"0.5605659",
"0.55591035",
"0.55420095",
"0.5533729",
"0.54788285",
"0.54764843",
"0.546429",
"0.5451421",
"0.544013",
"0.544013",
"0.5437254",
"0.5436855",
"0.54239553",
"0.54225814",
"0.54016525",
"0.5393967",
"0.5393199",
"0.53544146",
"0.53367853",
"0.53211266",
"0.53179497"
]
| 0.61369556 | 1 |
Unsubscribes a PubgemUser from an RSSFeed. | def unsubscribe(self, *rss_feeds):
[self.subscriptions.remove(feed) for feed in rss_feeds if feed in self.subscriptions]
self.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unsubscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/unsubscribe\", post_args=args)",
"def unsubscribe(id, userId):\n db = core.connect()\n theUser = db[userId]\n if id in theUser[\"streams\"]:\n theUser[\"streams\"].remove(id)\n db[userId] = theUser",
"def unsubscribe(self, feedUrl):\r\n response = self.httpPost(\r\n ReaderUrl.SUBSCRIPTION_EDIT_URL,\r\n {'ac':'unsubscribe', 's': feedUrl})\r\n # FIXME - need better return API\r\n if response and 'OK' in response:\r\n return True\r\n else:\r\n return False",
"def unsubscribe_feed():\n\n source_id = request.args.get('source_id', type=int)\n if source_id:\n current_user.unsubscribe_feed(source_id)\n flash('Unsubscribe successful source=%s' % source_id)\n\n return redirect(url_for('mod_feed.manage_subscriptions'))",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'",
"def unsubscribe(self, subject):\n pass",
"def unsubscribe(self, user_token, topic):\n response = _request('DELETE',\n url=self.url_v1('/user/subscriptions/' + topic),\n user_agent=self.user_agent,\n user_token=user_token,\n )\n _raise_for_status(response)",
"def unsubscribe(self, destination, *args, **kwargs):",
"def unsubscribe(self, update, context):\n # remove or update to the sqlite table.\n chat = update.message.chat\n self.db_manager.remove_user(chat.id)\n self.logger.info(\n 'Username: %s and chat_id: %s unsubscribed to the list.' % (chat.username, chat.id)\n )\n update.message.reply_text('You have successfully unsubscribed the notifications forever.')",
"def unsubscribe(self, item_name):\n self.subscribed = None",
"def unregister(self, url):\n param_d = {\n 'url': url,\n }\n r = self._send_request('feeds/unregister', param_d, http_post=False)\n # Return True on success.\n if 'result' in r and r['result'] == 'success':\n return True\n else:\n return False",
"def unsubscribe(cls,sender,receiver):\n cls._unsubscribe(id(sender),receiver)",
"def unregister_publisher(self, hostname):",
"def unsubscribe(self, inst):\r\n if inst in self._subscribers:\r\n self._subscribers.remove(inst)\r\n vprint(\"{} is unsubscribed from {}\".format(inst.name, self.name))",
"def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()",
"def unsubscribe(self, request):\n email = self.cleaned_data.get('email')\n subscriber = Subscriber.objects.get(email=email, mailing_list=self.mailing_list)\n subscriber.unsubscribe(request)",
"def unsubscribe(self, jid=None, username=None, domain=None):\n if jid is not None:\n self._pres_manager.flag_offline(jid)\n elif username is not None and domain is not None:\n self._pres_manager.flag_offline(xmpp.JID(node=username, domain=domain))\n self._roster.unsubscribe(jid=jid, username=username, domain=domain)",
"def untag():\n form = TagSubscriptionForm(hidden_mode=True)\n if not form.validate_on_submit():\n abort(403)\n\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()\n\n results = subscription.untag(tag.id)\n response = {\"success\": results}\n return jsonify(response)",
"def unsubscribe(\n self, *, other_subreddits: list[praw.models.Subreddit] | None = None\n ):\n data = {\n \"action\": \"unsub\",\n \"sr_name\": self._subreddit_list(\n other_subreddits=other_subreddits, subreddit=self\n ),\n }\n self._reddit.post(API_PATH[\"subscribe\"], data=data)",
"def unsubscribe_user(sub_id):\n repository.unsubscribe(sub_id)\n return flask.render_template('www/email/unsubscribed.html')",
"def unsubscribe(self):\n pass # pragma: no cover",
"def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)",
"def _unsubscribe(self):\n self.unsubscribe_date = now()\n self.unsubscribed = True\n self.subscribed = False",
"def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)",
"def unsubscribe(self, user_id):\n removed_subscription = self.data_source.unsubscribe(user_id)\n\n return removed_subscription"
]
| [
"0.73993146",
"0.6520943",
"0.64468044",
"0.634923",
"0.6127742",
"0.6127742",
"0.6127742",
"0.6127742",
"0.6127742",
"0.61200035",
"0.6014998",
"0.59376156",
"0.59051955",
"0.58890295",
"0.5878844",
"0.58269995",
"0.58126444",
"0.579354",
"0.5736344",
"0.5714773",
"0.5711835",
"0.56888676",
"0.56730247",
"0.56685144",
"0.5628731",
"0.5625565",
"0.56232536",
"0.561447",
"0.554241",
"0.5510551"
]
| 0.69304186 | 1 |
Save the environment identifier to a local file to use as the default. | def save_default_environment(
environment=None,
cwd=None
):
env_file = get_local_default_file(cwd=cwd)
with open(env_file, 'w') as f_out:
f_out.write(f'{str(environment)}\n')
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_environment(path: Optional[str] = None):\n environment = EnvironmentProvider().environment\n serialize_environment_to_file(environment=environment,\n path=path)",
"def save_to_env_file(self, envs, env_file_location):\n\n if not self.pre_initiated and envs:\n file_instance = PyFunceble.helpers.File(env_file_location)\n\n try:\n content = file_instance.read()\n except FileNotFoundError:\n content = \"\"\n\n for environment_variable, value in envs.items():\n to_write = \"{0}={1}\".format(environment_variable, value)\n regex = r\"{0}=.*\".format(environment_variable)\n\n if content:\n if PyFunceble.helpers.Regex(f\"^{regex}\").get_matching_list(\n content.splitlines()\n ):\n content = PyFunceble.helpers.Regex(regex).replace_match(\n content, to_write\n )\n else:\n if not content.endswith(\"\\n\"):\n content += \"\\n{0}\\n\".format(to_write)\n else:\n content += \"{0}\\n\".format(to_write)\n else:\n content += \"{0}\\n\".format(to_write)\n\n file_instance.write(content, overwrite=True)",
"def saveenv(self):\n\t\tmesslen, received = self.socket.send('saveenv\\r', 5)\t\t\n\t\treturn None",
"def save():\n\n env.config.save(env.config_file)",
"def save_envs_to_file(file_path=constants.ENV_FILE_DEFAULT_PATH.value):\n #pylint: disable=unspecified-encoding\n with open(file_path, \"w\") as file:\n for key, value in environ.items():\n if key in constants.ENV_FILE_ALLOWED_KEYS.value:\n file.write(\"{}={}\\n\".format(key, value))",
"def save_info_file(self, path):\n path = os.path.join(path, 'asv-env-info.json')\n content = {\n 'tool_name': self.tool_name,\n 'python': self._python,\n 'requirements': self._requirements,\n 'build_env_vars': self.build_env_vars\n }\n util.write_json(path, content)",
"def writeLocalEnv(self):\n \n # open file\n f = open(self.installPath + \"/build_env.sh\", 'w')\n \n # write to file\n f.write( 80*'#' + os.linesep + \"# Environment script generated by ilcsoft-install on \" + time.ctime() + os.linesep )\n f.write( \"# for \" + self.name + \" located at [ \" + self.installPath + \" ]\" + os.linesep + 80*'#' + os.linesep )\n\n # global environment variables\n if( len( self.parent.env ) > 0 ):\n f.write( 2*os.linesep + \"#\" + 80*'-' + os.linesep + \"#\" + 5*' ' + \"Global Environment Variables\" + os.linesep \\\n + \"#\" + 80*'-' + os.linesep )\n for k, v in self.parent.env.iteritems():\n f.write( \"export \" + str(k) + \"=\\\"\" + str(v) + \"\\\"\" + os.linesep )\n \n\n # write environment recursively to file\n self.writeEnv(f, [])\n \n\n f.write( \"# --- additional comands ------- \" + os.linesep ) \n print \"\\n ----- adding additional commands to build_env.sh : \\n \"\n for c in self.envcmds:\n f.write( c + os.linesep ) \n print \"\\n ----- adding additional command to build_env.sh \" + c + \"\\n\"\n\n # close file\n f.close()",
"def pwrite(self):\n shell = os.getenv('SHELL')\n if shell == None: # assume bash or ksh\n shell = 'bash'\n else:\n shell = os.path.basename(shell)\n\n fname = '/tmp/source_' + os.environ['USER'] # get login id of current user\n try:\n fid = open(fname, 'w')\n except:\n print(\"ERROR. Could not open \", fname, \" for writing! Exiting...\")\n exit(1)\n\n if self.val == None:\n self.val = \"\"\n\n if 'csh' in shell:\n wstr = \"setenv \" + self.name + \" \" + self.val\n else:\n wstr = \"export \" + self.name + \"=\" + self.val\n\n fid.write(wstr)\n fid.close()\n print(\"Source \", fname, \" for new path to take effect\")",
"def env_file_op(api_name,api_version,spread_sheet_id,client_secret_file_name):\n\n lines = [\"SPREAD_SHEET_ID = {0} \\n\".format(spread_sheet_id),\"API_NAME = {0} \\n\".format(api_name),\"API_VERSION = {0} \\n\".format(api_version),\"CLIENT_SECRET_FILE = {0} \\n\".format(client_secret_file_name)]\n \n path = str(os.path.expanduser('~')) +'/.config/hackerjobs/.env'\n with open(path,'w+') as file:\n file.writelines(lines)",
"def save_env():\n global vis\n vis.save([vis.env])",
"def save_env(wall_time_=None, start_time_=None, environment_file=\"/etc/environment\"):\n if not os.access(environment_file, os.W_OK):\n raise EnvironmentError(\"Can't write to %s\" % environment_file)\n\n with open(name=environment_file, mode=\"r\") as file_:\n # keep results != WALLTIME/BOOTTIME\n content = [entry for entry in file_.readlines() if re.match(\"(?:WALL|BOOT)TIME\", entry, re.IGNORECASE) is None]\n if wall_time_ is not None:\n content.append(\"WALLTIME=%d\\n\" % wall_time_)\n if start_time_ is not None:\n content.append(\"BOOTTIME=%d\\n\" % start_time_)\n with open(name=environment_file, mode=\"w\") as file_:\n file_.writelines(content)",
"def overwrite_environment_variable(self, key, value):\n if value is not None:\n value = BashParentEnvironment._format_environment_value(value)\n self._printer(\"export {0}=\\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"unset {0}\".format(key))",
"def export(self, **env):\n with self.lock:\n for key, value in env.items():\n self.environment[key] = value",
"def create_vars_dot_env(self):\n\n print(\"Creating vars.env in your Google Drive!\")\n\n with open(self.envpath, \"w\") as envfile:\n envfile.write(\"COLAB_ENV = Active\\n\")",
"def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))",
"def __write_epics_env(self, path, template_name, macros):\n file = \"{}mps.env\".format(path)\n template = \"{}epics_env/{}\".format(self.template_path, template_name)\n self.__write_file_from_template(file=file, template=template, macros=macros)",
"def get_saved_default_environment(cwd=None):\n env_file = get_local_default_file(cwd=cwd)\n saved_default = None\n if os.path.exists(env_file):\n with open(env_file, 'r') as f:\n saved_default = f.read().replace('\\n', '')\n return saved_default",
"def persist(self, filepath):\n joblib.dump('hello-steppy', filepath)",
"def writeEnv(self, f, checked):\n \n # resolve circular dependencies\n if( self.name in checked ):\n return\n else:\n checked.append( self.name )\n\n if self.env or sum(map(len, self.envpath.values()), 0):\n f.write( 2*os.linesep + \"#\" + 80*'-' + os.linesep + \"#\" + 5*' ' \\\n + self.name + os.linesep + \"#\" + 80*'-' + os.linesep )\n \n # first write the priority values\n for k in self.envorder:\n f.write( \"export \" + str(k) + \"=\\\"\" + str(self.env[k]) + \"\\\"\" + os.linesep )\n # then write the rest\n for k, v in self.env.iteritems():\n if k not in self.envorder:\n f.write( \"export \" + str(k) + \"=\\\"\" + str(self.env[k]) + \"\\\"\" + os.linesep )\n\n # list of \"trivial\" paths we do not want to add again to PATH and co\n ignorepaths = ['/usr/bin','/usr/lib','/sbin','/usr/sbin']\n # path environment variables\n for k, v in self.envpath.iteritems():\n if( len(v) != 0 ):\n # expand every variable we introduced previously\n exp = str().join(v)\n for e, ev in self.env.iteritems():\n p = re.compile(r\"\\$\"+str(e)) # compile regular expression to match shell variable\n exp = p.sub(str(ev), exp) # replace with expanded variable for absolute path\n # check for match\n if exp in ignorepaths:\n continue\n path = str.join(':', v)\n path = path + ':'\n f.write( \"export \" + k + \"=\\\"\" + path + \"$\" + k + \"\\\"\" + os.linesep )\n\n if( len(checked) > 1 ):\n mods = self.optmodules + self.reqmodules\n else:\n # buildonly modules are only written for the package were they are needed\n mods = self.optmodules + self.reqmodules + self.reqmodules_buildonly + self.reqmodules_external\n \n for modname in mods:\n self.parent.module(modname).writeEnv(f, checked)",
"def _save_state(self, filename=\".logs_state.json\"):\n curr_state = self.current_state\n with open(join(self.logs_dir, filename), 'w') as fh:\n json.dump(curr_state, fh)",
"def persist_version():\r\n #it's not necessary to do this every time we persist, but\r\n #this way we don't have to worry about race conditions with resume.py\r\n #reading this\r\n f = open(os.path.join(get_persist_root_dir(), \"sparkVersion\"), 'w')\r\n from spark.internal.version import VERSION \r\n f.write(VERSION)\r\n f.close()",
"def _save(self, filename = str(int(time()))):\n if filename:\n with open(filename, 'w') as f:\n f.write('null')\n self.prompt_time = 0\n exit()",
"def export_env_spec(project, name, filename):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n if name is None:\n name = project.default_env_spec_name\n assert name is not None\n\n if name not in project.env_specs:\n problem = \"Environment spec {} doesn't exist.\".format(name)\n return SimpleStatus(success=False, description=problem)\n\n spec = project.env_specs[name]\n\n try:\n spec.save_environment_yml(filename)\n except Exception as e:\n return SimpleStatus(success=False, description=\"Failed to save {}: {}.\".format(filename, str(e)))\n\n return SimpleStatus(success=True, description=\"Exported environment spec {} to {}.\".format(name, filename))",
"def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return",
"def local_seed(self) -> str:\n assert self.definition.settings.sp_root_dir\n seed_file = self.definition.settings.sp_root_dir.joinpath(\"seed.txt\")\n if not seed_file.exists():\n seed = str(encode_hex(bytes(random.randint(0, 255) for _ in range(20))))\n seed_file.write_text(seed)\n else:\n seed = seed_file.read_text().strip()\n return seed",
"def env_to_file(env_variables, destination_path=None, posix=True):\n\n if not env_variables:\n return None\n if not destination_path:\n destination_path = tempfile.mkstemp(suffix='env')[1]\n\n if posix:\n linesep = '\\n'\n else:\n linesep = '\\r\\n'\n with open(destination_path, 'w') as f:\n if posix:\n f.write('#!/bin/bash')\n f.write(linesep)\n f.write('# Environmnet file generated by Cloudify. Do not delete '\n 'unless you know exactly what you are doing.')\n f.write(linesep)\n f.write(linesep)\n else:\n f.write('rem Environmnet file generated by Cloudify. Do not '\n 'delete unless you know exactly what you are doing.')\n f.write(linesep)\n for key, value in env_variables.iteritems():\n if posix:\n f.write('export {0}={1}'.format(key, value))\n f.write(linesep)\n else:\n f.write('set {0}={1}'.format(key, value))\n f.write(linesep)\n f.write(linesep)\n\n return destination_path",
"def set_env(self, env_dict):\n # Only save environment group if file_path exists\n if not os.path.exists(self.file_path):\n print('netCDF file does not exist, exiting without saving Environment group...')\n else:\n ds = xr.Dataset({'temperature': (['ping_time'], env_dict['temperature'])},\n coords={'ping_time': (['ping_time'], env_dict['ping_time'],\n {'axis': 'T',\n 'calendar': 'gregorian',\n 'long_name': 'Timestamp of each ping',\n 'standard_name': 'time',\n 'units': 'seconds since 1970-01-01'})},\n attrs={'long_name': \"Water temperature\",\n 'units': \"C\"})\n\n # save to file\n if self.format == '.nc':\n ds.to_netcdf(path=self.file_path, mode='a', group='Environment')\n elif self.format == '.zarr':\n if not self.append_zarr:\n ds.to_zarr(store=self.file_path, mode='a', group='Environment')\n else:\n ds.to_zarr(store=self.file_path, mode='a', group='Environment', append_dim='ping_time')",
"def sync_local_fabric_env(self):\n env.sync_filename = '/tmp/{0}_env.txt'.format(time.time())\n env_copy = self.env\n env_copy.use_ssh_config = False\n env_copy.host = False\n env_copy.host_string = False\n env_copy.local_deployment = True\n # TODO: add context from each need to repopulate\n with self.file.tmpfile(self.to_json(env_copy, cls=SilentEncoder)) as f:\n self.up(f.name, env.sync_filename)",
"def _save_version_file(cls, hivemind_version, git_revision, git_date):\n with open(\"hive/version.py\", 'w') as version_file:\n version_file.write(\"# generated by setup.py\\n\")\n version_file.write(\"# contents will be overwritten\\n\")\n version_file.write(\"VERSION = '{}'\\n\".format(hivemind_version))\n version_file.write(\"GIT_REVISION = '{}'\\n\".format(git_revision))\n version_file.write(\"GIT_DATE = '{}'\\n\".format(git_date))",
"def export_vars(env_vars_dict):\n env_vars = ['{}={}'.format(k, env_vars_dict[k])\n for k in env_vars_dict.keys()]\n with open(\"vars.env\", \"w\") as file:\n for item in env_vars[:-1]:\n file.write(\"{}\\n\".format(item))\n file.write(\"{}\".format(env_vars[-1]))"
]
| [
"0.6939367",
"0.6756803",
"0.6492533",
"0.6465991",
"0.6240178",
"0.6202867",
"0.612628",
"0.6109327",
"0.60026574",
"0.5978912",
"0.5927",
"0.59054667",
"0.59054285",
"0.5880969",
"0.5760519",
"0.5732237",
"0.57036567",
"0.56264794",
"0.562367",
"0.55891657",
"0.5572052",
"0.5561217",
"0.55597687",
"0.55512357",
"0.5549356",
"0.5538735",
"0.5509592",
"0.5497607",
"0.547761",
"0.5463843"
]
| 0.70126224 | 0 |
Remove saved default environment file. | def clear_saved_default_environment(cwd=None):
env_file = get_local_default_file(cwd=cwd)
if os.path.exists(env_file):
os.remove(env_file)
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_out_old_env():\n d = \"dict_racey.json\"\n if os.path.exists(d):\n print(\"Remove the old cached JSON before continuing.\")\n os.remove(d)",
"def reset_default_paths():\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n os.remove(filename)\n print('Default paths cleared\\n')\n else:\n print('No default paths currently set\\n')",
"def reset_default_paths():\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n os.remove(filename)\n print('Default paths cleared\\n')\n else:\n print('No default paths currently set\\n')",
"def reset(self):\r\n if os.path.isfile(os.path.join(self._var_dir, 'SETUP')):\r\n os.remove(os.path.join(self._var_dir, 'SETUP'))",
"def remove_temporary_settings():\n if os.path.exists(\"settings.json\"):\n os.remove(\"settings.json\")",
"def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)",
"def reset(args):\n if os.path.exists(args.config):\n os.remove(args.config)\n return",
"def save_default_environment(\n environment=None,\n cwd=None\n):\n env_file = get_local_default_file(cwd=cwd)\n with open(env_file, 'w') as f_out:\n f_out.write(f'{str(environment)}\\n')\n return True",
"def del_env(self, envname):\n\n with open(self.envpath, \"r\") as envfile:\n my_vars = {}\n for line in envfile.readlines():\n key, value = self.__kv_pair(line)\n if key is not None:\n my_vars[key] = value\n\n current_value = my_vars.pop(envname, None)\n\n if current_value is None:\n return # do nothing if not set\n\n new_lines = [f\"{k} = {v}\\n\" for k, v in my_vars.items()]\n\n with open(self.envpath, \"w\") as envfile:\n envfile.writelines(new_lines)\n\n os.environ.unsetenv(envname)",
"def remove_stored_config(self):\n stored_config_filename = self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator",
"def env_cleanup(self):\n pass",
"def clear_save_name():\n clear_dir(MODEL_SAVE_DIR)\n clear_dir(SUMMARY_SAVE_DIR)\n clear_dir(IMG_SAVE_DIR)",
"def remove_local_config(self):\n with ignored(OSError):\n os.remove(os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))",
"def get_saved_default_environment(cwd=None):\n env_file = get_local_default_file(cwd=cwd)\n saved_default = None\n if os.path.exists(env_file):\n with open(env_file, 'r') as f:\n saved_default = f.read().replace('\\n', '')\n return saved_default",
"def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")",
"def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")",
"def stop(self):\n self._unbind_observers()\n self._pref_decls.clear()\n pref_path = os.path.join(self.default_folder, self.default_file)\n try:\n prefs = ConfigObj()\n prefs.update(self._prefs)\n prefs.filename = pref_path\n prefs.write()\n except Exception:\n print 'Invalid pref path'\n\n def_path = os.path.join(MODULE_PATH, 'default.ini')\n try:\n defaults = ConfigObj(def_path)\n defaults['folder'] = self.default_folder\n defaults['file'] = self.default_file\n defaults.write()\n except Exception:\n print 'Invalid default pref path'",
"def tearDown(self):\n if os.path.exists('file.json'):\n os.remove(\"file.json\")",
"def clean():\n local('rm -fr %s' % os.path.abspath(env.config['destination']))",
"def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True",
"def clear_config():\n check_config()\n fs.truncate(PYWS_DIR_BIN)",
"def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def __deleteSave(self) -> None:\n os.remove(self.save_location)",
"def remove(self):\n if self.exists():\n try:\n utils.run_in_bash(\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n except CalledProcessError as err:\n err_message = err.output.strip().decode('ascii')\n if 'CondaEnvironmentError:' in err_message:\n inform.info('deactivating and retry')\n utils.run_in_bash(\n 'source deactivate && '\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n else:\n inform.error('Couldn\\'t remove environment. '\n 'Following error occured:')\n print(err_message)\n inform.critical()",
"def clean():\n Log.d(DEBUG_TAG, \"Delete config file...\")\n try:\n os.remove(CONFIG_FILE)\n except os.error as e:\n Log.e(DEBUG_TAG, \"Delete config file%s error, reason:%s\"%(CONFIG_FILE, e))",
"def clear_data_base():\n\n\tcommand = 'rm object_models/*.json'\n\tos.system(command)\n\tprint(\"data base cleared\")",
"def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space",
"def tearDown(self) -> None:\n os.remove(TestConfigFile.TEST_CONFIG)",
"def remove_persisted_files():\r\n persistIncarnations = get_persist_incarnation_dirs()\r\n for p in persistIncarnations:\r\n clear_dir(p)\r\n os.remove(p)\r\n clear_dir(get_persist_src_backup_dir())\r\n clear_dir(get_persist_src_dir())\r\n clear_dir(get_persist_root_dir()) \r\n\r\n #make sure the persist kb data structures aren't keeping any info \r\n global PERSISTED_LOAD_IDS\r\n AGENT_KB_MAP.clear()\r\n KB_WORKING_SET.clear()\r\n copy = PERSISTED_LOAD_IDS[:]\r\n for x in copy:\r\n PERSISTED_LOAD_IDS.remove(x)"
]
| [
"0.7001228",
"0.6956922",
"0.6956922",
"0.6749785",
"0.66680175",
"0.6539512",
"0.64257216",
"0.64223576",
"0.63839066",
"0.6359835",
"0.63121593",
"0.626912",
"0.6264178",
"0.6244929",
"0.62184757",
"0.62184757",
"0.62158924",
"0.6210639",
"0.617264",
"0.61695546",
"0.61609566",
"0.6135488",
"0.6113214",
"0.6097435",
"0.60936975",
"0.6071075",
"0.60690755",
"0.6040004",
"0.60323507",
"0.6030165"
]
| 0.81255233 | 0 |
Validate the secrets base directory by the presence of a marker file. Returns False if the directory does not exist or does not contain the expected marker file, True otherwise. | def is_secrets_basedir(basedir=None, raise_exception=True):
result = False
if basedir is None:
if raise_exception:
raise RuntimeError("[-] no basedir was specified")
basedir_path = Path(basedir)
marker_path = Path(basedir) / MARKER
if not basedir_path.exists():
if raise_exception:
raise BasedirNotFoundError(basedir=basedir)
elif not marker_path.exists():
if raise_exception:
raise InvalidBasedirError(basedir=basedir)
else:
result = True
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_sanity(params):\n \n for dpath in ['input_dir','output_dir']:\n if path.isdir(params[dpath]) == False:\n print('ERROR: Cannot find directory '+params[dpath])\n exit()\n \n if path.isfile(params['star_file']) == False:\n print('ERROR: Cannot find star file '+params['star_file'])\n exit()",
"def is_shared_secret_from_secret_name(soa_dir: str, secret_name: str) -> bool:\n secret_path = os.path.join(\n soa_dir, SHARED_SECRET_SERVICE, \"secrets\", f\"{secret_name}.json\"\n )\n return os.path.isfile(secret_path)",
"def is_valid_file(base_dir, filename):\n base_dir = os.path.realpath(base_dir)\n full_path = os.path.realpath(os.path.join(base_dir, filename, ''))\n return is_subdir(full_path, base_dir) and os.path.exists(full_path)",
"def _search_for_key_file(path_to_key_file):\n\n return True if os.path.exists(path_to_key_file) else False",
"def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False",
"def check_helpers(self):\n paths = self.get_helper_out_paths()\n\n for p in paths:\n full_path = p + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return False\n\n return True",
"def check_config_file(self, file):\n (var1, var2) = file.split(\".\")\n try:\n f = os.path.join(self.config[var1][\"directory\"],\n self.config[var1][var2])\n if os.path.exists(f) or os.path.lexists(f):\n if os.path.islink(f) is False:\n raise ProfileCheckError(\"'%s' is in a bad config\" % f)\n\n except KeyError:\n raise ProfileKeyError(\"no value for %s.%s\" % (var1, var2))",
"def checkIfFileExistsInPossibleLocations(testConfig):\n assert \"name\" in testConfig\n assert \"file\" in testConfig\n assert \"file_locations\" in testConfig\n testPass = False\n for filePath in testConfig[\"file_locations\"]:\n if isfile(join(filePath,testConfig[\"file\"])):\n testPass=True\n \n assert testPass,\"Failure for package \"+testConfig[\"name\"]+\"\\n File: \"+\\\n testConfig[\"file\"]+\" does not exist\"+\"\\nSearched in \"+\\\n str(testConfig[\"file_locations\"])",
"def validate(cls, config_location):\n if not os.path.isdir(config_location):\n return False\n config_path = os.path.join(config_location, cls.CONFIG_FILENAME)\n if not os.path.isfile(config_path):\n return False\n cache_dir = os.path.join(config_location, cls.CACHE_DIRNAME)\n if not os.path.isdir(cache_dir):\n return False\n if not CacheManager.validate(cache_dir):\n return False\n data_path = os.path.join(config_location, cls.DATA_DIRNAME)\n if not os.path.isdir(cache_dir):\n return False\n if not DataManager.validate(data_path):\n return False",
"def __validate_location(self):\n if not os.path.exists(self._file_path):\n raise FileNotFoundError(\"Directory does not exist\")\n if not os.path.isfile(self._path_name):\n raise FileNotFoundError('File does not exist')",
"def check_file(f):\n if not os.path.isfile(f):\n raise OSError(f + ' not found.')\n if f.startswith('~'):\n raise OSError(f + ' has \"~\" in path.')",
"def test_environment_path_subdir_leadingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"/keys\"\n )",
"def test_verifies_token_file_exists(self):\n\n with self.assertRaises(exceptions.TokenFileNotFoundError):\n badgr = BadgrLite(token_filename='./non_existent_token_file.json')\n badgr.load_token()",
"def validate_configdir(configdir):\r\n if (configdir and configdir != '/' and\r\n configdir != '~' and\r\n configdir != os.path.abspath(os.path.expanduser('~'))):\r\n return True\r\n\r\n return False",
"def _validate_pants_repo(self, pants_repo: pathlib.PosixPath) -> bool:\n return (\n pants_repo and\n pants_repo.is_dir() and\n pants_repo.joinpath('pants').is_file()\n )",
"def task_dir_is_valid(task_dir: str) -> bool:\n return True",
"def verify_file_path(self) -> None:\n path = \"/data\"\n verify_file_path(path)",
"def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'",
"def check_config_dir(self, directory):\n (var1, var2) = directory.split(\".\")\n try:\n d = self.config[var1][var2]\n if os.path.isdir(d) is False:\n raise ProfileCheckError(\"'%s' isn't a directory\" % d)\n\n except KeyError:\n raise ProfileKeyError(\"no value for '%s'\" % directory)",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")",
"def _check_dir(self, req, dir):\n if not os.path.isabs(dir):\n add_warning(req, _('The repository directory must be an absolute '\n 'path.'))\n return False\n prefixes = [os.path.join(self.env.path, prefix)\n for prefix in self.allowed_repository_dir_prefixes]\n if prefixes and not any(is_path_below(dir, prefix)\n for prefix in prefixes):\n add_warning(req, _('The repository directory must be located '\n 'below one of the following directories: '\n '%(dirs)s', dirs=', '.join(prefixes)))\n return False\n return True",
"def _validate_data(self):\n logger.debug(\"Validating directory\")\n root = self.data_dir\n for path in self._walk_cases():\n print(path)\n full_path = os.path.join(root, path)\n logger.debug(\" \" + full_path)\n try:\n assert os.path.exists(full_path)\n except AssertionError:\n raise AssertionError(\n \"Couldn't find data on path {}\".format(full_path)\n )",
"def valid_tpkg_file(self, path):\n\n\t\tprint(self.config[\"daemon\"][\"rootdir\"] + path)\n\t\tif os.path.exists(self.config[\"daemon\"][\"rootdir\"] + \"/\" + path):\n\t\t\treturn self.fetch_remote_hashcode(path) == self.fetch_local_hashcode(path)\n\t\telse:\n\t\t\tprint(\"Package: \" + path + \" has not been downloaded.\");\n\t\treturn False",
"def validate_config(location):\n if not os.path.exists(location):\n full_path = os.path.abspath(location)\n logging.fatal('Config not found - expected a configfile at: [{loc}] - exiting.'.format(loc=full_path))\n sys.exit(1)",
"def check_data_dir(data_dir):\n if not os.path.exists(data_dir):\n raise Error('Data directory {0} dose not exist!'.format(data_dir))\n config_file = os.path.isfile(create_path(data_dir, CONFIG_FILE))\n meta_data_file = os.path.isfile(create_path(data_dir, METADATA_FILE))\n ts_data_dir = os.path.exists(create_path(data_dir, TS_DATA_DIR))\n ts_not_file = not os.path.isfile(create_path(data_dir, TS_DATA_DIR))\n if not (config_file and meta_data_file and ts_data_dir and ts_not_file):\n raise Error('Data directory is not complete!')",
"def _validate_file(self, filepath: str):\n if not os.path.exists(filepath):\n raise FileNotFoundError(f\"No such file or directory: {filepath}\")\n if not os.path.isfile(filepath):\n raise IsADirectoryError(f\"Is a directory: {filepath}\")",
"def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be",
"def verify(self):\n\t\t\n\t\tif not os.path.exists(self.objects_root):\n\t\t\tself.error = \"no such directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\tif not os.path.isdir(self.objects_root):\n\t\t\tself.error = \"not a directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\treturn True",
"def is_valid_production_root(path: pathlib.Path) -> bool:\n if not path.is_absolute():\n return False\n if not path.exists():\n return False\n if not path.is_dir():\n return False\n config_file_path = get_production_config_file_path(path)\n return config_file_path.exists()",
"def _validate_api_secret_key(self, api_secret_key):\n regex = APP_SECRET_REGEX_LIST[1]\n m = regex.search(api_secret_key)\n if not m:\n return False\n else:\n return True"
]
| [
"0.5820036",
"0.58147424",
"0.5705744",
"0.5678354",
"0.5674497",
"0.5647545",
"0.5631452",
"0.56306946",
"0.5505698",
"0.55015725",
"0.5488795",
"0.5472963",
"0.54715496",
"0.54707843",
"0.5459883",
"0.54526377",
"0.54288095",
"0.54050606",
"0.540398",
"0.534037",
"0.53371006",
"0.5305123",
"0.5300086",
"0.52881444",
"0.5275474",
"0.5272147",
"0.5265348",
"0.5261353",
"0.5256682",
"0.52541095"
]
| 0.7215526 | 0 |
Return the default secrets base directory path. | def get_default_secrets_basedir():
default_basedir = Path.home() / BASEDIR_BASENAME
return Path(
os.getenv('D2_SECRETS_BASEDIR', default_basedir)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_local_default_file(cwd=None):\n # TODO(dittrich): May need to do this differently to support\n # Windows file systems.\n if cwd is None:\n cwd = os.getcwd()\n return Path(cwd) / '.python_secrets_environment'",
"def default_path():\n return os.path.join(os.environ.get('OVERRIDE_ETC', '/etc'), 'auth')",
"def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')",
"def _get_config_path():\n return os.path.join(os.path.expanduser('~'))",
"def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH",
"def get_base_dir(config: Mapping[str, Any]) -> str:\n return normalize_base_dir(config.get(\"base_dir\"))",
"def get_base_dir():\n # copied from config2.py, without the lines that check whether the\n # directory already contains a user-config.py file\n # this code duplication is nasty, should fix\n NAME = \"pywikibot\"\n for arg in sys.argv[1:]:\n if arg.startswith(\"-dir:\"):\n base_dir = arg[5:]\n sys.argv.remove(arg)\n break\n else:\n if \"PYWIKIBOT2_DIR\" in os.environ:\n base_dir = os.environ[\"PYWIKIBOT2_DIR\"]\n else:\n is_windows = sys.platform == 'win32'\n home = os.path.expanduser(\"~\")\n if is_windows:\n _win_version = int(platform.version()[0])\n if _win_version == 5:\n base_dir = os.path.join(home, \"Application Data\", NAME)\n elif _win_version == 6:\n base_dir = os.path.join(home, \"AppData\\\\Roaming\", NAME)\n else:\n base_dir = os.path.join(home, \".\"+NAME)\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir, mode=0700)\n if not os.path.isabs(base_dir):\n base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))\n return base_dir",
"def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))",
"def base_path(self):\n return self.setup.base_path",
"def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)",
"def _load_name_root(self):\n if self._pypath:\n return self._pypath[0]\n elif self._dirs:\n return secrets.token_hex()",
"def get_default_secret_key():\n secret_access_key_script = AWS_ACCOUNTS['default'].SECRET_ACCESS_KEY_SCRIPT.get()\n return secret_access_key_script or get_s3a_secret_key()",
"def get_user_config_dir(options):\n return '/root/.spinnaker'",
"def default_module_dir(self):\n return os.path.dirname(self._modules['default'].path)",
"def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file",
"def getVaultPath():\n\n global args, vaultPathDefault\n\n if args.vault_location:\n return args.vault_location;\n return vaultPathDefault;",
"def base_dir(context):\n return '{}'.format(os.getcwd())",
"def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')",
"def get_testcases_default_config_dir():\n global_conf_dir = '/etc/testcases'\n user_global_path = os.path.join(os.path.expanduser('~'), '.testcases/etc')\n if os.path.isdir(global_conf_dir):\n return global_conf_dir\n elif os.path.isdir(user_global_path):\n return user_global_path\n else:\n os.makedirs(user_global_path)\n return user_global_path",
"def defaultDirectory(self):\n return self.__defaultDirectory",
"def _get_default_path(self):\n\n raise NotImplementedError()",
"def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME",
"def _get_default_cache_dir(self):\n default_cache_dir = os.path.join(os.path.expanduser(\"~\"), 'dbcollection')\n return default_cache_dir",
"def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE",
"def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']",
"def default_salt(self):\n return f\"{self.__class__.__module__}.{self.__class__.__name__}\"",
"def base_dir(self):\n pass",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'vehicles_dataset_v{}'.format(self._version))",
"def config_directory(self):\n\n return self.get_raw(\"config_directory\")"
]
| [
"0.74521095",
"0.7392667",
"0.67659163",
"0.6656529",
"0.66075575",
"0.6602614",
"0.6575067",
"0.6522957",
"0.6508674",
"0.64659834",
"0.6442842",
"0.6433506",
"0.6431116",
"0.6431051",
"0.6407672",
"0.64000154",
"0.63792914",
"0.6345531",
"0.63445127",
"0.62789977",
"0.6249005",
"0.6238993",
"0.62377405",
"0.6236018",
"0.6212728",
"0.61999315",
"0.61990666",
"0.6197269",
"0.61854416",
"0.6173772"
]
| 0.88887465 | 0 |
Create the secrets root directory. | def secrets_basedir_create(
basedir=None,
mode=DEFAULT_MODE,
):
if basedir is None:
raise RuntimeError("[-] a base directory is required")
secrets_basedir = Path(basedir)
secrets_basedir.mkdir(
parents=True,
mode=mode,
exist_ok=True
)
marker = secrets_basedir / MARKER
marker.touch(exist_ok=True)
marker.chmod(mode=DEFAULT_FILE_MODE)
return secrets_basedir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_secrets_basedir(\n secrets_basedir=None,\n allow_create=False,\n allow_prompt=False,\n verbose_level=1,\n):\n if secrets_basedir is None:\n secrets_basedir = get_default_secrets_basedir()\n homedir = str(Path.home())\n if allow_create is None:\n allow_create = str(secrets_basedir).startswith(homedir)\n valid_basedir = False\n try:\n valid_basedir = is_secrets_basedir(\n basedir=secrets_basedir,\n raise_exception=True,\n )\n except BasedirNotFoundError as err:\n if verbose_level > 0:\n logger.info(str(err))\n if not allow_create:\n if allow_prompt:\n client = YesNo(\n f\"create directory '{secrets_basedir}'? \",\n default='n'\n )\n result = client.launch()\n if not result:\n sys.exit(\"[!] cancelled creating '%s'\" % secrets_basedir)\n else:\n sys.exit(\n \"[-] add the '--init' flag or use 'psec init' \"\n \"to initialize secrets storage\"\n )\n except InvalidBasedirError as err:\n if not allow_create:\n sys.exit(str(err))\n if not valid_basedir:\n secrets_basedir_create(basedir=secrets_basedir)\n if verbose_level >= 1:\n logger.info(\n \"[+] initialized secrets storage in '%s'\",\n secrets_basedir\n )\n # else:\n # if verbose_level >= 1:\n # logger.info(\n # \"[+] secrets storage already initialized in '%s'\",\n # secrets_basedir\n # )\n return Path(secrets_basedir)",
"def credentials_file() -> Path:\n Path.home().joinpath('.jina').mkdir(parents=True, exist_ok=True)\n return Path.home().joinpath('.jina').joinpath('access.yml')",
"def create_secrets(file):\n with open(file, 'w') as secfile:\n secfile.write((\n '# _credentials: Maintain your credentials below. Do not remove unused fields.\\n'\n 'USER = \\'\\'\\nPASSWORD = \\'\\'\\n# _courses: Define which courses should be crawled\\nCOURSES = []\\n\\n'\n '# local: Required if you want to download files and store them in a local folder'\n ' (for example in the Dropbox client folder)\\n'\n 'PATH = \\'\\' # Path to the destination folder\\n\\n'\n '# dropbox (-d): Required if you want to download files and upload them to Dropbox\\n'\n 'DROPBOX_TOKEN = \\'\\' # Personal Dropbox API token\\n'\n 'PATH_IN_DB = \\'\\' # Destination path of downloaded files within Dropbox\\n'))\n print('File app_secrets.py was created. Please maintain your credentials.')\n sys.exit(1)",
"def install_secret_key(app, filename='secret_key'):\n filename = os.path.join(app.instance_path, filename)\n\n try:\n app.config['SECRET_KEY'] = open(filename, 'rb').read()\n except IOError:\n print('Error: No secret key. Create it with:')\n full_path = os.path.dirname(filename)\n if not os.path.isdir(full_path):\n print('mkdir -p {filename}'.format(filename=full_path))\n print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))\n sys.exit(1)",
"def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})",
"def secrets():\n click.echo(STEP_PATH / \"secrets\")",
"def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile",
"def test_create_config_roots(self):\n with self.override_role():\n self._create_config_root()",
"def _init_secret_token_map(model_context):\n method_name = '_init_secret_token_map'\n global _secret_token_map\n\n log_method = _logger.warning\n if model_context.get_validate_configuration().allow_unresolved_secret_tokens():\n log_method = _logger.info\n\n _secret_token_map = dict()\n\n # add name/key pairs for files in sub-directories of directories in WDT_MODEL_SECRETS_DIRS.\n\n locations = env_helper.getenv(str_helper.to_string(_secret_dirs_variable))\n if locations is not None:\n for secret_dir in locations.split(\",\"):\n if not os.path.isdir(secret_dir):\n # log at WARN or INFO, but no exception is thrown\n log_method('WLSDPLY-01738', _secret_dirs_variable, secret_dir, class_name=_class_name,\n method_name=method_name)\n continue\n\n for subdir_name in os.listdir(secret_dir):\n subdir_path = os.path.join(secret_dir, subdir_name)\n if os.path.isdir(subdir_path):\n _add_file_secrets_to_map(subdir_path, subdir_name, model_context)\n\n # add name/key pairs for files in directories assigned in WDT_MODEL_SECRETS_NAME_DIR_PAIRS.\n # these pairs will override if they were previously added as sub-directory pairs.\n\n dir_pairs_text = env_helper.getenv(str_helper.to_string(_secret_dir_pairs_variable))\n if dir_pairs_text is not None:\n dir_pairs = dir_pairs_text.split(',')\n for dir_pair in dir_pairs:\n result = dir_pair.split('=')\n if len(result) != 2:\n log_method('WLSDPLY-01735', _secret_dir_pairs_variable, dir_pair, class_name=_class_name,\n method_name=method_name)\n continue\n\n secret_dir = result[1]\n if not os.path.isdir(secret_dir):\n log_method('WLSDPLY-01738', _secret_dir_pairs_variable, secret_dir, class_name=_class_name,\n method_name=method_name)\n continue\n\n name = result[0]\n _add_file_secrets_to_map(secret_dir, name, model_context)",
"def fs(tmp_path_factory):\n dir_ = tmp_path_factory.mktemp(\"\")\n for key, val in file_dirs.fs1.items():\n d_ = dir_ / key\n d_.mkdir()\n for v in val:\n f = d_ / v\n f.write_text(v)\n\n return dir_",
"def get_default_secrets_basedir():\n default_basedir = Path.home() / BASEDIR_BASENAME\n return Path(\n os.getenv('D2_SECRETS_BASEDIR', default_basedir)\n )",
"def create_temporary_secret():\n return uuid.uuid4().hex",
"def prepare_secrets(c, rebuild_venv=False, no_secret_cache=False):\n cli_tasks.prepare_secrets.run(c, rebuild_venv, no_secret_cache)",
"def make_fsroot(root_dir, proid):\n newroot_norm = fs.norm_safe(root_dir)\n mounts = [\n '/bin',\n '/common',\n '/dev',\n '/etc',\n '/home',\n '/lib',\n '/lib64',\n '/mnt',\n '/proc',\n '/sbin',\n '/srv',\n '/sys',\n '/usr',\n '/var/lib/sss',\n '/var/tmp/treadmill/env',\n '/var/tmp/treadmill/spool',\n ]\n # Add everything under /opt\n mounts += glob.glob('/opt/*')\n\n emptydirs = [\n '/tmp',\n '/opt',\n '/var/empty',\n '/var/run',\n '/var/spool/keytabs',\n '/var/spool/tickets',\n '/var/spool/tokens',\n '/var/tmp',\n '/var/tmp/cores',\n ]\n\n stickydirs = [\n '/tmp',\n '/opt',\n '/var/spool/keytabs',\n '/var/spool/tickets',\n '/var/spool/tokens',\n '/var/tmp',\n '/var/tmp/cores/',\n ]\n\n for mount in mounts:\n if os.path.exists(mount):\n fs.mount_bind(newroot_norm, mount)\n\n for directory in emptydirs:\n _LOGGER.debug('Creating empty dir: %s', directory)\n fs.mkdir_safe(newroot_norm + directory)\n\n for directory in stickydirs:\n os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)\n\n # Mount .../tickets .../keytabs on tempfs, so that they will be cleaned\n # up when the container exits.\n #\n # TODO: Do we need to have a single mount for all tmpfs dirs?\n for tmpfsdir in ['/var/spool/tickets', '/var/spool/keytabs',\n '/var/spool/tokens']:\n fs.mount_tmpfs(newroot_norm, tmpfsdir, '4M')",
"def createValDir(self):\n\t\tself.setDirNames()\n\t\tself.setScreenXmlFile()\n\n\t\tself.rollValDir \t = self.roll + \"/screenval\"\n\t\tcmd = 'mkdir -p %s' % (self.rollValDir)\n\t\tos.system(cmd)\n\n\t\tself.createValDirFiles()\n\t\treturn",
"def create_storer_paths():\n config.config_storer()\n _create_paths(vmcheckerpaths.storer_paths())",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))",
"def root():\n click.echo(STEP_PATH / \"secrets/root_ca_key\")",
"def create_secret(secret_name, secret_value, environment):\n environment.add_cleanup(\n environment.cfy.secrets.delete,\n kwargs={\n 'secret_name': secret_name,\n },\n )\n environment.cfy.secrets.create(\n secret_name=secret_name,\n secret_value=secret_value,\n )",
"def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml",
"def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"",
"def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))",
"def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')",
"def seed(vault_client, opt):\n if opt.thaw_from:\n opt.secrets = tempfile.mkdtemp('aomi-thaw')\n auto_thaw(opt)\n\n ctx = Context.load(get_secretfile(opt), opt)\n ctx.fetch(vault_client)\n ctx.sync(vault_client)\n\n if opt.thaw_from:\n rmtree(opt.secrets)",
"def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value",
"def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )",
"def thaw(self, tmp_dir):\n for sfile in self.secrets():\n src_file = \"%s/%s\" % (tmp_dir, sfile)\n if not os.path.exists(src_file):\n raise aomi \\\n .exceptions \\\n .IceFile(\"%s secret missing from icefile\" %\n (self))\n\n dest_file = \"%s/%s\" % (self.opt.secrets, sfile)\n dest_dir = os.path.dirname(dest_file)\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n\n shutil.copy(src_file, dest_file)\n log(\"Thawed %s %s\" % (self, sfile), self.opt)",
"def _init_dir(self):\n for directory in ['', CERT_DIR_NAME, CRL_DIR_NAME, NEWCERT_DIR_NAME,\n PRIVATE_DIR_NAME]:\n mode = 0o755 if directory != PRIVATE_DIR_NAME else 0o700\n os.mkdir(self.ca_dir + directory, mode=mode)",
"def create_directory_structure(root):\n berlin = os.path.join(root, \"Berlin\",\"Berlin_test\")\n istanbul = os.path.join(root, \"Istanbul\",\"Istanbul_test\")\n moscow = os.path.join(root, \"Moscow\", \"Moscow_test\")\n try:\n os.makedirs(berlin)\n os.makedirs(istanbul)\n os.makedirs(moscow)\n except OSError:\n print(\"failed to create directory structure\")\n sys.exit(2)"
]
| [
"0.66333914",
"0.64847565",
"0.64647585",
"0.6165368",
"0.61403096",
"0.6137444",
"0.5966019",
"0.58695114",
"0.5848818",
"0.58375996",
"0.5834272",
"0.5827661",
"0.5826345",
"0.5748656",
"0.5725303",
"0.57216537",
"0.5718884",
"0.5700134",
"0.5697976",
"0.5673246",
"0.56579286",
"0.56568694",
"0.5645157",
"0.56215745",
"0.56171143",
"0.56101066",
"0.5609188",
"0.55999166",
"0.55960643",
"0.5590616"
]
| 0.7260202 | 0 |
Ensure that the secrets basedir exists. If the path is within the user's home directory, it is OK to create the directory automatically if it does not exist. This was the original behavior. If the path does exist and contains files, but does not have the special marker, that will be considered an error the user needs to resolve. For paths that lie outside the user's home directory, the user must explicitly confirm that it is OK to create the directory by responding to prompts (when possible) or by using the `init` option flag or `psec init` command. | def ensure_secrets_basedir(
secrets_basedir=None,
allow_create=False,
allow_prompt=False,
verbose_level=1,
):
if secrets_basedir is None:
secrets_basedir = get_default_secrets_basedir()
homedir = str(Path.home())
if allow_create is None:
allow_create = str(secrets_basedir).startswith(homedir)
valid_basedir = False
try:
valid_basedir = is_secrets_basedir(
basedir=secrets_basedir,
raise_exception=True,
)
except BasedirNotFoundError as err:
if verbose_level > 0:
logger.info(str(err))
if not allow_create:
if allow_prompt:
client = YesNo(
f"create directory '{secrets_basedir}'? ",
default='n'
)
result = client.launch()
if not result:
sys.exit("[!] cancelled creating '%s'" % secrets_basedir)
else:
sys.exit(
"[-] add the '--init' flag or use 'psec init' "
"to initialize secrets storage"
)
except InvalidBasedirError as err:
if not allow_create:
sys.exit(str(err))
if not valid_basedir:
secrets_basedir_create(basedir=secrets_basedir)
if verbose_level >= 1:
logger.info(
"[+] initialized secrets storage in '%s'",
secrets_basedir
)
# else:
# if verbose_level >= 1:
# logger.info(
# "[+] secrets storage already initialized in '%s'",
# secrets_basedir
# )
return Path(secrets_basedir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_environment_path_subdir_leadingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"/keys\"\n )",
"def is_secrets_basedir(basedir=None, raise_exception=True):\n result = False\n if basedir is None:\n if raise_exception:\n raise RuntimeError(\"[-] no basedir was specified\")\n basedir_path = Path(basedir)\n marker_path = Path(basedir) / MARKER\n if not basedir_path.exists():\n if raise_exception:\n raise BasedirNotFoundError(basedir=basedir)\n elif not marker_path.exists():\n if raise_exception:\n raise InvalidBasedirError(basedir=basedir)\n else:\n result = True\n return result",
"def secrets_basedir_create(\n basedir=None,\n mode=DEFAULT_MODE,\n):\n if basedir is None:\n raise RuntimeError(\"[-] a base directory is required\")\n secrets_basedir = Path(basedir)\n secrets_basedir.mkdir(\n parents=True,\n mode=mode,\n exist_ok=True\n )\n marker = secrets_basedir / MARKER\n marker.touch(exist_ok=True)\n marker.chmod(mode=DEFAULT_FILE_MODE)\n return secrets_basedir",
"def test_ensure_dir_exists(self):\n pass",
"def make_sure_path_exists(path):\n if path != '':\n try:\n os.makedirs(os.path.realpath(path))\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n return path",
"def test_environment_path_subdir_trailingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"keys/\"\n )",
"def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return",
"def ensure_dir_exists(path: Union[str,Path]) -> None:\n# path = str(path)\n assert not os.path.isfile(path)\n os.makedirs(path, exist_ok=True)\n assert os.path.isdir(path)",
"def ensure_path(path):\n\n path = os.path.expanduser(path)\n #Do not take into consideration the last path element\n #Unless it end with '/'\n os.makedirs('/'.join(path.split('/')[:-1]), exist_ok=True)\n return path",
"def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass",
"def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)",
"def ensure_path_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)\n if not os.path.exists(path):\n msg = \"Creating path {0} failed!\" # pragma: no cover\n raise Exception(msg.format(path)) # pragma: no cover",
"def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)",
"def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")",
"def make_sure_path_exists(path):\n try: os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST: raise",
"def _validate_path(dir_path: str) -> None:\n if os.path.exists(dir_path):\n return\n\n logger.info('Creating directory: %s', dir_path)\n os.mkdir(dir_path)",
"def make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise",
"def _ensure_config_file_exists():\n config_file = Path(ELIBConfig.config_file_path).absolute()\n if not config_file.exists():\n raise ConfigFileNotFoundError(ELIBConfig.config_file_path)",
"def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def EnsureDirExists(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass",
"def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass",
"def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path",
"def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()",
"def test_make_sure_path_exists(tmp_path):\n existing_directory = tmp_path\n directory_to_create = Path(tmp_path, \"not_yet_created\")\n\n utils.make_sure_path_exists(existing_directory)\n utils.make_sure_path_exists(directory_to_create)\n\n # Ensure by base system methods.\n assert existing_directory.is_dir()\n assert existing_directory.exists()\n assert directory_to_create.is_dir()\n assert directory_to_create.exists()",
"def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)",
"def ensure_dirpath_exists(path: Path) -> Path:\n assert path\n out_path: Path = path\n\n if not out_path.exists():\n out_path.mkdir(parents=True, exist_ok=True)\n\n return out_path",
"def check_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def ensure_path_exists(path):\n\t\t\n\t\tif not os.path.exists(path):\n\t\t\traise FileNotFoundError('The \"{0}\" path dont exists.'.format(path))",
"def test_6_1_2_etc_passwd_exists(host):\n assert host.file(ETC_PASSWD).exists",
"def prepare_environment(base_path):\n shutil.rmtree(base_path, ignore_errors=True)\n if not os.path.isdir(base_path):\n os.makedirs(base_path)"
]
| [
"0.651136",
"0.6511327",
"0.64604455",
"0.6436057",
"0.6282728",
"0.6207623",
"0.61954594",
"0.6180025",
"0.616266",
"0.61557084",
"0.6141379",
"0.61250085",
"0.6013276",
"0.601109",
"0.59528375",
"0.59509784",
"0.59147865",
"0.590804",
"0.58827823",
"0.5879972",
"0.5847711",
"0.5825869",
"0.58210963",
"0.5819897",
"0.5814278",
"0.5790878",
"0.57885855",
"0.57791483",
"0.57539636",
"0.57432085"
]
| 0.76771754 | 0 |
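The `ensure_secrets_basedir` record above describes a create-only-under-home policy: auto-create beneath the user's home directory, require explicit permission elsewhere, and reject an existing populated directory that lacks the marker file. Below is a minimal, self-contained sketch of that decision logic; it is not the real `psec` implementation, and the `MARKER` filename and error messages are assumptions made for illustration.

```python
# Simplified sketch only -- not the actual psec code; MARKER is an assumed name.
import sys
from pathlib import Path

MARKER = ".psec_basedir"  # hypothetical marker file name


def ensure_basedir_sketch(basedir: Path, allow_create: bool = False) -> Path:
    under_home = str(basedir).startswith(str(Path.home()))
    if basedir.exists():
        if (basedir / MARKER).exists():
            return basedir  # already initialized
        if any(basedir.iterdir()):
            # Existing directory with content but no marker: the user must resolve this.
            sys.exit(f"[-] '{basedir}' exists but is not an initialized secrets basedir")
        (basedir / MARKER).touch()
        return basedir
    if not (allow_create or under_home):
        # Outside the home directory, creation must be explicitly requested.
        sys.exit("[-] pass allow_create=True (or use 'psec init') to create it")
    basedir.mkdir(parents=True, exist_ok=True)
    (basedir / MARKER).touch()
    return basedir
```

Called with a path under the home directory, the sketch creates and marks the directory silently; anywhere else it refuses unless `allow_create=True` is passed.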
Return the default environment identifier. There are multiple ways for a user to specify the environment to use for python_secrets commands. Some of these involve explicit settings (e.g., via command line option, a saved value in the current working directory, or an environment variable) or implicitly from the name of the current working directory. | def get_default_environment(cwd=None):
# NOTE(dittrich): I know this code has multiple return points
# but it is simpler and easier to understand this way.
#
    # Highest priority is an inherited environment variable.
environment = os.getenv('D2_ENVIRONMENT', None)
if environment is not None:
return environment
#
# Next is saved file in current working directory.
if cwd is None:
cwd = os.getcwd()
local_default = get_saved_default_environment(cwd=cwd)
if local_default not in ['', None]:
return local_default
#
# Lowest priority is the directory path basename.
return os.path.basename(cwd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_env_name(self):\n if self.options.environment:\n return self.options.environment\n elif os.environ.get(\"JUJU_ENV\"):\n return os.environ['JUJU_ENV']\n\n env_ptr = os.path.join(self.juju_home, \"current-environment\")\n if os.path.exists(env_ptr):\n with open(env_ptr) as fh:\n return fh.read().strip()\n\n with open(self.get_env_conf()) as fh:\n conf = yaml.safe_load(fh.read())\n if not 'default' in conf:\n raise ConfigError(\"No Environment specified\")\n return conf['default']",
"def get_local_default_file(cwd=None):\n # TODO(dittrich): May need to do this differently to support\n # Windows file systems.\n if cwd is None:\n cwd = os.getcwd()\n return Path(cwd) / '.python_secrets_environment'",
"def get_conda_env_name():\n env_name = os.popen('echo $CONDA_DEFAULT_ENV').read().strip()\n if env_name == '' or env_name == '$CONDA_DEFAULT_ENV':\n env_name = 'base'\n logging.info('Anaconda environment: ' + env_name)\n return env_name",
"def get_env(env_name: str, default: Optional[str] = None) -> str:\n if env_name not in os.environ:\n if default is None:\n raise KeyError(f\"{env_name} not defined and no default value is present!\")\n return default\n\n env_value: str = os.environ[env_name]\n if not env_value:\n if default is None:\n raise ValueError(\n f\"{env_name} has yet to be configured and no default value is present!\"\n )\n return default\n\n return env_value",
"def default_credentials_id(self) -> str:\n if self._default_credentials_id is None:\n default_credentials_id = self._get_env(\"DEFAULT_CREDENTIALS_ID\")\n self._default_credentials_id = default_credentials_id\n\n return self._default_credentials_id",
"def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')",
"def get_default_secrets_basedir():\n default_basedir = Path.home() / BASEDIR_BASENAME\n return Path(\n os.getenv('D2_SECRETS_BASEDIR', default_basedir)\n )",
"def get_saved_default_environment(cwd=None):\n env_file = get_local_default_file(cwd=cwd)\n saved_default = None\n if os.path.exists(env_file):\n with open(env_file, 'r') as f:\n saved_default = f.read().replace('\\n', '')\n return saved_default",
"def env(setting, default=None):\n key = os.environ.get(setting, default)\n\n if key is None:\n error_msg = \"Set the %s env variable\" % setting\n raise ImproperlyConfigured(error_msg)\n\n return key",
"def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)",
"def get_env(self) -> str:\n return self.env or ENV",
"def get_environment_var(env_name, default_value):\n if env_name in os.environ:\n return os.environ[env_name]\n else:\n return default_value",
"def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")",
"def getenv(name, default=None):\n return os.environ.get(name, default)",
"def getenv_string(setting, default=''):\n return os.environ.get(setting, default)",
"def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)",
"def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''",
"def env(*vars, **kwargs):\r\n for v in vars:\r\n value = os.environ.get(v)\r\n if value:\r\n return value\r\n return kwargs.get('default', '')",
"def get_identity_name(identity_kind: str = GLOBAL_APPLICATION_CONFIGURATION) -> str:\n identity_name = os.environ.get(identity_kind)\n if identity_name:\n return identity_name\n # TODO: Add discovery here? This can probably be inferred.\n # Need to be careful because not all users may have IAM privileges.\n # -kmp 31-Aug-2022\n context = \"\"\n account_number = os.environ.get('ACCOUNT_NUMBER')\n if account_number:\n context = f\" in account {account_number}\"\n raise ValueError(f\"There is no default identity name available for {identity_kind}{context}.\")",
"def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')",
"def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')",
"def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")",
"def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v)\n if value:\n return value\n return kwargs.get('default', '')",
"def GetEnvironFallback(var_list, default):\n for var in var_list:\n if var in os.environ:\n return os.environ[var]\n return default",
"def parameter_environment_or_default(parameter, env_var: str, default):\n if parameter is not None:\n return parameter\n if env_var in os.environ:\n return os.environ[env_var]\n return default",
"def env(*_vars, **kwargs):\r\n for v in _vars:\r\n value = os.environ.get(v, None)\r\n if value:\r\n return value\r\n return kwargs.get('default', '')",
"def env(*_vars, **kwargs):\n for v in _vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')",
"def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvVarArgs']]]]:\n return pulumi.get(self, \"env\")",
"def managed_environment_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"managed_environment_id\")",
"def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")"
]
| [
"0.71756715",
"0.7117789",
"0.68693656",
"0.6850072",
"0.68206316",
"0.67635626",
"0.6761713",
"0.6749947",
"0.6571323",
"0.65223706",
"0.64882225",
"0.64521635",
"0.64339304",
"0.6418718",
"0.64067113",
"0.640124",
"0.63758415",
"0.63409525",
"0.6335122",
"0.6334783",
"0.6334783",
"0.6322582",
"0.63160217",
"0.6305141",
"0.62905926",
"0.6282795",
"0.6271595",
"0.62438124",
"0.62326616",
"0.6231738"
]
| 0.74237955 | 0 |
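The lookup order spelled out in the `get_default_environment` record above — an inherited `D2_ENVIRONMENT` variable, then a default saved in the working directory, then the directory's basename — condenses into a small standard-library sketch. The saved-file name is taken from the `get_local_default_file` helper shown among the negatives; everything else here is illustrative.

```python
import os
from pathlib import Path


def default_environment_sketch(cwd=None):
    # 1. An inherited environment variable has the highest priority.
    environment = os.getenv("D2_ENVIRONMENT")
    if environment:
        return environment
    cwd = cwd or os.getcwd()
    # 2. Next, a default saved in the current working directory.
    saved = Path(cwd) / ".python_secrets_environment"
    if saved.exists():
        text = saved.read_text().strip()
        if text:
            return text
    # 3. Finally, fall back to the basename of the working directory.
    return os.path.basename(cwd)
```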
Just copy the descriptions portion of an environment directory from src to dst. | def copydescriptions(src: Path, dst: Path):
if not dst.suffix == '.d':
raise InvalidDescriptionsError(
msg=f"[-] destination '{dst}' is not a descriptions ('.d') directory" # noqa
)
# Ensure destination directory exists.
dst.mkdir(exist_ok=True)
if src.suffix == '.d' and not src.is_dir():
raise InvalidDescriptionsError(
msg=f"[-] source '{src}' is not a descriptions ('.d') directory" # noqa
)
for descr_file in [f for f in src.iterdir() if f.suffix == '.json']:
src_text = descr_file.read_text(encoding='utf-8')
dst_file = dst / descr_file.name
dst_file.write_text(src_text, encoding='utf-8')
remove_other_perms(dst) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, dst ]\n svj.core.utils.run_command(cmd)",
"def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copydir(self):\n pass",
"def RestoreCase(dirc, dest):\n subprocess.call(['cp', '-r', dirc, dest])",
"def test_copy(self):\n\n tempdir = tempfile.mkdtemp()\n include_example = os.path.join(here, 'include-example.ini')\n manifest = ManifestParser(manifests=(include_example,))\n manifest.copy(tempdir)\n self.assertEqual(sorted(os.listdir(tempdir)),\n ['fleem', 'include', 'include-example.ini'])\n self.assertEqual(sorted(os.listdir(os.path.join(tempdir, 'include'))),\n ['bar.ini', 'crash-handling', 'flowers', 'foo.ini'])\n from_manifest = ManifestParser(manifests=(include_example,))\n to_manifest = os.path.join(tempdir, 'include-example.ini')\n to_manifest = ManifestParser(manifests=(to_manifest,))\n self.assertEqual(to_manifest.get('name'), from_manifest.get('name'))\n shutil.rmtree(tempdir)",
"def copy_skel(src, dest):\n md_common.copytree(src, dest)",
"def copy():\n put(os.path.join('dist', get_egg_name()), remote_egg_dir)",
"def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)",
"def copy_test_configuration(self, source_dir, dest_dir):\n for root, dirs, files in os.walk(source_dir):\n if '.svn' in dirs:\n dirs.remove('.svn')\n dirs = [ d for d in dirs if not d.startswith('gyptest') ]\n files = [ f for f in files if not f.startswith('gyptest') ]\n for dirname in dirs:\n source = os.path.join(root, dirname)\n destination = source.replace(source_dir, dest_dir)\n os.mkdir(destination)\n if sys.platform != 'win32':\n shutil.copystat(source, destination)\n for filename in files:\n source = os.path.join(root, filename)\n destination = source.replace(source_dir, dest_dir)\n shutil.copy2(source, destination)",
"def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))",
"def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0,\n sub_vars=True, interactive=False, overwrite=True,\n template_renderer=None, out_=sys.stdout):\n def out(msg):\n out_.write(msg)\n out_.write('\\n')\n out_.flush()\n # This allows you to use a leading +dot+ in filenames which would\n # otherwise be skipped because leading dots make the file hidden:\n vars.setdefault('dot', '.')\n vars.setdefault('plus', '+')\n use_pkg_resources = isinstance(source, tuple)\n if use_pkg_resources:\n names = sorted(pkg_resources.resource_listdir(source[0], source[1]))\n else:\n names = sorted(os.listdir(source))\n pad = ' '*(indent*2)\n if not os.path.exists(dest):\n if verbosity >= 1:\n out('%sCreating %s/' % (pad, dest))\n if not simulate:\n makedirs(dest, verbosity=verbosity, pad=pad)\n elif verbosity >= 2:\n out('%sDirectory %s exists' % (pad, dest))\n for name in names:\n if use_pkg_resources:\n full = '/'.join([source[1], name])\n else:\n full = os.path.join(source, name)\n reason = should_skip_file(name)\n if reason:\n if verbosity >= 2:\n reason = pad + reason % {'filename': full}\n out(reason)\n continue # pragma: no cover\n if sub_vars:\n dest_full = os.path.join(dest, substitute_filename(name, vars))\n sub_file = False\n if dest_full.endswith('_tmpl'):\n dest_full = dest_full[:-5]\n sub_file = sub_vars\n if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir((source[0], full), dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif not use_pkg_resources and os.path.isdir(full):\n if verbosity:\n out('%sRecursing into %s' % (pad, os.path.basename(full)))\n copy_dir(full, dest_full, vars, verbosity, simulate,\n indent=indent+1,\n sub_vars=sub_vars, interactive=interactive,\n template_renderer=template_renderer, out_=out_)\n continue\n elif use_pkg_resources:\n content = pkg_resources.resource_string(source[0], full)\n else:\n f = open(full, 'rb')\n content = f.read()\n f.close()\n if sub_file:\n try:\n content = substitute_content(\n content, vars, filename=full,\n template_renderer=template_renderer\n )\n except SkipTemplate:\n continue # pragma: no cover\n if content is None:\n continue # pragma: no cover\n already_exists = os.path.exists(dest_full)\n if already_exists:\n f = open(dest_full, 'rb')\n old_content = f.read()\n f.close()\n if old_content == content:\n if verbosity:\n out('%s%s already exists (same content)' %\n (pad, dest_full))\n continue # pragma: no cover\n if interactive:\n if not query_interactive(\n native_(full, fsenc), native_(dest_full, fsenc),\n native_(content, fsenc), native_(old_content, fsenc),\n simulate=simulate, out_=out_):\n continue\n elif not overwrite:\n continue # pragma: no cover\n if verbosity and use_pkg_resources:\n out('%sCopying %s to %s' % (pad, full, dest_full))\n elif verbosity:\n out(\n '%sCopying %s to %s' % (pad, os.path.basename(full),\n dest_full))\n if not simulate:\n f = open(dest_full, 'wb')\n f.write(content)\n f.close()",
"def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)",
"def cpr(src, dst):\n shutil.copytree(src, dst)",
"def copymode(src, dest):\n import shutil\n\n shutil.copymode(src, dest)",
"def _clone_defaults(self, source, dest, context):\n\n for base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)",
"def copy_deep(src: str, dst: str, create_dst_dir: bool = False) -> None:\n system_is_darwin = platform.system().lower() == \"darwin\"\n if create_dst_dir:\n mkdir_p(os.path.dirname(dst))\n src_is_link = os.path.islink(src)\n dst_exists = os.path.lexists(dst)\n if os.path.isdir(src) and not src_is_link:\n logging.debug(\"Copying directory {} to {}\".format(src, dst))\n mkdir_p(dst)\n for name in os.listdir(src):\n copy_deep(os.path.join(src, name), os.path.join(dst, name))\n elif src_is_link:\n if dst_exists:\n return\n target = os.readlink(src)\n logging.debug(\"Creating symlink {} -> {}\".format(dst, target))\n os.symlink(target, dst)\n else:\n if dst_exists:\n if not system_is_darwin:\n return\n # Only overwrite the file if the source is newer than the destination.\n if os.path.getmtime(src) <= os.path.getmtime(dst):\n return\n logging.debug(\"Copying file {} to {}\".format(src, dst))\n # Preserve the file attributes.\n shutil.copy2(src, dst)",
"def bootstrap_development_distribution(project_name: str, dest_dir: Path):\n src_dir = Path(__file__).parent.parent.absolute()\n print(f\"Bootstrap: {src_dir} -> {dest_dir}\")\n shutil.copytree(\n src_dir,\n dest_dir,\n ignore=shutil.ignore_patterns(\n project_name.lower(),\n \".git\",\n \"build\",\n \"dist\",\n \"docs\",\n \".pytest_cache\",\n \".eggs\",\n \"templates\",\n \"__pycache__\",\n ),\n )",
"def set_dest(self, mode, dest, prefix):\n try: \n os.mkdir(dest)\n os.mkdir(dest+'/images')\n os.mkdir(dest+'/images'+'/train')\n os.mkdir(dest+'/images'+'/test')\n os.mkdir(dest+'/labels')\n os.mkdir(dest+'/labels'+'/train')\n os.mkdir(dest+'/labels'+'/test')\n except FileExistsError:\n pass\n\n if mode == 'train':\n self.detImage = dest+'images/'+'train/'\n self.detLabel = dest+'labels/'+'train/'\n self.detList = dest+'train.txt'\n elif mode == 'test':\n self.detImage = dest+'images/'+'test/'\n self.detLabel = dest+'labels/'+'test/'\n self.detList = dest+'test.txt'\n self.prefix = prefix",
"def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())",
"def copy_dir(src, dst):\n try:\n debug.log(\"copy dir from \"+ src, \"to \"+ dst)\n shutil.copytree(src, dst)\n except Exception as e:\n debug.log(\"Error: happened while copying!\\n%s\\n\"%e)",
"def copy(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copy2(src, dst)",
"def copy_structure(self, other_directory):\n pass",
"def copy(from_dir: tfds.typing.PathLike, to_dir: tfds.typing.PathLike) -> None:\n for full_name in tfds.core.load.list_full_names():\n from_full_name_dir = os.path.join(from_dir, full_name)\n to_full_name_dir = os.path.join(to_dir, full_name)\n\n # Skip if the dataset isn't generated or that metadata are already copied\n if not tf.io.gfile.exists(from_full_name_dir):\n logging.info('Skipping %s (not found)', from_full_name_dir)\n continue\n if tf.io.gfile.exists(to_full_name_dir) and not FLAGS.overwrite:\n logging.info('Skipping %s (already exists)', to_full_name_dir)\n continue\n\n _copy_metadata(from_dir=from_full_name_dir, to_dir=to_full_name_dir)",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst",
"def copy_fixture(src: Path, dest: Path) -> Path:\n return shutil.copy(src.absolute(), dest.absolute())",
"def _copy_sources():\n shutil.rmtree(SRC_DIR_LOCAL, ignore_errors=True)\n os.mkdir(SRC_DIR_LOCAL)\n\n shutil.copy(os.path.join(SRC_DIR_REPO, 'LICENSE.txt'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'z3.pc.cmake.in'), SRC_DIR_LOCAL)\n shutil.copy(os.path.join(SRC_DIR_REPO, 'CMakeLists.txt'), SRC_DIR_LOCAL)\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'cmake'), os.path.join(SRC_DIR_LOCAL, 'cmake'))\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'scripts'), os.path.join(SRC_DIR_LOCAL, 'scripts'))\n\n # Copy in src, but avoid recursion\n def ignore_python_setup_files(src, _):\n if os.path.normpath(src).endswith('api/python'):\n return ['core', 'dist', 'MANIFEST', 'MANIFEST.in', 'setup.py', 'z3_solver.egg-info']\n return []\n shutil.copytree(os.path.join(SRC_DIR_REPO, 'src'), os.path.join(SRC_DIR_LOCAL, 'src'),\n ignore=ignore_python_setup_files)"
]
| [
"0.598834",
"0.59464955",
"0.587178",
"0.57934016",
"0.57934016",
"0.5778099",
"0.5773663",
"0.5771135",
"0.57689947",
"0.5698882",
"0.5686469",
"0.5679195",
"0.56741464",
"0.5659078",
"0.5631439",
"0.56296587",
"0.55986625",
"0.5580042",
"0.5578742",
"0.5528275",
"0.5516777",
"0.5495479",
"0.54898095",
"0.54790795",
"0.5428948",
"0.5413854",
"0.5411854",
"0.53835225",
"0.5373568",
"0.5368093"
]
| 0.70404476 | 0 |
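`copydescriptions` in the record above copies only the `*.json` files between two descriptions (`.d`) directories. A rough `shutil`-based equivalent is sketched below; it omits the `remove_other_perms` permission tightening, and the exception type is a stand-in rather than the package's own `InvalidDescriptionsError`.

```python
import shutil
from pathlib import Path


def copy_descriptions_sketch(src: Path, dst: Path) -> None:
    # Both endpoints are expected to be descriptions ('.d') directories.
    if dst.suffix != ".d":
        raise ValueError(f"destination '{dst}' is not a descriptions ('.d') directory")
    if src.suffix != ".d" or not src.is_dir():
        raise ValueError(f"source '{src}' is not a descriptions ('.d') directory")
    dst.mkdir(exist_ok=True)
    for descr_file in src.glob("*.json"):
        shutil.copyfile(descr_file, dst / descr_file.name)
```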
Output an ASCII BEL character to ``stderr``. | def bell():
if sys.stderr.isatty():
sys.stderr.write('\a')
sys.stderr.flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_err(self, text): # pragma: no cover\n # type: (str) -> None\n stderr = self.stderr\n if self.stderr.closed:\n stderr = sys.stderr\n stderr.write(decode_output(u\"\\r\", target_stream=stderr))\n stderr.write(decode_output(CLEAR_LINE, target_stream=stderr))\n if text is None:\n text = \"\"\n text = decode_output(u\"{0}\\n\".format(text), target_stream=stderr)\n self.stderr.write(text)\n self.out_buff.write(decode_output(text, target_stream=self.out_buff))",
"def err(*s):\n sys.stderr.write(TERM.bold_red)\n sys.stderr.write('Error: ')\n for part in s:\n sys.stderr.write(part)\n sys.stderr.write(TERM.normal)\n sys.stderr.write('\\n')",
"def print_std_err(str_):\n print(str_, file=sys.stderr)",
"def to_stderr(self, message):\n message = self.ydl._bidi_workaround(message)\n output = message + '\\n'\n self.ydl._write_string(output, self.ydl._err_file)",
"def error(text):\n print(red(\"✘ {0}\".format(text)))\n sys.stdout.flush()",
"def err(self, output, newline=True):\r\n self.stderr.write(output)\r\n if newline:\r\n self.stderr.write(os.linesep)",
"def print_error(message):\n from sys import stderr\n print(\"\\033[1;31;40m \" + message + \"\\033[0;37;40m\", file=stderr)",
"def write(self, msg):\n sys.stderr.write(msg)",
"def printerr(message):\n sys.stderr.write('{}\\n'.format(message))\n sys.stderr.flush()",
"def err(string, exitval):\n\tprint >> sys.stderr, string.rstrip()\n\tsys.exit(exitval)",
"def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))",
"def to_stderr(message):\n print >> sys.stderr, message",
"def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))",
"def print_failure_msg(msg):\n click.secho(msg, fg='red', file=sys.stderr)",
"def print_err(msg):\n print(msg, file=sys.stderr)",
"def write(self, msg):\n\n sys.stderr.write(msg)",
"def printerr(msg):\n print(msg, file=sys.stderr)",
"def fail():\n sys.stdout.write('%s[ fail ]%s\\n' % (colors.RED, colors.RESET))",
"def color_print(message, color, newline='\\n'):\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))",
"def write(self, msg):\n\n self.clear()\n if not msg.endswith(\"\\n\"):\n sys.stderr.write(msg+\"\\n\")\n else:\n sys.stderr.write(msg)\n self.draw()",
"def print_to_stderr(msg):\n sys.stderr.write(msg)",
"def print_err(err):\n return stdout.write(err.args[0])",
"def error(message, exits=None): # pylint: disable=unused-argument\n print(crayons.red(fmt(message, \"[✗]\"), bold=True))\n sys.stdout.flush()",
"def eprint(errmsg):\n print(errmsg, file=STDERR)",
"def printerr(*args, **kwargs):\n console_print(sys.stderr, *args, **kwargs)",
"def print_err(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 31, **kwargs)",
"def errprint(msg):\n\n print('!! *** ERROR: %s' % msg)",
"def stderr(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"stderr\", _args)\n return _ctx.execute_sync(str)",
"def _print_error(message):\n sys.stderr.write(str(message) + \"\\n\")\n sys.stderr.flush()",
"def _write_err_msg_and_quit(self, msg):\n sys.stderr.write(msg)\n sys.exit(1)"
]
| [
"0.666913",
"0.6625823",
"0.6519658",
"0.6480107",
"0.6476801",
"0.64253056",
"0.63144094",
"0.62877625",
"0.62532157",
"0.6244532",
"0.62090224",
"0.619707",
"0.6193064",
"0.6173199",
"0.6130274",
"0.6129684",
"0.61290085",
"0.6114449",
"0.610679",
"0.6103659",
"0.60770804",
"0.60712105",
"0.5994633",
"0.59943837",
"0.5992277",
"0.5989011",
"0.5935451",
"0.5899027",
"0.58899885",
"0.5877419"
]
| 0.7196933 | 0 |
Identifies the filesystem mount point for the partition containing ``mypath``. | def getmount(mypath): # noqa
path_ = os.path.realpath(os.path.abspath(mypath))
while path_ != os.path.sep:
if os.path.ismount(path_):
return path_
path_ = os.path.abspath(os.path.join(path_, os.pardir))
return path_ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mount_point(path):\n\n path = os.path.abspath(path)\n while path != os.path.sep:\n if os.path.ismount(path):\n return path\n path = os.path.abspath(os.path.join(path, os.pardir))\n return path",
"def find_mount_point(path):\n path = os.path.abspath(path)\n while not os.path.ismount(path):\n path = os.path.dirname(path)\n return path",
"def get_fs_type(mypath):\n\n root_type = ''\n for part in psutil.disk_partitions():\n if part.mountpoint == os.path.sep:\n root_type = part.fstype\n continue\n if str(mypath).startswith(part.mountpoint):\n return part.fstype\n return root_type",
"def get_disk_by_mountpoint(mnt_point):\n diskparts = psutil.disk_partitions()\n for item in diskparts:\n if item.mountpoint == mnt_point:\n return realpath(item.device)\n return None",
"def getmount_fstype(mypath):\n\n mountpoint = getmount(mypath)\n return get_fs_type(mountpoint)",
"def FilesystemMountedAt(self, path):\n mount_info = self._GetMountSourceAndTarget(path)\n return mount_info[0] if mount_info else None",
"def get_device_mounted_at(query_path):\n\n # pylint: disable=unused-variable\n for device, mountpoint, filesystem, options in iter_mounts():\n try:\n if os.path.samefile(mountpoint, query_path):\n return device\n except OSError:\n continue\n\n return None",
"def mount_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mount_path\")",
"def mount_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mount_path\")",
"def mount_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"mount_path\")",
"def get_mount(cls, path: os.PathLike) -> ty.Tuple[Path, str]:\n try:\n # Only the first match (most recent parent) counts, mount table sorted longest\n # to shortest\n return next(\n (Path(p), t)\n for p, t in cls.get_mount_table()\n if str(path).startswith(p)\n )\n except StopIteration:\n return (Path(\"/\"), \"ext4\")",
"def get_mount_point(self):\n try:\n output = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--output=TARGET',\n '--raw',\n self.canonical_device_file,\n ]\n )\n # Examples:\n # /media/8c982ec2-8aa7-4fe2-a912-7478f0429e06\n # /srv/_dev_disk_by-id_dm-name-vg01-lv01\n # /srv/dev-disk-by-label-xx\\x20yy\n return openmediavault.string.unescape_blank(output.decode().strip())\n except subprocess.CalledProcessError:\n pass\n return None",
"def device_mounted(uuid):\n out, err = run_cmd(['lsblk', '-o', 'NAME,UUID,MOUNTPOINT', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('children', blkdevice):\n for child in blkdevice['children']:\n if key_exists('mountpoint', child) and child['uuid'] == uuid:\n return child['mountpoint']",
"def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount",
"def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')",
"def path_mounts(self) -> Path:\n return self.path_supervisor / MOUNTS_FOLDER",
"def get_mount(path, _mounts=None):\n path = os.path.abspath(path)\n for mount in sorted(mounts() if _mounts is None else _mounts, key=lambda m: len(m.path), reverse=True):\n if path.startswith(mount.path) and (mount.path.endswith('/') or path[len(mount.path):len(mount.path)+1] in ('/', '')):\n return mount",
"def get_mountpoint(host, fqpath):\n command = \"df -P %s | awk 'END{print $NF}'\" % fqpath\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n return rout.strip()\n\n g.log.error(\"Get mountpoint failed: %s\" % rerr)\n return None",
"def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))",
"def getPath(device):\n # If there is a entry record for this partition in fstab\n # use path in there.\n if device in listEntries():\n path_, fsType_, options_ = getEntry(device)\n return path_\n path = '/media/'\n label = getLabel(device)\n # There may be partitions without a label\n if not label:\n if not os.path.exists(path+'disk'):\n path = path+'disk'\n elif not os.path.ismount(path+'disk'):\n path = path+'disk'\n else:\n for i in range(1, len(getMounted())):\n if not os.path.exists(path+'disk-'+str(i)):\n path = path+'disk-'+str(i)\n break\n elif not os.path.ismount(path+'disk-'+str(i)):\n path = path+'disk-'+str(i)\n break\n # Labels may be same\n else:\n if not os.path.exists(path+label):\n path = path+label\n elif not os.path.ismount(path+label):\n path = path+label\n else:\n for i in range(1, len(getMounted())):\n if not os.path.exists(path+label+'-'+str(i)):\n path = path+label+'-'+str(i)\n break\n elif not os.path.ismount(path+label+'-'+str(i)):\n path = path+label+'-'+str(i)\n break\n return path",
"def get_part_device_path(disk_device_path, part_number):\n part_device_path = '{}-part{}'.format(disk_device_path, part_number)\n return part_device_path",
"def _get_mount_point_for_gluster_vol(self):\n return os.path.join(self.configuration.glusterfs_mount_point_base,\n self.gluster_manager.volume)",
"def is_mountpoint(path):\r\n return path in [m['dest'] for m in mounts()]",
"def mount(self, pathname):\n \n # Make sure we don't try to mount something twice.\n if pathname in self.mounts:\n raise ProcessorError(\"%s is already mounted\" % pathname)\n \n # Call hdiutil.\n try:\n p = subprocess.Popen((\"/usr/bin/hdiutil\",\n \"attach\",\n \"-plist\",\n \"-mountrandom\", \"/private/tmp\",\n \"-nobrowse\",\n pathname),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (out, err) = p.communicate()\n except OSError as e:\n raise ProcessorError(\"hdiutil execution failed with error code %d: %s\" % (\n e.errno, e.strerror))\n if p.returncode != 0:\n raise ProcessorError(\"mounting %s failed: %s\" % (pathname, err))\n \n # Read output plist.\n output = plistlib.readPlistFromString(out)\n \n # Find mount point.\n for part in output[\"system-entities\"]:\n if \"mount-point\" in part:\n # Add to mount list.\n self.mounts[pathname] = part[\"mount-point\"]\n self.output(\"Mounted disk image %s\" % (pathname))\n return self.mounts[pathname]",
"def fsmounted(mountpoint):\n ProcMounts.initialize()\n for mount in ProcMounts._mounts:\n if mount['fs_file'] == mountpoint:\n return mount\n return None",
"def test_get_drive_mount_point_name_unique_id_None(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n self.assertTrue(gfs.mount(root, drive))\n finally:\n gfs._allow_mount_per_server = False\n _reset_mock_variables()\n shutil.rmtree(tmpdir)",
"def mountpoint(self):\n return self._properties.get('mountpoint')",
"def get_mount_points():\n\n points = []\n t = subprocess.check_output(['mount'])\n t = t.decode()\n\n for line in t.splitlines():\n t = line.find('smbfs')\n if t < 0: continue\n b = line.find(' on ')\n points.append(line[b+4: t-2])\n # //[email protected]/storage on /Volumes/storage (smbfs, nodev, nosuid, mounted by ruan)\n return points",
"def test_get_drive_mount_point_name_unique_id_exists(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n gfs._unique_id = 0\n self.assertTrue(gfs.mount(root, drive))\n finally:\n gfs._allow_mount_per_server = False\n gfs._unique_id = None\n _reset_mock_variables()\n shutil.rmtree(tmpdir)",
"def isMounted(device):\n for _device, _path in getMounted():\n if device == _device:\n return _path\n return ''"
]
| [
"0.70490265",
"0.68355423",
"0.67501163",
"0.6665986",
"0.6661757",
"0.63751256",
"0.6372751",
"0.63337064",
"0.63337064",
"0.6311608",
"0.62759066",
"0.6214388",
"0.62104434",
"0.6207753",
"0.6144679",
"0.6122681",
"0.6104213",
"0.6098488",
"0.60473555",
"0.6020198",
"0.6014527",
"0.5975602",
"0.5973778",
"0.5972775",
"0.5953959",
"0.5947068",
"0.587527",
"0.58468246",
"0.58381176",
"0.5822553"
]
| 0.75168204 | 0 |
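On Python 3.7+ (POSIX), the walk-up loop in the `getmount` record above can also be expressed with `pathlib.Path.is_mount()`; a brief alternative sketch, shown only for comparison:

```python
from pathlib import Path


def find_mount_point(path) -> Path:
    # Resolve symlinks first, then climb until a mount point is hit ('/' always is).
    p = Path(path).resolve()
    while not p.is_mount():
        p = p.parent
    return p
```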
Identifies the file system type for a specific mount path. | def getmount_fstype(mypath):
mountpoint = getmount(mypath)
return get_fs_type(mountpoint) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_fs_type(mypath):\n\n root_type = ''\n for part in psutil.disk_partitions():\n if part.mountpoint == os.path.sep:\n root_type = part.fstype\n continue\n if str(mypath).startswith(part.mountpoint):\n return part.fstype\n return root_type",
"def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')",
"def get_fs_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetFsType', self.handle)",
"def getFsType(partitionDevice):\n if os.path.exists('/dev/{0}'.format(partitionDevice)) and S_ISBLK(os.stat('/dev/{0}'.format(partitionDevice)).st_mode):\n path = '/dev/{0}'.format(partitionDevice)\n elif os.path.isfile(partitionDevice):\n path = partitionDevice\n else:\n fstype = False\n path = False\n if path:\n try:\n fstype = execGetOutput(['/sbin/blkid', '-s', 'TYPE', '-o', 'value', path], shell = False)\n if fstype:\n fstype = fstype[0]\n else:\n fstype = False\n except subprocess.CalledProcessError as e:\n fstype = False\n if not fstype:\n # is it a real error or is it an extended partition?\n try:\n filetype = execGetOutput(['/usr/bin/file', '-s', path], shell = False)\n if 'extended partition table' in filetype:\n fstype = 'Extended'\n except subprocess.CalledProcessError:\n pass\n return fstype",
"def type(path):",
"def check_fs(uuid):\n out, err = run_cmd(['lsblk', '-o', 'UUID,FSTYPE', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('uuid', blkdevice) and blkdevice['uuid'] == uuid:\n return blkdevice['fstype']",
"def _get_path_type(self, path: Path) -> str:\n if path.is_dir():\n return 'dir'\n elif path.is_file():\n return 'file'\n else:\n raise FileNotFoundError(f\"File {path} not found\")",
"def get_fstype(fp):\n fp = op.abspath(fp)\n parent_mountpoints = {}\n for p in pu.disk_partitions(all=True):\n if op.samefile(op.commonpath((fp, p.mountpoint)), p.mountpoint):\n parent_mountpoints[p.mountpoint] = p.fstype\n return max(parent_mountpoints.items(), key=lambda p: len(p[0]))[0]",
"def drive_type():",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disktype(self):\n # easy enough\n return self._disktype",
"def fstype(self):\n return self._properties.get('fstype')",
"def getNametype(self, path):\n if os.path.isdir(path):\n return 'dir'\n elif os.path.isfile(path):\n return 'file'\n else: return None",
"def path_type(cls, path):\n if os.path.isdir(path):\n return 'package'\n else:\n return 'object'",
"def type(self, mpath):\n try:\n return self.stat(mpath)[\"type\"]\n except errors.MantaResourceNotFoundError:\n return None\n except errors.MantaAPIError:\n _, ex, _ = sys.exc_info()\n if ex.code in ('ResourceNotFound', 'DirectoryDoesNotExist'):\n return None\n else:\n raise",
"def get_partfstype(self, part):\n t = self.xlist(\"get-blkinfo\", part, \"TYPE\")\n return t[1][0] if t[0] and (len(t[1]) != 0) else \"\"",
"def get_file_system(disk):\n\n #TODO\n return \"Unknown\"",
"def filetype_of(path: Path) -> str:\n\n filetype = \"unsorted\"\n\n if path.suffix == \".json\":\n filetype = \"json\"\n\n elif path.suffix == \".txt\":\n if search(pattern=\"v[0-9][0-9]_[0-9]\", string=path.stem):\n filetype = \"onsets\"\n elif \"subject_info\" in path.stem:\n filetype = \"subject info\"\n\n elif path.suffix == \".nii\":\n if \"_t1_\" in path.stem:\n filetype = \"anat\"\n elif \"_lessvoids_\" in path.stem:\n filetype = \"func\"\n elif \"field_map\" in path.stem:\n filetype = \"fieldmap\"\n\n return filetype",
"def disk_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"disk_type\")",
"def get_type(full_path):\n status = {'type': []}\n if os.path.ismount(full_path):\n status['type'] += ['mount-point']\n elif os.path.islink(full_path):\n status['type'] += ['symlink']\n if os.path.isfile(full_path):\n status['type'] += ['file']\n elif os.path.isdir(full_path):\n status['type'] += ['dir']\n if not status['type']:\n if os.stat.S_ISSOCK(status['mode']):\n status['type'] += ['socket']\n elif os.stat.S_ISCHR(status['mode']):\n status['type'] += ['special']\n elif os.stat.S_ISBLK(status['mode']):\n status['type'] += ['block-device']\n elif os.stat.S_ISFIFO(status['mode']):\n status['type'] += ['pipe']\n if not status['type']:\n status['type'] += ['unknown']\n elif status['type'] and status['type'][-1] == 'symlink':\n status['type'] += ['broken']\n return status['type']",
"def file_type(self):\n try:\n return self.get_driver().ShortName\n except AttributeError:\n return",
"def file_type(location):\n try:\n return _detect(location, DETECT_TYPE)\n except:\n # TODO: log errors\n return ''",
"def get_type_from_path(path):\n return path.split('.')[-1]",
"def test_fstype():\n device = \"/dev/sdX1\"\n fs_type = \"ext4\"\n mock = MagicMock(return_value=\"FSTYPE\\n{}\".format(fs_type))\n with patch.dict(disk.__grains__, {\"kernel\": \"Linux\"}), patch.dict(\n disk.__salt__, {\"cmd.run\": mock}\n ), patch(\"salt.utils.path.which\", MagicMock(return_value=True)):\n assert disk.fstype(device) == fs_type",
"def guess_type(self, path):\n\n base, ext = posixpath.splitext(path)\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n ext = ext.lower()\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n else:\n return self.extensions_map['']",
"def guess_type(self, path):\n\n base, ext = posixpath.splitext(path)\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n ext = ext.lower()\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n else:\n return self.extensions_map['']",
"def guess_type(self, path):\n\n\t\tbase, ext = posixpath.splitext(path)\n\t\tif ext in self.extensions_map:\n\t\t\treturn self.extensions_map[ext]\n\t\text = ext.lower()\n\t\tif ext in self.extensions_map:\n\t\t\treturn self.extensions_map[ext]\n\t\telse:\n\t\t\treturn self.extensions_map['']"
]
| [
"0.7314071",
"0.7037461",
"0.69199586",
"0.6844241",
"0.6774126",
"0.653924",
"0.64900947",
"0.64872056",
"0.6365881",
"0.6356303",
"0.6356303",
"0.6356303",
"0.6356303",
"0.6336653",
"0.6304011",
"0.6279205",
"0.6275102",
"0.6231511",
"0.6194144",
"0.6176265",
"0.6131921",
"0.6128948",
"0.60644656",
"0.60638946",
"0.60565567",
"0.59936965",
"0.59760314",
"0.59273416",
"0.59273416",
"0.5922786"
]
| 0.7720042 | 0 |
Identifies the file system type for a specific mount path. | def get_fs_type(mypath):
root_type = ''
for part in psutil.disk_partitions():
if part.mountpoint == os.path.sep:
root_type = part.fstype
continue
if str(mypath).startswith(part.mountpoint):
return part.fstype
return root_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getmount_fstype(mypath):\n\n mountpoint = getmount(mypath)\n return get_fs_type(mountpoint)",
"def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')",
"def get_fs_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetFsType', self.handle)",
"def getFsType(partitionDevice):\n if os.path.exists('/dev/{0}'.format(partitionDevice)) and S_ISBLK(os.stat('/dev/{0}'.format(partitionDevice)).st_mode):\n path = '/dev/{0}'.format(partitionDevice)\n elif os.path.isfile(partitionDevice):\n path = partitionDevice\n else:\n fstype = False\n path = False\n if path:\n try:\n fstype = execGetOutput(['/sbin/blkid', '-s', 'TYPE', '-o', 'value', path], shell = False)\n if fstype:\n fstype = fstype[0]\n else:\n fstype = False\n except subprocess.CalledProcessError as e:\n fstype = False\n if not fstype:\n # is it a real error or is it an extended partition?\n try:\n filetype = execGetOutput(['/usr/bin/file', '-s', path], shell = False)\n if 'extended partition table' in filetype:\n fstype = 'Extended'\n except subprocess.CalledProcessError:\n pass\n return fstype",
"def type(path):",
"def check_fs(uuid):\n out, err = run_cmd(['lsblk', '-o', 'UUID,FSTYPE', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('uuid', blkdevice) and blkdevice['uuid'] == uuid:\n return blkdevice['fstype']",
"def _get_path_type(self, path: Path) -> str:\n if path.is_dir():\n return 'dir'\n elif path.is_file():\n return 'file'\n else:\n raise FileNotFoundError(f\"File {path} not found\")",
"def get_fstype(fp):\n fp = op.abspath(fp)\n parent_mountpoints = {}\n for p in pu.disk_partitions(all=True):\n if op.samefile(op.commonpath((fp, p.mountpoint)), p.mountpoint):\n parent_mountpoints[p.mountpoint] = p.fstype\n return max(parent_mountpoints.items(), key=lambda p: len(p[0]))[0]",
"def drive_type():",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disk_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_type\")",
"def disktype(self):\n # easy enough\n return self._disktype",
"def fstype(self):\n return self._properties.get('fstype')",
"def getNametype(self, path):\n if os.path.isdir(path):\n return 'dir'\n elif os.path.isfile(path):\n return 'file'\n else: return None",
"def path_type(cls, path):\n if os.path.isdir(path):\n return 'package'\n else:\n return 'object'",
"def type(self, mpath):\n try:\n return self.stat(mpath)[\"type\"]\n except errors.MantaResourceNotFoundError:\n return None\n except errors.MantaAPIError:\n _, ex, _ = sys.exc_info()\n if ex.code in ('ResourceNotFound', 'DirectoryDoesNotExist'):\n return None\n else:\n raise",
"def get_partfstype(self, part):\n t = self.xlist(\"get-blkinfo\", part, \"TYPE\")\n return t[1][0] if t[0] and (len(t[1]) != 0) else \"\"",
"def get_file_system(disk):\n\n #TODO\n return \"Unknown\"",
"def filetype_of(path: Path) -> str:\n\n filetype = \"unsorted\"\n\n if path.suffix == \".json\":\n filetype = \"json\"\n\n elif path.suffix == \".txt\":\n if search(pattern=\"v[0-9][0-9]_[0-9]\", string=path.stem):\n filetype = \"onsets\"\n elif \"subject_info\" in path.stem:\n filetype = \"subject info\"\n\n elif path.suffix == \".nii\":\n if \"_t1_\" in path.stem:\n filetype = \"anat\"\n elif \"_lessvoids_\" in path.stem:\n filetype = \"func\"\n elif \"field_map\" in path.stem:\n filetype = \"fieldmap\"\n\n return filetype",
"def disk_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"disk_type\")",
"def get_type(full_path):\n status = {'type': []}\n if os.path.ismount(full_path):\n status['type'] += ['mount-point']\n elif os.path.islink(full_path):\n status['type'] += ['symlink']\n if os.path.isfile(full_path):\n status['type'] += ['file']\n elif os.path.isdir(full_path):\n status['type'] += ['dir']\n if not status['type']:\n if os.stat.S_ISSOCK(status['mode']):\n status['type'] += ['socket']\n elif os.stat.S_ISCHR(status['mode']):\n status['type'] += ['special']\n elif os.stat.S_ISBLK(status['mode']):\n status['type'] += ['block-device']\n elif os.stat.S_ISFIFO(status['mode']):\n status['type'] += ['pipe']\n if not status['type']:\n status['type'] += ['unknown']\n elif status['type'] and status['type'][-1] == 'symlink':\n status['type'] += ['broken']\n return status['type']",
"def file_type(self):\n try:\n return self.get_driver().ShortName\n except AttributeError:\n return",
"def file_type(location):\n try:\n return _detect(location, DETECT_TYPE)\n except:\n # TODO: log errors\n return ''",
"def get_type_from_path(path):\n return path.split('.')[-1]",
"def test_fstype():\n device = \"/dev/sdX1\"\n fs_type = \"ext4\"\n mock = MagicMock(return_value=\"FSTYPE\\n{}\".format(fs_type))\n with patch.dict(disk.__grains__, {\"kernel\": \"Linux\"}), patch.dict(\n disk.__salt__, {\"cmd.run\": mock}\n ), patch(\"salt.utils.path.which\", MagicMock(return_value=True)):\n assert disk.fstype(device) == fs_type",
"def guess_type(self, path):\n\n base, ext = posixpath.splitext(path)\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n ext = ext.lower()\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n else:\n return self.extensions_map['']",
"def guess_type(self, path):\n\n base, ext = posixpath.splitext(path)\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n ext = ext.lower()\n if ext in self.extensions_map:\n return self.extensions_map[ext]\n else:\n return self.extensions_map['']",
"def guess_type(self, path):\n\n\t\tbase, ext = posixpath.splitext(path)\n\t\tif ext in self.extensions_map:\n\t\t\treturn self.extensions_map[ext]\n\t\text = ext.lower()\n\t\tif ext in self.extensions_map:\n\t\t\treturn self.extensions_map[ext]\n\t\telse:\n\t\t\treturn self.extensions_map['']"
]
| [
"0.77212733",
"0.7039252",
"0.6920957",
"0.68442893",
"0.67736435",
"0.6541093",
"0.64906096",
"0.6488126",
"0.6367459",
"0.63570124",
"0.63570124",
"0.63570124",
"0.63570124",
"0.63378847",
"0.6305549",
"0.6281033",
"0.6275993",
"0.62332624",
"0.6194302",
"0.61794585",
"0.6131106",
"0.61298263",
"0.60655814",
"0.60645133",
"0.605582",
"0.5992743",
"0.5977475",
"0.5927404",
"0.5927404",
"0.5922887"
]
| 0.73147815 | 1 |
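The negatives listed above all revolve around classifying a filesystem path (file, directory, symlink, device, and so on). A minimal, self-contained sketch of that idea using only pathlib — not taken from any of the entries above, purely illustrative — might look like this:

# Minimal sketch (not from the dataset): classify a path the way several
# of the negative examples above do, using only pathlib.
from pathlib import Path

def classify_path(path: str) -> str:
    p = Path(path)
    if p.is_symlink():          # checked first so symlinks (even broken ones) report as symlinks
        return "symlink"
    if p.is_dir():
        return "dir"
    if p.is_file():
        return "file"
    return "other" if p.exists() else "missing"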
Gets a list of absolute paths to one or more files associated with a path. If ``path`` is a directory, the files contained in it are returned, otherwise the path to the file is the only item in the list. | def get_files_from_path(path=None):
abspath = os.path.abspath(path)
if os.path.isfile(abspath):
files = [abspath]
elif os.path.isdir(abspath):
files = [
os.path.join(abspath, fname)
for fname in os.listdir(abspath)
]
else:
raise RuntimeError(f"[-] '{path}' must be a file or directory")
return files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_files(path):\r\n\tfiles = []\r\n\tfor dirpath, _, filenames in os.walk(path):\r\n\t\tfor filename in [f for f in filenames]:\r\n\t\t\tfiles.append(os.path.join(dirpath, filename))\r\n\treturn files",
"def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]",
"def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []",
"def get_files(path: str) -> List[str]:\n if not isdir(path):\n return [path] # its expected to return a list each time even if its a single element\n return [file for fileOrDir in listdir(path) for file in get_files(path + '/' + fileOrDir)]\n # return list of each file returned by the recursive call getFiles(fileOrDir) on\n # each fileOrDir in listdir(path)",
"def get_files(path, extension=None, full_path=True):\n\n file_list = list()\n for root, _, files in walk(path):\n for filename in files:\n if extension:\n if filename.endswith(extension):\n if full_path:\n file_list.append(join(root, filename))\n else:\n file_list.append(filename)\n else:\n file_list.append(join(root, filename))\n\n return file_list",
"def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]",
"def get_files(path: str) -> List:\n files = []\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n files.append(file)\n # Reversed to prevent collision upon renaming\n return sorted(files, reverse=True)",
"def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result",
"def get_filenames(self, path):\n files_list = list()\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list",
"def get_files_from_directory(path):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n return files",
"def list_files(self, path=\"/\"):\n path = j.sal.fs.pathClean(path)\n dir_obj = self._dir_model.get_by_name(path)\n if not dir_obj:\n raise j.exceptions.Base(\"path {} does not exist\".format(path))\n res = [self._file_model.get(item).name for item in dir_obj[0].files]\n return res",
"def get_files(path, exclude=None):\n exclude = exclude or '*.pyc'\n exclude_expr = '{}/**/{}'.format(path, exclude)\n exclude = set(glob.iglob(exclude_expr, recursive=True))\n\n expr = '{}/**'.format(path)\n paths = set(glob.iglob(expr, recursive=True)) - exclude\n\n files = []\n for filename in paths:\n if os.path.isfile(filename):\n files.append(os.path.abspath(filename))\n return files",
"def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files",
"def get_filenames(self, path: str):\n files_list = []\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list",
"def get_files(path='.', file_mask=['*'], recursive=False):\n \n def process_directory(dir_path, items):\n \"\"\"\n Processes files in 1 directory.\n\n \"\"\"\n result = []\n for item in items:\n name = os.path.join(dir_path, item)\n if os.path.isfile(name) and not os.path.islink(name):\n for mask in masks:\n if fnmatch.fnmatch(name, mask):\n result.append(os.path.abspath(name))\n break\n return result\n\n masks = [file_mask] if isinstance(file_mask, str) else file_mask\n assert isinstance(masks, list)\n\n # final list to be returned, contains all files\n res_list = []\n if recursive:\n for root, dirs, files in os.walk(path):\n files_checked = process_directory(root, files)\n res_list.extend(files_checked)\n else:\n res_list = process_directory(path, os.listdir(path))\n return res_list",
"def get_files(path: str, extension: str = '.wav') -> List[Path]:\n\n return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))",
"def get_files_in_dir(path):\n return [os.path.join(dir_name, file)\n for dir_name, subdirs, files in os.walk(path)\n for file in files]",
"def files_in_dir(path):\n return os.listdir(path)",
"def glob(path):\n path = os.path.abspath(path)\n if os.path.isdir(path):\n files = [d for d in [\n os.path.join(path, f) for f in os.listdir(path)\n ] if os.path.isfile(d)]\n else:\n files = glob.glob(path)\n print(\"Found {0} files\".format(len(files)))\n return files",
"def GetFiles(path):\n\n retfiles = []\n target_paths = []\n for root, dirs, files in os.walk(path):\n if root == path:\n target_paths = map(lambda d: os.path.join(root, d), dirs)\n continue\n if root not in target_paths:\n continue\n for f in files:\n if f[-4:] != '.txt':\n continue\n retfiles.append(os.path.join(root, f))\n return retfiles",
"def get_files_from_directory(path):\n return [f for f in listdir(path) if isfile(join(path, f))]",
"def get_paths(file_path):\n return glob(path.join(file_path, '*'))",
"def list_files_and_dirs(self, path=\"/\"):\n dirs = self.list_dirs(path)\n files = self.list_files(path)\n return dirs + files",
"def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))",
"def get_files(path, formats=[]):\n\n # Uses abs path as the directory\n absolute = abspath(path)\n all_files = os.listdir(absolute)\n\n # Get the absolute path of each file\n absolute_files = [\"/\".join([absolute, i]) for i in all_files]\n\n # Filter out non-files and return\n filtered_files = [f for f in absolute_files if os.path.isfile(f)]\n\n # Filter out unwanted file types (if requested)\n if formats:\n filtered_files = [f for f in filtered_files if is_filetype(f, formats)]\n \n return filtered_files",
"def filenames_from_path(path):\n with open(path) as f:\n filenames = f.read().splitlines()\n\n return filenames",
"def get_files_by_folder(path):\n\n f = []\n for (dirpath, dirnames, filenames) in walk(path):\n f.extend(filenames)\n break\n return f",
"def list_xml_path(path):\n path_list = glob.glob(os.path.join(path, '*', '*.xml'), recursive=True)\n return path_list",
"def get_files(path):\n\n # In case path is singular file:\n if os.path.isfile(path):\n return [path]\n\n all_files = []\n\n # Look for gitignore upstream\n gilist = get_gitignore(path)\n\n # In case path is directory:\n\n # In case no gitignore was found in current directory or up\n if not gilist:\n for root, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if d[0] != '.']\n\n # Constantly check for gitignore while walking\n if '.gitignore' in os.listdir(root):\n all_files.extend(get_files(root))\n dirs[:] = []\n files[:] = []\n\n for name in files:\n if not name.startswith('.'):\n all_files.append(os.path.join(root, name))\n\n # In case gitignore was found\n if gilist:\n for root, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if d[0] != '.' and d not in gilist]\n\n # If root dir is in gitignore break and go to next directory\n for item in gilist:\n if fnmatch.fnmatch(root, item):\n dirs[:] = []\n break\n\n else:\n # If file is gitignore material break and go to next file\n for name in files:\n for item in gilist:\n if fnmatch.fnmatch(name, item) or item.endswith(name):\n break\n\n else:\n # Finally append the file if it passed all tests\n if not name.startswith('.') and name.endswith(EXTES):\n all_files.append(os.path.join(root, name))\n return all_files",
"def list_files(self, path: str = None, contains: str = \".\") -> list:\n\n return [file for file in self.nlst(path or self.basepath) if contains in file]"
]
| [
"0.7801161",
"0.77511036",
"0.7665156",
"0.76141655",
"0.73560804",
"0.7337085",
"0.73270375",
"0.7286432",
"0.7249565",
"0.7235798",
"0.7231294",
"0.72299063",
"0.7225615",
"0.7196257",
"0.71882355",
"0.71856004",
"0.7184917",
"0.7183777",
"0.71656454",
"0.7161768",
"0.7157009",
"0.7091679",
"0.70159817",
"0.6960252",
"0.69548106",
"0.6942852",
"0.6918385",
"0.6904618",
"0.68988067",
"0.68447554"
]
| 0.8625542 | 0 |
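A short usage sketch for the get_files_from_path record above. The body is condensed from the positive document so the snippet runs on its own; the example paths are arbitrary.

import os

def get_files_from_path(path=None):            # condensed from the positive document
    abspath = os.path.abspath(path)
    if os.path.isfile(abspath):
        return [abspath]
    if os.path.isdir(abspath):
        return [os.path.join(abspath, fname) for fname in os.listdir(abspath)]
    raise RuntimeError(f"[-] '{path}' must be a file or directory")

print(get_files_from_path("."))         # absolute paths of every entry in the cwd
print(get_files_from_path(__file__))    # single-item list when run as a script

Note that for a directory the function returns the paths of everything inside it, subdirectories included, which matches the original's use of os.listdir.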
Return sorted list of valid environment paths found in `basedir`. | def get_environment_paths(basedir=None):
basedir = (
get_default_secrets_basedir() if basedir is None
else Path(basedir)
)
results = list()
for item in sorted(basedir.iterdir()):
if is_valid_environment(item):
results.append(item)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dir_list(load):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = []\n\n if \"saltenv\" not in load:\n return ret\n\n saltenv = load[\"saltenv\"]\n metadata = _init()\n\n if not metadata or saltenv not in metadata:\n return ret\n\n # grab all the dirs from the buckets cache file\n for bucket in _find_dirs(metadata[saltenv]):\n for dirs in bucket.values():\n # trim env and trailing slash\n dirs = _trim_env_off_path(dirs, saltenv, trim_slash=True)\n # remove empty string left by the base env dir in single bucket mode\n ret += [_f for _f in dirs if _f]\n\n return ret",
"def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)",
"def removeduppaths():\r\n # This ensures that the initial path provided by the interpreter contains\r\n # only absolute pathnames, even if we're running from the build directory.\r\n L = []\r\n known_paths = set()\r\n for dir in sys.path:\r\n # Filter out duplicate paths (on case-insensitive file systems also\r\n # if they only differ in case); turn relative paths into absolute\r\n # paths.\r\n dir, dircase = makepath(dir)\r\n if not dircase in known_paths:\r\n L.append(dir)\r\n known_paths.add(dircase)\r\n sys.path[:] = L\r\n return known_paths",
"def file_list(load):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = []\n\n if \"saltenv\" not in load:\n return ret\n\n saltenv = load[\"saltenv\"]\n metadata = _init()\n\n if not metadata or saltenv not in metadata:\n return ret\n for bucket in _find_files(metadata[saltenv]):\n for buckets in bucket.values():\n files = [f for f in buckets if not fs.is_file_ignored(__opts__, f)]\n ret += _trim_env_off_path(files, saltenv)\n\n return ret",
"def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]",
"def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths",
"def get_all_dirs(dirpath, base_dir=None):\n\tif not base_dir:\n\t\tpost = os.path.normpath(dirpath)\n\telif base_dir in dirpath:\n\t\t(pre, post) = dirpath.split(os.path.normpath(base_dir))\n\t\tpost = os.path.normpath(post)\n\telse:\n\t\treturn\n\tdirs = []\n\t(head, tail) = os.path.split(post)\n\twhile tail:\n\t\tdirs.append(tail)\n\t\t(head, tail) = os.path.split(head)\n\tdirs.reverse()\n\treturn dirs",
"def _get_paths():\n paths = [\n '/'\n ]\n return paths",
"def __get_environ_path(environ_key):\n environ_value = os.environ.get(environ_key)\n result = []\n\n if not environ_value:\n return result\n\n environ_path_list = environ_value.split(';')\n for each_path in environ_path_list:\n each_path = path.Path(each_path)\n\n if not each_path.exists():\n continue\n\n # make sure default directory first in the order\n if 'FrMaya' in each_path:\n result.insert(0, each_path)\n else:\n result.append(each_path)\n\n return result",
"def _include_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')\n if not paths:\n return []\n return paths.split(';')",
"def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories",
"def list_envs(self):\n if self.hdfs:\n files = self.hdfs.ls(self.hdfs_home + '/.knitDeps/', True)\n return [f for f in files if f['name'].endswith('.zip')]\n else:\n raise ImportError('Set the `hdfs` attribute to be able to list'\n 'environments.')",
"def get_environment(basedir):\n for env in ('devel', 'staging', 'prod'):\n if os.path.exists(os.path.join(basedir, env)):\n return env\n return 'devel'",
"def _init_pathinfo():\r\n d = set()\r\n for dir in sys.path:\r\n try:\r\n if os.path.isdir(dir):\r\n dir, dircase = makepath(dir)\r\n d.add(dircase)\r\n except TypeError:\r\n continue\r\n return d",
"def find_environment_folders(path: Optional[Path] = None, verbose: int = 0) -> Iterable[Tuple[str, Path]]:\n verbose = max(int(verbose or 0), 0)\n path = path or WORKON_HOME\n for root, directories, files in os.walk(path):\n found = []\n for index, name in enumerate(directories):\n directory = Path(root) / name\n if not validate_environment(directory):\n continue\n yield name, directory\n found.append(name)\n # This makes the search \"fast\" by skipping out on sub folders\n # that do not need to be searched because they have already\n # been identified as valid environments\n directories[:] = [d for d in directories if d not in found]",
"def _local_dir(self):\n return []",
"def required_dirs(self) -> list:\n return [\n self.get(\"campaign.characters.path\"),\n self.get(\"campaign.session.path\"),\n self.get(\"campaign.plot.path\"),\n ]",
"def site_paths(buildout, prefixes):\n\n def is_buildout_dir(path):\n return path.startswith(buildout['eggs-directory']) or \\\n path.startswith(buildout['develop-eggs-directory'])\n\n def is_in_prefixes(path):\n return any([path.startswith(k) for k in prefixes])\n\n retval = [os.path.realpath(k) for k in site.sys.path]\n return [k for k in retval if not (is_buildout_dir(k) or is_in_prefixes(k))]",
"def get_patient_dirs(base_folder):\n patient_dirs = sorted([x for x in base_folder.iterdir() if x.is_dir()])\n return patient_dirs",
"def get_installed_versions(cls) -> list[str]:\n\n pyenv_root = os.getenv(\"PYENV_ROOT\")\n if pyenv_root is None:\n raise Failure(\"PYENV_ROOT is not configured\")\n\n root_dir = Path(pyenv_root)\n version_dir = root_dir / \"versions\"\n\n return [i.name for i in version_dir.iterdir() if i.is_dir()]",
"def _exclude_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')\n if not paths:\n return []\n return paths.split(';')",
"def _trim_env_off_path(paths, saltenv, trim_slash=False):\n env_len = None if _is_env_per_bucket() else len(saltenv) + 1\n slash_len = -1 if trim_slash else None\n\n return [d[env_len:slash_len] for d in paths]",
"def baseline_paths(self) -> Iterator[List[Path]]:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n yield []\n else:\n with self._baseline_context():\n yield [\n relative_path\n for relative_path in self._target_paths\n if self._fname_to_path(repo, str(relative_path))\n not in self._status.added\n ]",
"def list_selfplay_dirs(base_dir):\n\n model_dirs = [os.path.join(base_dir, x)\n for x in tf.io.gfile.listdir(base_dir)]\n return sorted(model_dirs, reverse=True)",
"def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results",
"def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)",
"def find_vsh_rc_files(venv_path: Path) -> Iterable[Path]:\n path_sequence = [\n '/usr/local/etc/vsh',\n os.getenv('HOME'),\n '.',\n venv_path,\n ]\n cmds = [\n 'git rev-parse --show-toplevel',\n 'hg root',\n ]\n top_of_current_repo_path = None\n for cmd in cmds:\n cmd_list = shlex.split(cmd)\n if shutil.which(cmd_list[0]):\n top_of_current_repo_path = Path(subprocess.run(cmd_list, stderr=subprocess.PIPE, stdout=subprocess.PIPE).stdout.decode('utf-8').strip()) or None\n if top_of_current_repo_path and top_of_current_repo_path.exists():\n path_sequence.append(top_of_current_repo_path)\n break\n # general set of paths to search for vsh configuration files\n paths = [p for p in map(Path, [p_ for p_ in path_sequence if p_]) if p.exists() and (p / '.vshrc').exists()]\n memoized_paths: Set[Path] = set()\n for p in paths:\n p = p.expanduser().resolve().absolute()\n config_path = p / '.vshrc'\n if not config_path.exists() and p == venv_path:\n working = Path(top_of_current_repo_path or '.')\n build_vsh_rc_file(p, working=working)\n if p.exists() and config_path.exists():\n if config_path.is_file():\n if config_path in memoized_paths:\n continue\n memoized_paths.add(config_path)\n yield config_path\n elif config_path.is_dir():\n for root, folders, files in os.walk(str(config_path)):\n root_path = Path(root)\n for filename in files:\n filepath = (root_path / filename).absolute()\n if filepath in memoized_paths:\n continue\n memoized_paths.add(filepath)\n yield filepath",
"def show_envs(path: Optional[Path] = None):\n path = path or WORKON_HOME or Path.cwd()\n for name, path in sorted(find_environment_folders(path=path, verbose=1)):\n terminal.echo(f'Found {terminal.yellow(name)} under: {terminal.yellow(path)}')",
"def _get_config_dirs(project=None):\n snap = os.environ.get('SNAP')\n snap_c = os.environ.get('SNAP_COMMON')\n\n cfg_dirs = [\n _fixpath(os.path.join('~', '.' + project)) if project else None,\n _fixpath('~'),\n os.path.join('/etc', project) if project else None,\n '/etc',\n os.path.join(snap_c, \"etc\", project) if snap_c and project else None,\n os.path.join(snap, \"etc\", project) if snap and project else None,\n ]\n return [x for x in cfg_dirs if x]",
"def arduino_dist_places(self, dirname_parts):\r\n if 'arduino_dist_dir' in self:\r\n places = [self['arduino_dist_dir']]\r\n else:\r\n places = self.arduino_dist_dir_guesses\r\n return [os.path.join(p, *dirname_parts) for p in places]"
]
| [
"0.69561356",
"0.62635636",
"0.6199789",
"0.6019164",
"0.5972499",
"0.5962681",
"0.5954401",
"0.5949282",
"0.59453523",
"0.5911142",
"0.5864499",
"0.58633655",
"0.58458513",
"0.581863",
"0.5789844",
"0.5776963",
"0.5722316",
"0.5696323",
"0.5690342",
"0.56546295",
"0.5612966",
"0.556123",
"0.5540291",
"0.553824",
"0.5535567",
"0.551275",
"0.5507415",
"0.5498222",
"0.54813105",
"0.54638016"
]
| 0.843912 | 0 |
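The positive document above depends on two helpers from its surrounding package, get_default_secrets_basedir() and is_valid_environment(), neither of which appears in this record. A rough stand-alone sketch with a made-up validity check illustrates the same pattern:

from pathlib import Path

def is_valid_environment(item: Path) -> bool:
    # stand-in for the package's real check; here, any directory containing
    # a 'secrets.json' file counts as an environment (an assumption)
    return item.is_dir() and (item / "secrets.json").exists()

def get_environment_paths(basedir: Path) -> list:
    return [item for item in sorted(basedir.iterdir()) if is_valid_environment(item)]

# e.g. get_environment_paths(Path.home() / ".secrets")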
Derives the CIDR netblocks for an IP via WHOIS lookup. | def get_netblock(ip=None):
ip = str(ip).split('/')[0] if '/' in str(ip) else ip
obj = IPWhois(ip)
results = obj.lookup_whois()
return results['asn_cidr'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ip_get_blocks():\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=IPManager')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # get list of provisioning blocks\n blocklist = []\n for tblk in bs.find_all('table')[3].tr.div.table.find_all('tr'):\n tbx = {\n 'id': re.match(r'.+block_id=([0-9]+).*', tblk.find_all('td')[0].a['href']).group(1),\n 'prefix': tblk.find_all('td')[0].a.string,\n 'block': tblk.find_all('td')[1].string,\n 'usage': tblk.find_all('td')[2].string\n }\n blocklist.append(tbx)\n\n return bs, blocklist",
"def get_nets_lacnic(self, response):\n\n nets = []\n\n # Iterate through all of the networks found, storing the CIDR value\n # and the start and end positions.\n for match in re.finditer(\n r'^(inetnum|inet6num|route):[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n\n try:\n\n net['range'] = net['range'] = '{0} - {1}'.format(\n ip_network(net_range)[0].__str__(),\n ip_network(net_range)[-1].__str__()\n ) if '/' in net_range else net_range\n\n except ValueError: # pragma: no cover\n\n net['range'] = net_range\n\n temp = []\n for addr in net_range.split(', '):\n\n count = addr.count('.')\n if count is not 0 and count < 4:\n\n addr_split = addr.strip().split('/')\n for i in range(count + 1, 4):\n addr_split[0] += '.0'\n\n addr = '/'.join(addr_split)\n\n temp.append(ip_network(addr.strip()).__str__())\n\n net['cidr'] = ', '.join(temp)\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n\n except ValueError:\n\n pass\n\n return nets",
"def cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CidrBlockArgs']]]]:\n return pulumi.get(self, \"cidr_blocks\")",
"def get_nets_arin(self, response):\n\n nets = []\n\n # Find the first NetRange value.\n pattern = re.compile(\n r'^NetRange:[^\\S\\n]+(.+)$',\n re.MULTILINE\n )\n temp = pattern.search(response)\n net_range = None\n net_range_start = None\n if temp is not None:\n net_range = temp.group(1).strip()\n net_range_start = temp.start()\n\n # Iterate through all of the networks found, storing the CIDR value\n # and the start and end positions.\n for match in re.finditer(\n r'^CIDR:[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n net = copy.deepcopy(BASE_NET)\n\n if len(nets) > 0:\n temp = pattern.search(response, match.start())\n net_range = None\n net_range_start = None\n if temp is not None:\n net_range = temp.group(1).strip()\n net_range_start = temp.start()\n\n if net_range is not None:\n if net_range_start < match.start() or len(nets) > 0:\n\n try:\n\n net['range'] = '{0} - {1}'.format(\n ip_network(net_range)[0].__str__(),\n ip_network(net_range)[-1].__str__()\n ) if '/' in net_range else net_range\n\n except ValueError: # pragma: no cover\n\n net['range'] = net_range\n\n net['cidr'] = ', '.join(\n [ip_network(c.strip()).__str__()\n for c in match.group(1).split(', ')]\n )\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n\n except ValueError:\n\n pass\n\n return nets",
"def destination_cidr_blocks(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"destination_cidr_blocks\")",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"def get_nets_other(self, response):\n\n nets = []\n\n # Iterate through all of the networks found, storing the CIDR value\n # and the start and end positions.\n for match in re.finditer(\n r'^(inetnum|inet6num|route):[^\\S\\n]+((.+?)[^\\S\\n]-[^\\S\\n](.+)|'\n '.+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n\n try:\n\n net['range'] = net['range'] = '{0} - {1}'.format(\n ip_network(net_range)[0].__str__(),\n ip_network(net_range)[-1].__str__()\n ) if '/' in net_range else net_range\n\n except ValueError: # pragma: no cover\n\n net['range'] = net_range\n\n if match.group(3) and match.group(4):\n\n addrs = []\n addrs.extend(summarize_address_range(\n ip_address(match.group(3).strip()),\n ip_address(match.group(4).strip())))\n\n cidr = ', '.join(\n [i.__str__() for i in collapse_addresses(addrs)]\n )\n\n else:\n\n cidr = ip_network(net_range).__str__()\n\n net['cidr'] = cidr\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n\n except (ValueError, TypeError):\n\n pass\n\n return nets",
"def destination_cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_cidr_blocks\")",
"def destination_cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_cidr_blocks\")",
"def __get_scanning_range(self):\n if self.__network is not None:\n return [self.__network]\n networks = []\n interfaces = netifaces.interfaces()\n for data in interfaces:\n ips = netifaces.ifaddresses(data)\n for key, interface_data in ips.items():\n for item in interface_data:\n if item.get(\"netmask\", None) is not None and \\\n item.get(\"addr\", None) is not None and \\\n self.is_legal_ip(item[\"netmask\"]):\n if item.get(\"addr\") not in [\"127.0.0.1\", \"0.0.0.0\"]:\n network = \"{ip}/{cird}\".format(ip=item[\"addr\"],\n cird=IPAddress(item[\"netmask\"]).netmask_bits())\n if network not in networks:\n networks.append(network)\n return networks",
"def list_ipblocks(self, depth=1):\n response = self._perform_request('/ipblocks?depth=%s' % str(depth))\n return response",
"def cidr_block(self):\n return self._cidr_block",
"def digest_ips(self):\n all_subnets = {}\n self.subnets = []\n self.single_ips = []\n # extract all subnets\n for ip in self.iplist:\n subnet = self.__get_sutnet(ip)\n if all_subnets.has_key(subnet):\n all_subnets[subnet].append(ip)\n else:\n new_list = [ip]\n all_subnets[subnet] = new_list\n\n for subnet, subnet_ips in all_subnets.items():\n if len(subnet_ips) > 1:\n self.subnets.append(subnet)\n else:\n self.single_ips.append(subnet_ips[0])\n\n self.subnets.sort()\n self.single_ips.sort()",
"def find_block(int_svip, comp_block):\n print(\"-\" * 20 + \" find_block started\")\n bsz = comp_block\n outsz = 0\n bsdict = {}\n bsdict [0] = bsz\n # Build the dictionary of the host networks\n while outsz < 255:\n outsz = outsz + bsz\n bsdict[outsz] = (outsz + bsz) -1\n #print(outsz)\n \n # Determine the upper and lower bounds of the host network\n for key in bsdict.keys():\n if int_svip >= key and int_svip <= bsdict[key]:\n block_start = key\n block_end = bsdict[key]\n\n #print(\"Block start is {}\\nBlock end is {}\".format(block_start, block_end))\n return block_start, block_end",
"def get_allowed_ipblocks(user):\n try:\n up = user.get_profile()\n except AttributeError:\n return []\n\n if user.has_perm(\"vnswww.ipblock_use_any\"):\n # Can use any blocks\n blocks = db.IPBlock.objects.filter()\n else:\n q_org = Q(org=up.org)\n q_childorg = Q(org=up.org.parentOrg, usable_by_child_orgs=True)\n print user.get_all_permissions()\n if user.has_perm(\"vnswww.ipblock_use_org\"):\n print \"Using blocks from own organization\"\n blocks = db.IPBlock.objects.filter(q_org | q_childorg)\n else:\n print \"Not using blocks from own organization\"\n blocks = []\n\n return blocks",
"def get_static_ip(cidr_block, mask, ip_block):\n return cidr_block.replace(mask,ip_block)",
"def separate_networks(start, end, cidr):\n networks = []\n start_net = IPNetwork(f'{start}/{cidr}')\n end = IPNetwork(f'{end}/{cidr}')\n working_net = start_net\n LOG.info(f'Start net: {start_net}')\n while working_net < end + 1:\n LOG.debug(f'Adding network {working_net}')\n networks.append(working_net)\n working_net = working_net + 1\n return networks",
"def _split_cidrs(vpc_cidr, zones=None, ec2_client=None, region_name=None):\n if not zones:\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n\n dot_parts, length = vpc_cidr.split('/') #pylint:disable=unused-variable\n\n dot_parts = dot_parts.split('.')\n cidr_prefix = '.'.join(dot_parts[:2])\n if len(zones) >= 3:\n web_subnet_cidrs = [\n '%s.0.0/20' % cidr_prefix,\n '%s.16.0/20' % cidr_prefix,\n '%s.32.0/20' % cidr_prefix]\n if len(zones) >= 4:\n web_subnet_cidrs += [\n '%s.48.0/20' % cidr_prefix]\n # We need 2 availability regions for RDS?\n dbs_subnet_cidrs = [\n '%s.64.0/20' % cidr_prefix,\n '%s.80.0/20' % cidr_prefix]\n app_subnet_cidrs = [\n '%s.128.0/20' % cidr_prefix,\n '%s.144.0/20' % cidr_prefix]\n return web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs",
"def ip_get_free(net=\"a\"):\n tnet = net.upper()\n\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.get('https://imhsc.imhadmin.net/index.php',\n params={'v': \"IPManager\", 'net': tnet, 'pool': \"12\"})\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n iplist = []\n for tip in bs.table.tbody.find_all('tr'):\n # get IP id\n try:\n t_id = re.match(r'.+id=([0-9]+).+', tip.find_all('td')[8].a['href'], re.I).group(1)\n except:\n t_id = False\n\n # gather IP infos\n t_info = {\n 'id': t_id,\n 'ip': tip.find_all('td')[0].string,\n 'domain': tip.find_all('td')[1].string,\n 'server': tip.find_all('td')[2].string,\n 'net': tip.find_all('td')[3].string,\n 'user': tip.find_all('td')[5].string,\n 'assigned': tip.find_all('td')[6].string,\n 'edit_url': tip.find_all('td')[8].a['href']\n }\n iplist.append(t_info)\n\n return iplist",
"def set_dhcp_pools(self, cidr):\n start = str(ipaddress.IPv4Network(cidr)[50])\n end = str(ipaddress.IPv4Network(cidr)[200])\n return start, end",
"def url_cidr_to_mask():\n res = {\n \"message\": os.environ.get(\"MESSAGE\", \"nothing\"),\n }\n return jsonify(res)",
"def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []",
"def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list",
"def remote_ipv6_network_cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"remote_ipv6_network_cidr\")",
"def cidr_block(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr_block\")",
"def cidr_to_netmask(value):\n return netaddr.IPNetwork(\"1.1.1.1/{}\".format(value)).netmask",
"def ReverseZoneToCIDRBlock(self):\n if( self.origin.endswith('in-addr.arpa.') ):\n ip_parts = self.origin.split('.in-')[0].split('.')\n ip_parts.reverse()\n for ip_part in ip_parts:\n if( not ip_part.isdigit() ):\n raise Error('%s is not a reverse zone.' % self.zone_name)\n\n cidr_block = '.'.join(ip_parts)\n ip_octets = len(ip_parts)\n\n if( ip_octets > 3 or ip_octets < 1 ):\n raise Error('%s is not a reverse zone.' % self.zone_name)\n\n cidr_block = '%s/%s' % (cidr_block, ip_octets * 8)\n\n elif( self.origin.endswith('ip6.arpa.') ):\n ip_parts = self.origin.split('.ip6')[0].split('.')\n ip_parts.reverse()\n ip_quartets = len(ip_parts)\n\n for ip_part in ip_parts:\n try:\n int(ip_part, 16)\n except ValueError:\n raise Error('Invalid hexidecimal number in ipv6 origin: %s' %\n self.origin)\n # fill out the rest of the ipv6 address\n ip_parts.extend(['0' for x in range(32-ip_quartets)])\n\n for x in range(1, 8):\n # Put colons every 4 quartets\n ip_parts.insert((x*4)+(x-1), ':')\n cidr_block = ''.join(ip_parts)\n\n cidr_block = '%s/%s' % (cidr_block, ip_quartets * 4)\n\n else:\n raise Error('%s is not a reverse zone.' % self.zone_name)\n\n return cidr_block",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def remote_ipv4_network_cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"remote_ipv4_network_cidr\")"
]
| [
"0.64359736",
"0.63276446",
"0.6294762",
"0.5917774",
"0.5865237",
"0.5860904",
"0.57907003",
"0.5784271",
"0.5784271",
"0.5753149",
"0.5687009",
"0.56206995",
"0.5578238",
"0.55621284",
"0.55338603",
"0.55087936",
"0.5494223",
"0.5483299",
"0.54381686",
"0.543068",
"0.54203457",
"0.54144335",
"0.54042107",
"0.5351988",
"0.53439176",
"0.5340045",
"0.5320608",
"0.5318732",
"0.5318732",
"0.5255985"
]
| 0.76826394 | 0 |
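A hedged usage sketch for the get_netblock record above. It reuses the document's own logic, requires the third-party ipwhois package plus network access, and the printed CIDR is only an example of what the registry might report:

from ipwhois import IPWhois   # third-party package; performs a live WHOIS lookup

def get_netblock(ip=None):    # same logic as the positive document
    ip = str(ip).split('/')[0] if '/' in str(ip) else ip
    results = IPWhois(ip).lookup_whois()
    return results['asn_cidr']

print(get_netblock('8.8.8.8'))     # e.g. '8.8.8.0/24' -- whatever the registry reports
print(get_netblock('8.8.8.8/32'))  # any '/len' suffix is stripped before the lookup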
Make all files in path ``dst`` have ``o-rwx`` permissions (i.e., remove all access for "other"). | def remove_other_perms(dst):
# File permissions on Cygwin/Windows filesystems don't work the
# same way as Linux. Don't try to change them.
# TODO(dittrich): Is there a Better way to handle perms on Windows?
fs_type = get_fs_type(dst)
if fs_type in ['NTFS', 'FAT', 'FAT32']:
msg = (
f"[-] {dst} has file system type '{fs_type}': "
'skipping setting permissions'
)
logger.info(msg)
else:
get_output(['chmod', '-R', 'o-rwx', dst]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fix_permissions(dist):\n for root, _, files in os.walk(dist.location):\n for f in [os.path.join(root, i) for i in files]:\n if f.endswith('.py') or f.endswith('.dll') or \\\n f.endswith('.so') and not 'EGG-INFO' in f:\n mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755\n chmod(os.path.join(f), mode)",
"def fix_permissions(dist):\n for root, _, files in os.walk(dist.location):\n for f in [os.path.join(root, i) for i in files]:\n if f.endswith('.py') or f.endswith('.dll') or \\\n f.endswith('.so') and not 'EGG-INFO' in f:\n mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755\n chmod(os.path.join(f), mode)",
"def _make_writeable(filename):\n import stat\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def fix_permissions():\n for root, dirs, files in os.walk('build'):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o755)\n for f in files:\n os.chmod(os.path.join(root, f), 0o644)\n\n # The executable must be executable.\n os.chmod('build/usr/bin/qastetray', 0o755)",
"def setup_permissions():\n sudo('chown %s:%s -R %s' % (env.apache_user, env.apache_user, env.whole_path_symlinked))",
"def adjust_permission_base_dir(base_dir, destination):\n\n if destination==\"tegner-login-1\":\n #Change group and set permissions for PDC Stockholm\n user_group = DATA_USER_PDC + \":\" + DATA_GROUP_PDC\n \n subprocess.Popen( [\"chown\", \"-R\", user_group, base_dir],\n stdout=subprocess.PIPE )\n \n\n subprocess.Popen( [\"setfacl\", \"-R\", \"-M\", \"/cfs/klemming/projects/xenon/misc/basic\", base_dir],\n stdout=subprocess.PIPE )",
"def _set_rw_permissions_for_all(self, nms, path):\n nms.appliance.execute('chmod ugo+rw %s' % path)",
"def make_writeable(filename):\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n stt = os.stat(filename)\n new_permissions = stat.S_IMODE(stt.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def fix_file_perms():\n yield\n os.chmod('tackle.yaml', int('0o644', 8))",
"def _make_writeable(filename):\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)",
"def fixpermissions():\n try:\n stats = os.stat(SCRIPT_LOC)\n os.chown(DNS_LOC, stats.st_uid, stats.st_gid)\n os.chmod(DNS_LOC, stats.st_mode)\n except AttributeError:\n pass\n except OSError:\n print '>> Unable to change permissions of ' + DNS_LOC + os.linesep + \\\n ' ^^ This is a non-fatal error ^^'",
"def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst",
"def set_syco_permissions():\n x(\"chown -R root:root /opt/syco\")\n x(\"chmod 0755 /opt/syco\")\n x(\"chmod 0750 /opt/syco/var\")\n x(\"chmod 0750 /opt/syco/var/mysql\")\n x(\"chmod 0750 /opt/syco/var/mysql/mysql-lvm-backup.py\")\n x(\"chmod 0750 /opt/syco/var/mysql/mysqldump-backup.sh\")",
"def update_chmod(self):\n pass",
"def pid_permissions():\n config = Config()\n try:\n user = pwd.getpwnam(config.user)\n group = grp.getgrnam(config.group)\n os.chown(config.pidfile, user.pw_uid, group.gr_gid)\n except (KeyError, PermissionError):\n logger.error(\"Unable to change pidfile ownership permissions.\")\n raise SystemExit(os.EX_USAGE)",
"def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst",
"def make_executable(op):\n if not os.stat(op)[stat.ST_MODE] & stat.S_IXUSR:\n run_command([\"chmod\", \"+x\", op])",
"def _correct_file_mode(self):\n if os.name != 'posix':\n return\n for outfile in self.outfiles:\n if self.dry_run:\n log.info(\"changing mode of %s\", outfile)\n else:\n oldmode = os.stat(outfile).st_mode & 07777\n newmode = (oldmode | 0555) & 07777\n if newmode != oldmode:\n log.info(\"changing mode of %s from %o to %o\",\n outfile, oldmode, newmode)\n os.chmod(outfile, newmode)",
"def Chown(self):\n cmd = 'chmod -R 0775 %s' % self.procdir\n self.ExecCmd(cmd)",
"def set_object_copy_mod_permissions(self, agent):\n\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Transfer)",
"def install_perms(env, target, sources, perms=0644, dirperms=0755):\n assert target.startswith('$prefix')\n install_targs = env.Install(target, sources)\n\n # Set file permissions, and defer directory permission setting\n for targ in install_targs:\n env.AddPostAction(targ, Chmod(targ, perms))\n d = os.path.dirname(os.path.normpath(targ.get_abspath()))\n d_prev = None\n while d != d_prev and not os.path.exists(d):\n if not d in set_dir_postaction:\n env.AddPostAction(targ, Chmod(d, dirperms))\n set_dir_postaction[d] = True\n d_prev = d\n d = os.path.dirname(d)\n\n # Return like Install()\n return install_targs",
"def _ensure_read_write_access(tarfileobj):\n dir_perm = tarfile.TUREAD | tarfile.TUWRITE | tarfile.TUEXEC\n file_perm = tarfile.TUREAD | tarfile.TUWRITE\n\n for tarinfo in tarfileobj.getmembers():\n tarinfo.mode |= (dir_perm if tarinfo.isdir() else file_perm)",
"def sort_permissions(fl):\n\n if oct(os.stat(fl).st_mode)[4:] != '666':\n os.chmod(fl, 0o666)",
"def match_owner_group(dest_path, source_path):\n source_stat = os.stat(source_path)\n return os.chown(dest_path, source_stat[stat.ST_UID], source_stat[stat.ST_GID])",
"def _setup_permissions(self, chown, chmod):\n if chown is not None:\n if isinstance(chown, str):\n user, group = chown, None\n\n else:\n try:\n # Try to extract tuple.\n user, group = chown\n\n except ValueError:\n # If length of iterable is not 2, then allow 1.\n assert len(chown) == 1, 'chown must be user or tuple'\n user, group = chown[0], None\n\n except TypeError:\n # If not iterable, use given value as user.\n user, group = chown, None\n\n # Lookup user id.\n if isinstance(user, str):\n user_info = pwd.getpwnam(user)\n user = user_info.pw_uid\n\n # Lookup group id, or use -1 (do not change group)\n if isinstance(group, str):\n group = grp.getgrnam(group).pw_gid\n\n elif group is None:\n group = -1\n\n # Return tuple usable by os.chown().\n chown = (user, group)\n\n # Ensure chmod is numeric if given.\n if chmod is not None:\n assert isinstance(chmod, numbers.Number), 'chmod must be a number'\n\n return chown, chmod",
"def copy_file ( self, source, dest, chown=True, chmod=True ):\n if self._copy_file ( source, dest ):\n if chmod:\n self.chmod_file ( dest )\n if chown:\n self.chown_file ( dest )\n\n return True\n else:\n return False",
"def chmod_plus_w(path):\r\n path_mode = os.stat(path).st_mode\r\n path_mode &= int('777', 8)\r\n path_mode |= stat.S_IWRITE\r\n os.chmod(path, path_mode)",
"def chmod_dir ( self, fspath ):\n return"
]
| [
"0.6326105",
"0.6326105",
"0.6084609",
"0.60730886",
"0.6067008",
"0.6056444",
"0.6051135",
"0.5906981",
"0.58243513",
"0.5817262",
"0.5775717",
"0.5611725",
"0.54822046",
"0.5456822",
"0.5450532",
"0.54407656",
"0.5432667",
"0.5426817",
"0.5371543",
"0.53671795",
"0.5362671",
"0.5339208",
"0.5338586",
"0.5311117",
"0.5310837",
"0.5286779",
"0.51889056",
"0.51855177",
"0.5182849",
"0.51779854"
]
| 0.73388106 | 0 |
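The positive document above shells out to chmod -R o-rwx. As a point of comparison only — this is not the author's implementation — the same effect can be approximated in pure Python with os.walk and stat masks, assuming dst is a directory tree:

import os
import stat

OTHER_BITS = stat.S_IRWXO  # read/write/execute bits for "other"

def remove_other_perms_py(dst):
    # assumes dst is a directory; os.walk yields dst itself as the first 'root'
    for root, dirs, files in os.walk(dst):
        for path in [root] + [os.path.join(root, f) for f in files]:
            mode = os.stat(path).st_mode
            os.chmod(path, mode & ~OTHER_BITS)

The subprocess-based original also covers the Cygwin/Windows filesystem check, which this sketch deliberately leaves out.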
Produces the tree structure for groups and secrets in an environment. If ``outfile`` is specified (e.g., as sys.stdout) the tree is printed to it, otherwise a list of strings is returned. | def secrets_tree(
env=None,
outfile=None
):
nodes = dict()
env_name = str(env)
nodes[env_name] = Node(env_name)
root_node = nodes[env_name]
for group in sorted(env.get_groups()):
group_name = os.path.join(env_name, group)
nodes[group_name] = Node(group, parent=root_node)
for variable in sorted(env.get_items_from_group(group)):
nodes[os.path.join(group_name, variable)] = \
Node(variable, parent=nodes[group_name])
output = []
for pre, fill, node in RenderTree(root_node):
output.append((f'{ pre }{ node.name }'))
if outfile is not None:
for line in output:
print(line, file=outfile)
else:
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_envgroup_output(envgroup_id: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEnvgroupResult]:\n ...",
"def output_groups(self) -> List[str]:\n return self._output_groups",
"def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)",
"def task_output_block_groups():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [dept.block_groups_path],\n 'targets': [dept.block_groups_output],\n 'actions': ['cp %(dependencies)s %(targets)s'],\n 'clean': True,\n }",
"def tree(ctx):\n root_cmd = _build_command_tree(ctx.find_root().command)\n _print_tree(root_cmd)",
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def get_output(tree):\n return get_value(tree) + get_tree(tree)",
"def print_output(tree):\n print_value(tree)\n print_tree(tree)",
"def outputs() -> List[str]:\n return Invocation.current.expanded_outputs",
"def get_outputs():\n outputs = {}\n for obj in vars(acsploit.output).values():\n if hasattr(obj, 'OUTPUT_NAME'):\n outputs[obj.OUTPUT_NAME] = obj\n\n return outputs",
"def dump(self, return_type=None):\n\n # apply defaults\n for option, option_dict in self.OPTIONS.items():\n option_dict['handle'] = option\n for key, value in self.DEFAULTS.items():\n if option_dict.get(key, None) is None:\n option_dict[key] = value\n\n # add options to types\n for type_handle, type_dict in self.TYPES.items():\n type_dict['options'] = []\n for option, option_dict in self.OPTIONS.items():\n if option_dict['type'] == type_handle:\n type_dict['options'].append(self.OPTIONS[option])\n\n # finally, create a list of groups\n output = []\n for type_handle in self.TYPES.keys():\n output.append(self.TYPES[type_handle])\n\n return output",
"def getHierarchies():",
"def getHierarchies():",
"def environments(self):\n envs = self.config[\"tox\"][\"envlist\"]\n #result = re.split(\"[^a-zA-Z0-9]\", envs)\n result = re.split(r'\\n| ,|,', envs)\n #print ([string for string in result if string != \"\"])\n result = (([string.strip() for string in result if string != \"\"]))\n print(list(dict.fromkeys(result)))\n return ((list(dict.fromkeys(result))))",
"def root_outputs(s):\n out = []\n for j in s.jobs:\n out.append(j.outROOT)\n return out",
"def get_groups(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n groups = []\n for group in config_json[\"groups\"]:\n groups.append(group[\"name\"])\n return groups",
"def _credstash_getall(self, team, exec_env):\n s = check_output([\"credstash\", \"-t\", \"credstash-%s\" % team,\n \"getall\"], env=exec_env)\n return str(s)",
"def get_groups_using_tool():\n global groups_using_tool\n\n if not groups_using_tool:\n groups_using_tool = rsh.groups_using_tool(get_srcs())\n\n return groups_using_tool",
"def parse_output(use_json, output):\n return json.loads(output[0]) if use_json else parse_info.construct_tree(output)",
"def _list_outputs(self):\n outputs = self._outputs().get()\n\n out_dir = os.path.abspath(os.path.join(os.getcwd(), \"slicesdir\"))\n outputs[\"out_dir\"] = out_dir\n outputs[\"out_files\"] = [\n self._gen_fname(\n basename=f.replace(os.sep, \"_\"),\n cwd=out_dir,\n ext=self.inputs.out_extension,\n )\n for f in self.inputs.in_files\n ]\n return outputs",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def do_tree(self, args, opts=None):\n global __groupcount\n global __datasetcount\n __groupcount = 0\n __datasetcount = 0\n\n def children(item):\n if isinstance(item, h5py.Dataset):\n return []\n else:\n return [i[1] for i in item.items()]\n\n def format(item):\n name = os.path.basename(item.name)\n if name == '':\n name = '/'\n if isinstance(item, h5py.Dataset):\n if opts.shape:\n name = name + ' ' + str(item.shape)\n global __datasetcount\n __datasetcount += 1\n elif isinstance(item, h5py.Group):\n global __groupcount\n __groupcount += 1\n return name\n\n if len(args) == 0:\n args.append('')\n group = self.explorer.group(args[0])\n tree_format.print_tree(group, format, children)\n print('{} groups, {} datasets'.format(__groupcount - 1, __datasetcount))",
"def get_output(self, state: NestedMap) -> NestedMap:\n raise NotImplementedError('Abstract method')",
"def getOutputNames(self):\n return self.dirs",
"def dumps(self):\n result = []\n pkg_options_dumps = self._package_options.dumps()\n if pkg_options_dumps:\n result.append(pkg_options_dumps)\n for pkg_pattern, pkg_option in sorted(self._deps_package_options.items()):\n dep_pkg_option = pkg_option.dumps(scope=pkg_pattern)\n if dep_pkg_option:\n result.append(dep_pkg_option)\n return \"\\n\".join(result)",
"def output_format(result):\n if 'value' in result and isinstance(result['value'], list):\n result = result['value']\n obj_list = result if isinstance(result, list) else [result]\n return [_format_group(item) for item in obj_list]",
"def test_output_vs_expectations(self):\n inventory = Inventory()\n inventory.add_host('superhost', hostvars={'ansible_connection':'local'})\n inventory.add_host('superhost2', hostvars={'ansible_connection':'local'})\n inventory.add_group('awesome')\n inventory.add_group('awesome2')\n inventory.groups['awesome'].add_host(inventory.hosts['superhost'])\n inventory.groups['awesome'].add_host(inventory.hosts['superhost2'])\n output = inventory.write_output_json()\n assert len(output['_meta']['hostvars']) == 2\n output.pop('_meta')\n assert len(output) == 4 #awesome, awesome2, all, ungrouped",
"def tree_result_list(context):\n context = result_list(context)\n return context",
"def main():\n if config.command == \"list-groups\":\n # Get the list of policies in JSON format for the given network\n if hasattr(config, 'accountSwitchKey'):\n groupList = listGroups(config.accountSwitchKey)\n else:\n groupList = listGroups()\n formatOutputGroupList(groupList, config.output_type)\n\n elif config.command == \"list-connectors\":\n if hasattr(config, 'accountSwitchKey'):\n connectorList = listConnectors(config.accountSwitchKey)\n else:\n connectorList = listConnectors()\n formatOutputConnectorList(connectorList, config.output_type)\n\n elif config.command == \"list-products\":\n if hasattr(config, 'accountSwitchKey'):\n productsList = listProducts(config.accountSwitchKey)\n else:\n productsList = listProducts()\n formatOutputProductList(productsList, config.output_type)\n\n elif config.command == \"list-stream-types\":\n if hasattr(config, 'accountSwitchKey'):\n streamTypeList = listStreamTypes(config.accountSwitchKey)\n else:\n streamTypeList = listStreamTypes()\n formatOutputStreamTypeList(streamTypeList, config.output_type)\n\n elif config.command == \"list-streams\":\n if hasattr(config, 'accountSwitchKey'):\n streamList = listStreams(config.groupid,config.streamstatus,config.accountSwitchKey)\n else:\n streamList = listStreams(config.groupid,config.streamstatus)\n formatOutputStreamList(streamList, config.output_type)\n\n elif config.command == \"list-properties\":\n if hasattr(config, 'accountSwitchKey'):\n propertiesList = listProperties(config.groupid,config.productId,config.accountSwitchKey)\n else:\n propertiesList = listProperties(config.groupid,config.productId)\n formatOutputPropertiesList(propertiesList, config.output_type)\n\n elif config.command == \"list-error-streams\":\n if hasattr(config, 'accountSwitchKey'):\n errorstreamList = listErrorStreams(config.groupid,config.accountSwitchKey)\n else:\n errorstreamList = listErrorStreams(config.groupid)\n formatOutputErrorStreamList(errorstreamList, config.output_type)\n\n elif config.command == \"create\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n\n if hasattr(config, 'accountSwitchKey'):\n createResponse = createStream(json_string,config.accountSwitchKey)\n else:\n createResponse = createStream(json_string)\n formatOutputActDeactResp(createResponse)\n\n elif config.command == \"update\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n print(json_string)\n if hasattr(config, 'accountSwitchKey'):\n updateResponse = updateStream(json_string,config.streamid,config.accountSwitchKey)\n else:\n updateResponse = updateStream(json_string,config.streamid)\n formatOutputActDeactResp(updateResponse)\n\n\n elif config.command == \"get-stream\":\n if hasattr(config, 'accountSwitchKey'):\n streamDetail = getStream(config.streamid,config.accountSwitchKey)\n else:\n streamDetail = getStream(config.streamid)\n formatOutputStreamDetail(streamDetail, config.output_type)\n\n elif config.command == \"activation-history\":\n if hasattr(config, 'accountSwitchKey'):\n activationHistory = getStreamActHistory(config.streamid,config.accountSwitchKey)\n else:\n activationHistory = getStreamActHistory(config.streamid)\n 
formatOutputActHistory(activationHistory, config.output_type)\n\n elif config.command == \"stream-history\":\n if hasattr(config, 'accountSwitchKey'):\n streamHistory = getStreamHistory(config.streamid,config.accountSwitchKey)\n else:\n streamHistory = getStreamHistory(config.streamid)\n formatOutputStreamHistory(streamHistory, config.output_type)\n\n elif config.command == \"list-datasets\":\n if hasattr(config, 'accountSwitchKey'):\n datasetList = getDatasets(config.template,config.accountSwitchKey)\n else:\n datasetList = getDatasets(config.template)\n formatOutputDatasetList(datasetList, config.output_type)\n\n elif config.command == \"activate\":\n if hasattr(config, 'accountSwitchKey'):\n activateResponse = activateStream(config.streamid,config.accountSwitchKey)\n else:\n activateResponse = activateStream(config.streamid)\n formatOutputActDeactResp(activateResponse)\n\n elif config.command == \"deactivate\":\n if hasattr(config, 'accountSwitchKey'):\n deactivateResponse = deActivateStream(config.streamid,config.accountSwitchKey)\n else:\n deactivateResponse = deActivateStream(config.streamid)\n formatOutputActDeactResp(deactivateResponse)\n\n elif config.command == \"delete\":\n if hasattr(config, 'accountSwitchKey'):\n deleteResponse = deleteStream(config.streamid,config.accountSwitchKey)\n else:\n deleteResponse = deleteStream(config.streamid)\n formatOutputActDeactResp(deleteResponse)",
"def __repr__(self, max_lines=20):\n def _create_str(results_dict, level=0, parent=True):\n \"\"\"\n Creates a string from the results dict\n \"\"\"\n result = ''\n keys = sorted(results_dict.keys())\n if not keys:\n return result\n\n if parent:\n has_remote_entries = any(\n self._map(\n lambda lk, entry: not entry.physical_key.is_local()\n )\n )\n pkg_type = 'remote' if has_remote_entries else 'local'\n result = f'({pkg_type} Package)\\n'\n\n for key in keys:\n result += ' ' + (' ' * level) + '└─' + key + '\\n'\n result += _create_str(results_dict[key], level + 1, parent=False)\n\n return result\n\n if not self.keys():\n return '(empty Package)'\n\n # traverse the tree of package directories and entries to get the list of\n # display objects. candidates is a deque of shape\n # ((logical_key, Package | PackageEntry), [list of parent key])\n candidates = deque(([x, []] for x in self._children.items()))\n results_dict = {}\n results_total = 0\n more_objects_than_lines = False\n\n while candidates:\n [[logical_key, entry], parent_keys] = candidates.popleft()\n if isinstance(entry, Package):\n logical_key = logical_key + '/'\n new_parent_keys = parent_keys.copy()\n new_parent_keys.append(logical_key)\n for child_key in sorted(entry.keys()):\n candidates.append([[child_key, entry[child_key]], new_parent_keys])\n\n current_result_level = results_dict\n for key in parent_keys:\n current_result_level = current_result_level[key]\n current_result_level[logical_key] = {}\n results_total += 1\n\n if results_total >= max_lines:\n more_objects_than_lines = True\n break\n\n repr_str = _create_str(results_dict)\n\n # append '...' if the package is larger than max_size\n if more_objects_than_lines:\n repr_str += ' ' + '...\\n'\n\n return repr_str"
]
| [
"0.55058384",
"0.5480403",
"0.53334093",
"0.53122777",
"0.5256857",
"0.508922",
"0.5059336",
"0.5053877",
"0.50406545",
"0.5032602",
"0.49581128",
"0.4936249",
"0.4936249",
"0.49264872",
"0.48918167",
"0.48805633",
"0.48752487",
"0.48623678",
"0.4862295",
"0.4845437",
"0.4826918",
"0.4826165",
"0.4806193",
"0.47907218",
"0.4790315",
"0.47811252",
"0.4773605",
"0.47355616",
"0.4731298",
"0.47010326"
]
| 0.7626051 | 0 |
Prettyprint environment variable (if set). | def show_current_value(variable=None):
value = os.getenv(variable, None)
return f" ('{value}')" if value is not None else '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_formatted_env_vars() -> str:\n res = \"\"\n for k, v in os.environ.items():\n res += '{0}={1}\\n'.format(k, v)\n return res",
"def set_pretty_print(value):\n global _with_colors\n _with_colors = True if value else False",
"def print_set_env_command(name, value):\n shell_type = get_shell_type()\n if shell_type == Shell.LINUX:\n print(f'export {name!s}=\"{value!s}\";')\n elif shell_type == Shell.POWER_SHELL:\n print(f'$Env:{name!s}=\"{value!s}\";')\n else:\n print(f\"set {name!s}={value!s}\")",
"def dumpenv(self):\n\n print('-------------------------------')\n pprint.pprint(dict(os.environ))\n print('-------------------------------')",
"def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()",
"def print_environment():\n import sys\n version = {}\n for pkg in 'moldesign IPython ipywidgets jupyter matplotlib numpy docker pyccc distutils' \\\n 'nbmolviz jupyter_client jupyter_core pint Bio openbabel simtk pyscf pip setuptools'\\\n .split():\n try:\n module = __import__(pkg)\n except ImportError as e:\n version[pkg] = str(e)\n else:\n try:\n version[pkg] = module.__version__\n except AttributeError as e:\n version[pkg] = str(e)\n env = {'platform': sys.platform,\n 'version': sys.version,\n 'prefix': sys.prefix}\n\n try:\n import platform\n env['machine'] = platform.machine()\n env['linux'] = platform.linux_distribution()\n env['mac'] = platform.mac_ver()\n env['windows'] = platform.win32_ver()\n env['impl'] = platform.python_implementation()\n env['arch'] = platform.architecture()\n env['system'] = platform.system()\n env['python_build'] = platform.python_build()\n env['platform_version'] = platform.version()\n\n except Exception as e:\n env['platform_exception'] = str(e)\n\n print(json.dumps({'env': env,\n 'versions': version}))",
"def print_env_vars():\n print(\"Current process environment variables:\")\n for k, v in os.environ.items():\n print('{0}={1}'.format(k, v))",
"def dump():\n import pprint\n import json\n\n notice('dumping env...')\n pprint.pprint(env)\n\n if _config:\n notice('dumping config...')\n print _config",
"def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)",
"def print_environment():\n print(f\"python version: {sys.version}\")\n\n # sys.argv will *always* contain 1 element, depending on how the script was\n # executed.\n #\n # If executed as a script (python simple.py), sys.argv[0] == \"simple.py\"\n # If executed as a module, (python -m simple), sys.argv[0] is the full path\n # to the script. (i.e., sys.argv[0] == \"/Users/dra/projects/python-examples/simple.py\"\n\n logger.info(f\"argv len={len(sys.argv)} = {sys.argv}\")\n\n # The module search path is the list of:\n # * Current directory\n # * $PYTHONPATH environment variable. Takes same format as $PATH (/usr/local:/usr/bin).\n # * Installation dependent default\n logger.info(f\"path == {str(sys.path)}\")\n logger.info(f\"platform == {sys.platform}\")\n\n print(\"--- packages ---\")\n print(sorted([f\"{i.key} = {i.version}\" for i in pkg_resources.working_set]))\n print(\"--- END packages ---\")",
"def add_to_environment(v):\n return \"Environment='{}'\".format(\n \"\\nEnvironment=\".join(\n \"{}={}\".format(k, \"\".join(map(str, v))) for k, v in iteritems(v)\n )\n if isinstance(v, dict)\n else v\n )",
"def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]",
"def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))",
"def env_str(name: str, default: str) -> str:\n value = stringify(env.get(name))\n return default if value is None else value",
"def show_env():\n envs = [\"PATH\", \"ORACLE_HOME\", \"TNS_ADMIN\", \"NLS_LANG\"]\n result = {}\n for env in envs:\n if env in os.environ:\n result[env] = os.environ[env]\n return result",
"def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''",
"def help_environment_variables():\n click.echo_via_pager(docgen.generate_environment_variables_help())",
"def _get_env(key: str) -> str:\n value = os.getenv(key)\n assert isinstance(value, str), (\n f\"the {key} environment variable must be set and a string, \" f\"{value=}\"\n )\n return value",
"def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])",
"def getenv_string(setting, default=''):\n return os.environ.get(setting, default)",
"def prepend_environment_variable(self, key, value):\n value = BashParentEnvironment._format_environment_value(value)\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"export {k}=\\\"{v}:${k}\\\"\".format(**script_keys)\n self._printer(script)",
"def prepend_environment_variable(self, key, value):\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = \"$env:{k} = \\\"{v};$env:{k}\\\"\".format(**script_keys)\n self._printer(script)",
"def _format_environment_value(value):\n value = str(value)\n if platform.system() == \"Windows\":\n # Split on semicolons first\n components = value.split(os.pathsep)\n\n # On each component, replace anything that looks like\n # a drive letter with a unix-like drive path.\n components = [re.sub(r\"^([A-Za-z]):\\\\\",\n r\"\\\\\\1\\\\\",\n c) for c in components]\n\n return \":\".join(components).replace(\"\\\\\", \"/\")\n\n return value",
"def env(var):\n return os.environ[var]",
"def _env_var_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> str:\n args = node.value.split()\n\n # Check for a default value\n if len(args) > 1:\n return os.getenv(args[0], \" \".join(args[1:]))\n if args[0] in os.environ:\n return os.environ[args[0]]\n logger.error(\"Environment variable %s not defined\", node.value)\n raise XKNXException(node.value)",
"def printSettings():\n print \">>>\\n>>> SettingsTool: global variables:\"\n for variable, value in globals().items():\n if variable.count('__')>1: continue\n print \">>> %-16s = %s\"%(variable,value)\n print \">>>\"",
"def print_params(env) -> None:\n dict_pretty_print(env.config['parameters'])",
"def show_version(env: Environment):\n if env.quiet:\n print(pipper.__version__)\n else:\n print(\"Version: {}\".format(pipper.__version__))",
"def env(key, default=None):\n val = os.getenv(key, default)\n\n if val == 'True':\n val = True\n elif val == 'False':\n val = False\n return val",
"def print_env(command):\n exit_code = 0\n if len(command) <= 1:\n for x, y in environ.items():\n print(\"{}={}\".format(x, y))\n return exit_code\n for x in command[1:]:\n try:\n print(environ[x])\n except KeyError:\n exit_code = 1\n pass\n return exit_code"
]
| [
"0.6456226",
"0.6314747",
"0.6197535",
"0.61365336",
"0.6113619",
"0.610046",
"0.59816974",
"0.5979248",
"0.5904684",
"0.57186013",
"0.5705967",
"0.56352997",
"0.5547639",
"0.5538115",
"0.54955536",
"0.54795843",
"0.54790395",
"0.5462383",
"0.544286",
"0.54425323",
"0.5411556",
"0.5401159",
"0.53947705",
"0.5374062",
"0.5356852",
"0.5320332",
"0.53100216",
"0.53057706",
"0.529328",
"0.5288599"
]
| 0.63266575 | 1 |