Column           Type             Min    Max
query            stringlengths    9      9.05k
document         stringlengths    10     222k
metadata         dict
negatives        sequencelengths  30     30
negative_scores  sequencelengths  30     30
document_score   stringlengths    4      10
document_rank    stringclasses    2 values
Publishes application by uploading the manifest to the given marketplace
def _publish(client, manifest_path, marketplace, skip, overrides):
    try:
        manifest_json = check_app_manifest(manifest_path, overrides, marketplace)
        app_url = "{}://{}".format(manifest_json["schemes"][0], manifest_json["host"])
        app_ip = urlparse(app_url).hostname

        if not skip:
            address = get_zerotier_address(marketplace)

            if address != app_ip:
                wrong_ip = click.style("It seems that the IP address that you put in your manifest file (") +\
                    click.style("{}", bold=True) +\
                    click.style(") is different than your current 21market IP (") +\
                    click.style("{}", bold=True) +\
                    click.style(")\nAre you sure you want to continue publishing with ") +\
                    click.style("{}", bold=True) +\
                    click.style("?")
                if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):
                    switch_host = click.style("Please edit ") +\
                        click.style("{}", bold=True) +\
                        click.style(" and replace ") +\
                        click.style("{}", bold=True) +\
                        click.style(" with ") +\
                        click.style("[{}].", bold=True)
                    logger.info(switch_host.format(manifest_path, app_ip, address))
                    return

    except exceptions.ValidationError as ex:
        # catches and re-raises the same exception to enhance the error message
        publish_docs_url = click.style("https://21.co/learn/21-publish/", bold=True)
        publish_instructions = "For instructions on publishing your app, please refer to {}".format(publish_docs_url)
        raise exceptions.ValidationError(
            "The following error occurred while reading your manifest file at {}:\n{}\n\n{}"
            .format(manifest_path, ex.args[0], publish_instructions),
            json=ex.json)

    app_name = manifest_json["info"]["title"]
    app_endpoint = "{}://{}{}".format(manifest_json["schemes"][0],
                                      manifest_json["host"],
                                      manifest_json["basePath"])

    logger.info(
        (click.style("Publishing {} at ") + click.style("{}", bold=True) + click.style(" to {}."))
        .format(app_name, app_endpoint, marketplace))

    payload = {"manifest": manifest_json, "marketplace": marketplace}
    try:
        response = client.publish(payload)
    except ServerRequestError as e:
        if e.status_code == 403 and e.data.get("error") == "TO600":
            logger.info(
                "The endpoint {} specified in your manifest has already been registered in "
                "the marketplace by another user.\nPlease check your manifest file and make "
                "sure your 'host' field is correct.\nIf the problem persists please contact "
                "[email protected].".format(app_endpoint), fg="red")
            return
        else:
            raise e

    if response.status_code == 201:
        response_data = response.json()
        mkt_url = response_data['mkt_url']
        permalink = response_data['permalink']
        logger.info(
            click.style(
                "\n"
                "You have successfully published {} to {}. "
                "You should be able to view the listing within a few minutes at {}\n\n"
                "Users will be able to purchase it, using 21 buy, at {} ",
                fg="magenta")
            .format(app_name, marketplace, permalink, mkt_url)
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n logger.error(\n \"Manifest parameter overrides should be in the form 'key1=\\\"value1\\\" \"\n \"key2=\\\"value2\\\".\",\n fg=\"red\")\n return\n\n _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package", "def installApp(dev, apkFile=None, appPackage=None, outFile=None, local=False):\n certFile = scriptRoot + '/certs/localtest.me.pem'\n with ServerContext(LocalMarketServer(certFile, config.officialServer)) as server:\n if apkFile:\n server.setApk(apkFile.read())\n elif appPackage:\n print('Downloading apk')\n apps = listApps(True)\n if appPackage not in apps:\n raise Exception('Unknown app: %s' % appPackage)\n server.setApk(apps[appPackage].release.asset)\n\n print('Starting task')\n xpdData = server.getXpd()\n\n print('Starting communication')\n # Point the camera to the web api\n result = installer.install(dev, server.host, server.port, xpdData, printStatus)\n if result.code != 0:\n raise Exception('Communication error %d: %s' % (result.code, result.message))\n\n result = server.getResult()\n\n if not local:\n try:\n RemoteAppStore(config.appengineServer).sendStats(result)\n except:\n pass\n\n print('Task completed successfully')\n\n if outFile:\n print('Writing to output file')\n json.dump(result, outFile, indent=2)\n\n return result", "def process_manifest(vb, options):\n if not options.manifest:\n return\n\n vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,\n options.manifest_release_version)", "def install_app(self, pbz_path, launch_on_install=True):\n\n\t\tbundle = PebbleBundle(pbz_path)\n\t\tif not bundle.is_app_bundle():\n\t\t\traise PebbleError(self.id, \"This is not an app bundle\")\n\t\tapp_metadata = bundle.get_app_metadata()\n\n\t\tbinary = bundle.zip.read(bundle.get_application_info()['name'])\n\t\tif bundle.has_resources():\n\t\t\tresources = bundle.zip.read(bundle.get_resources_info()['name'])\n\t\telse:\n\t\t\tresources = None\n\n\t\tapps = self.get_appbank_status()\n\n\t\tif not apps:\n\t\t\traise PebbleError(self.id, \"could not obtain app list; try again\")\n\n\t\tfirst_free = 1\n\t\tfor app in apps[\"apps\"]:\n\t\t\tif app[\"index\"] == first_free:\n\t\t\t\tfirst_free += 1\n\t\tif first_free == apps[\"banks\"]:\n\t\t\traise PebbleError(self.id, \"All %d app banks are full\" % apps[\"banks\"])\n\t\tlog.debug(\"Attempting to add app to bank %d of %d\" % (first_free, apps[\"banks\"]))\n\n\t\tclient = PutBytesClient(self, first_free, \"BINARY\", binary)\n\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\tclient.init()\n\t\twhile not client._done and not client._error:\n\t\t\tpass\n\t\tif client._error:\n\t\t\traise PebbleError(self.id, \"Failed to send application binary %s/pebble-app.bin\" % pbz_path)\n\n\t\tif resources:\n\t\t\tclient = PutBytesClient(self, first_free, \"RESOURCES\", resources)\n\t\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\t\tclient.init()\n\t\t\twhile not client._done and not client._error:\n\t\t\t\tpass\n\t\t\tif 
client._error:\n\t\t\t\traise PebbleError(self.id, \"Failed to send application resources %s/app_resources.pbpack\" % pbz_path)\n\n\t\ttime.sleep(2)\n\t\tself._add_app(first_free)\n\t\ttime.sleep(2)\n\n\t\tif launch_on_install:\n\t\t\tself.launcher_message(app_metadata['uuid'].bytes, \"RUNNING\", uuid_is_string=False)", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n 
utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def deploy_app(self, app_info):\n raise NotImplementedError", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def upload_package(self, __contents):\n raise NotImplementedError", "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "def serve_manifest(app):\n storeapps = APP.config[\"storage\"]\n manifest = os.path.join(storeapps, \"IPA\", app, \"manifest.plist\")\n app_url = request.host_url + \"application/IPA/\" + app + \"/\" + app + \".ipa\"\n if not os.path.isfile(manifest):\n return \"File not found\", 404\n logging.debug(\"Serving manifest with application url: %s\", app_url)\n return flask.Response(open(manifest).read().replace(\"{{ APPLICATION_URL }}\", app_url.encode(\"utf-8\")),\n mimetype='text/xml')", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def finish_publish(hash, metadata, engine_id=None, username=USER):\n identity = \"%s@%s\" % (username, get_config('domain'))\n library = Library.objects.get(identity=identity)\n library.add_item(\n engine_id=engine_id,\n origin=identity,\n metadata=metadata\n )\n return \"OK\"", "def upload():\n sh('python setup.py register sdist upload')", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def compose_package(app_name, manifest, package_dir,\n require=None, archive_dir=None):\n with open(manifest, 'w') as f:\n fqn = 'io.murano.apps.' 
+ app_name\n mfest_copy = MANIFEST.copy()\n mfest_copy['FullName'] = fqn\n mfest_copy['Name'] = app_name\n mfest_copy['Classes'] = {fqn: 'mock_muranopl.yaml'}\n if require:\n mfest_copy['Require'] = require\n f.write(yaml.dump(mfest_copy, default_flow_style=False))\n\n name = app_name + '.zip'\n\n if not archive_dir:\n archive_dir = os.path.dirname(os.path.abspath(__file__))\n archive_path = os.path.join(archive_dir, name)\n\n with zipfile.ZipFile(archive_path, 'w') as zip_file:\n for root, dirs, files in os.walk(package_dir):\n for f in files:\n zip_file.write(\n os.path.join(root, f),\n arcname=os.path.join(os.path.relpath(root, package_dir), f)\n )\n\n return archive_path, name", "def deploy():\n build()\n copy()\n install()", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': 
base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def deploy():\n local('appcfg.py --no_cookies [email protected] update .',\n capture=False)", "def create_manifest(\n upload_dir,\n study_id,\n analysis_id,\n song_url,\n auth_token\n):\n files_dir = os.path.join(upload_dir, 'files')\n manifest_dir = os.path.join(upload_dir, 'manifests')\n song_client = SongClient(\n song_url,\n auth_token,\n VERIFY_CERTIFICATES\n )\n manifest = song_client.get_analysis_manifest(\n study_id,\n analysis_id,\n files_dir\n )\n if os.path.isdir(manifest_dir):\n shutil.rmtree(manifest_dir)\n os.makedirs(manifest_dir)\n manifest.write(\n os.path.join(manifest_dir, 'manifest.txt'),\n overwrite=True\n )", "def install_apps(self, app_installers):\n print('[?] Installing missing APK(s) and IPA(s).')\n for app_installer in app_installers:\n with request.urlopen(app_installer[1]) as response, open(app_installer[0], 'wb') as out_app_file:\n if response.getcode() != 200:\n print(f'[-] Failed to install {app_installer[1]}.')\n return\n print(f'[+] Successfully installed {app_installer[1]}.')\n shutil.copyfileobj(response, out_app_file)", "def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)", "def mergeManifest(channel, targetManifest, sdkManifest):\n\n if not os.path.exists(targetManifest) or not os.path.exists(sdkManifest):\n utils_log.error(\"the manifest file is not exists.targetManifest:%s;sdkManifest:%s\", targetManifest, sdkManifest)\n return False\n\n ET.register_namespace('android', androidNS)\n targetTree = ET.parse(targetManifest)\n targetRoot = targetTree.getroot()\n\n ET.register_namespace('android', androidNS)\n sdkTree = ET.parse(sdkManifest)\n sdkRoot = sdkTree.getroot()\n\n f = open(targetManifest)\n targetContent = f.read()\n f.close()\n\n permissionConfigNode = sdkRoot.find('permissionConfig')\n if permissionConfigNode != None and len(permissionConfigNode) > 0:\n for child in list(permissionConfigNode):\n key = '{' + androidNS + '}name'\n val = child.get(key)\n if val != None and len(val) > 0:\n attrIndex = targetContent.find(val)\n if -1 == attrIndex:\n targetRoot.append(child)\n\n appConfigNode = sdkRoot.find('applicationConfig')\n appNode = targetRoot.find('application')\n\n if appConfigNode != None:\n\n proxyApplicationName = appConfigNode.get('proxyApplication')\n if proxyApplicationName != None and len(proxyApplicationName) > 0:\n\n if 'PYW_APPLICATION_PROXY_NAME' in channel:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = channel[\n 'PYW_APPLICATION_PROXY_NAME'] + ',' + proxyApplicationName\n else:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = proxyApplicationName\n\n # 获取渠道闪屏名称\n launcherName = appConfigNode.get('channelLauncherName')\n # appKeyWord = appConfigNode.get('keyword')\n\n # exists = appKeyWord != None and len(appKeyWord.strip()) > 0 and targetContent.find(appKeyWord) != -1\n\n # if not exists:\n # remove keyword check...\n for child in list(appConfigNode):\n targetRoot.find('application').append(child)\n\n targetTree.write(targetManifest, 'UTF-8')\n # 修改闪屏 如果渠道 需要闪屏文件则增加此方法 不要则注释掉\n if launcherName != None and len(launcherName) > 0:\n mergeLauncher(launcherName, targetManifest)\n\n return True", "def _TransferPublishManifest(self, publish_manifest, 
db_path_prefix,\n force_copy):\n for item in publish_manifest:\n src_path = item.current_path\n dest_path = \"%s/%s\" % (db_path_prefix, item.orig_path)\n logger.debug(\"TransferPublishManifest - src_path: %s, dest_path: %s.\",\n src_path, dest_path)\n\n # Transfer manifest file to published database directory.\n tries = 2\n sleep_secs = 5\n while (not serve_utils.LocalTransfer(\n src_path, dest_path,\n force_copy, prefer_copy=True, allow_symlinks=False)):\n tries -= 1\n if tries == 0:\n raise exceptions.PublishServeException(\n \"Could not transfer publish manifest file %s to %s.\" %\n (src_path, dest_path))\n logger.debug(\"Retrying Local Transfer.\")\n time.sleep(sleep_secs)\n sleep_secs *= 2 # Double the sleep time after each retry.", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))" ]
[ "0.68291336", "0.6059394", "0.6052995", "0.6010262", "0.5933436", "0.5847316", "0.5775076", "0.57516134", "0.5748247", "0.5703469", "0.56963223", "0.56790406", "0.5647197", "0.56322503", "0.5598916", "0.5595371", "0.5584658", "0.5561256", "0.54689205", "0.5453668", "0.54502493", "0.5399678", "0.53928393", "0.53469676", "0.53417796", "0.5337742", "0.53009677", "0.5287064", "0.5279658", "0.526841" ]
0.7366084
0
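Each row above pairs a natural-language query (a docstring) with one positive code document, 30 negative documents, and their similarity scores, following the triplet objective declared in the metadata field. The sketch below shows one way such a row could be turned into (query, positive, negative) training triplets; it is an illustrative assumption, not part of the dataset, and presumes the row has already been parsed into a Python dict keyed by the column names listed at the top of this dump.

    # Minimal sketch, assuming `row` is a dict with the columns shown above.
    def row_to_triplets(row, max_negatives=5):
        """Yield (query, positive, negative) triplets for a triplet-loss objective."""
        query = row["query"]
        positive = row["document"]
        # Pair each negative with its score; higher scores are treated here as
        # harder negatives, so they are kept first.
        scored = sorted(
            zip(row["negatives"], (float(s) for s in row["negative_scores"])),
            key=lambda pair: pair[1],
            reverse=True,
        )
        for negative, _score in scored[:max_negatives]:
            yield (query, positive, negative)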
Queries the marketplace for published apps
def get_search_results(config, client, page):
    resp = client.get_published_apps(config.username, page)
    resp_json = resp.json()
    search_results = resp_json["results"]
    if search_results is None or len(search_results) == 0:
        logger.info(
            click.style("You haven't published any apps to the marketplace yet. Use ", fg="blue") +
            click.style("21 publish submit {PATH_TO_MANIFEST_FILE}", bold=True, fg="blue") +
            click.style(" to publish your apps to the marketplace.", fg="blue"),
            fg="blue")
        return 0

    total_pages = resp_json["total_pages"]
    logger.info("\nPage {}/{}".format(page + 1, total_pages), fg="green")
    headers = ["id", "Title", "Url", "Rating", "Is up", "Is healthy", "Average Uptime", "Last Update"]
    rows = []
    for r in search_results:
        rating = "Not yet Rated"
        if r["rating_count"] > 0:
            rating = "{:.1f} ({} rating".format(r["average_rating"], int(r["rating_count"]))
            if r["rating_count"] > 1:
                rating += "s"
            rating += ")"
        rows.append([r["id"], r["title"], r["app_url"], rating,
                     str(r["is_up"]), str(r["is_healthy"]),
                     "{:.2f}%".format(r["average_uptime"] * 100),
                     util.format_date(r["last_update"])])

    logger.info(tabulate(rows, headers, tablefmt="simple"))
    return total_pages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)", "async def get_installed_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_INSTALLEDAPPS, params=params)", "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def get_apps(self):\n return self.apps", "def get_all_apps(self):\n return list(self.apps.values())", "def get_apps(self, request, app_ids):\n sq = WebappIndexer.search()\n if request.query_params.get('filtering', '1') == '1':\n # With filtering (default).\n for backend in self.filter_backends:\n sq = backend().filter_queryset(request, sq, self)\n sq = WebappIndexer.filter_by_apps(app_ids, sq)\n\n # Store the apps to attach to feed elements later.\n with statsd.timer('mkt.feed.views.apps_query'):\n apps = sq.execute().hits\n return dict((app.id, app) for app in apps)", "def get(self):\n return read_heroku_apps(request.args)", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n 
app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def search_app(self, search_pattern):\n\n url_params = {'limit': SearchAPI.SCAN_LIMIT, 'expand': 'true'}\n first_search = self.get('mgmt-pop/apps', params=url_params)\n data = first_search.json()\n app_found = 0\n app_scanned = 0\n\n # CLI ouput header\n cli.header('#app_id,type,name,host,cname,cert_id,status,reachable')\n stats = self.process_page(data, search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n if data.get(\"meta\"):\n\n app_count = data.get(\"meta\").get(\"total_count\")\n page_offset = data.get(\"meta\").get(\"offset\")\n page_limit = data.get(\"meta\").get(\"limit\")\n page_total = ceil(app_count / page_limit)\n\n logging.debug(\"app_count: {}, scanned: {}, offset: {}, limit: {}, pages: {}\".format(\n app_count, app_scanned, page_offset, page_limit, page_total))\n\n for page in range(1, page_total):\n logging.debug(\"Loading application page {} of {}\".format(page, page_total))\n url_params['offset'] = page * page_limit\n search = self.get('mgmt-pop/apps', params=url_params)\n stats = self.process_page(search.json(), search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n # CLI ouput footer\n if not config.batch:\n if app_found != app_count:\n cli.footer(\"Found %s app(s), total %s app(s)\" % (app_found, app_count))\n else:\n cli.footer(\"%s app(s)\" % app_count)", "def AppGetApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def get_apps(self, limit, offset=None):\n params = {'v': 
WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_publishers(self):", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def apps(self):\n if \"apps\" in self._prop_dict:\n return AppsCollectionPage(self._prop_dict[\"apps\"])\n else:\n return None", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.6542337", "0.65027314", "0.64569217", "0.6388464", "0.618664", "0.6088148", "0.6048241", "0.60335594", "0.60146016", "0.6012545", "0.5858998", "0.5831379", "0.57853997", "0.5776328", "0.5764713", "0.57061666", "0.5654142", "0.56492996", "0.56406236", "0.56354034", "0.56090736", "0.5595303", "0.5546365", "0.55348814", "0.5506419", "0.5478128", "0.5460476", "0.54420274", "0.54384553", "0.5433034" ]
0.66877913
0
Replace "AUTO" in the host and quickbuy with the ZeroTier IP. The server subsequently replaces, in the displayed quickbuy, instances of the manifest host value with a mkt.21.co address.
def replace_auto(manifest_dict, marketplace):
    manifest_dict = copy.deepcopy(manifest_dict)

    def get_formatted_zerotier_address(marketplace):
        host = get_zerotier_address(marketplace)
        if "." not in host:
            return "[{}]".format(host)
        else:
            return host

    if 'AUTO' in manifest_dict['host']:
        manifest_dict['host'] = manifest_dict['host'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    if 'AUTO' in manifest_dict['info']['x-21-quick-buy']:
        manifest_dict['info']['x-21-quick-buy'] = manifest_dict['info']['x-21-quick-buy'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    return manifest_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_replace_host_subnet(self):\n pass", "def configure_host_ips(h3, h4, ip_address_hs):\n\n h3.libs.ip.flush_ip('eth1')\n h3.libs.ip.interface('eth1', up=False)\n\n h4.libs.ip.flush_ip('eth1')\n h4.libs.ip.interface('eth1', up=False)\n\n h3.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[0], MASK), up=True)\n h4.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[1], MASK), up=True)", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)", "def test_patch_host_subnet(self):\n pass", "def softupdate_ip(request, ipaddress):\n\n softupdate_key = settings.SOFTUPDATE_KEY\n if request.POST.get(\"key\", \"invalid_key\") != softupdate_key:\n raise PermissionDenied()\n\n # LC: UGGLY and not \"portable\"\n STATUS_EN_SERVICE = 'En service'\n\n def noanswer(reason=\"\"):\n message = \"\"\"Modification impossible.\\n\"\"\"\n if reason and settings.DEBUG:\n message += \"\"\"%s\\n\"\"\" % (reason,)\n return HttpResponse(message, content_type=\"plain/text\")\n\n serial = request.POST.get(\"serial\", None)\n hostname = request.POST.get(\"hostname\", None)\n\n host = None\n errmsgs = []\n\n if serial:\n hosts = Host.objects.filter(serial=serial)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n if not host:\n errmsgs.append(\"Le host serial=%s est introuvable.\" % (serial,))\n\n if hostname and not host:\n hosts = Host.objects.filter(hostname=hostname,\n status__description=STATUS_EN_SERVICE)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n # Get the last log entry\n hostlogs = HostIPLog.objects.filter(host=host, log_ip=ipaddress) \\\n .order_by(\"-date\")\n if hostlogs:\n hostlog = hostlogs[0]\n else:\n hostlog = HostIPLog(host=host, log_ip=ipaddress)\n \n hostlog.log_queryfrom = get_request_remote_addr(request)\n hostlog.log_hostname = request.POST.get('hostname', 'unknown')\n hostlog.save()\n\n return HttpResponse('ok.', content_type='plain/text')", "def set_static_ip_address(self, payload):\n\n # This request is received from CLI for setting ip address of an\n # instance.\n macaddr = payload.get('mac')\n ipaddr = payload.get('ip')\n\n # Find the entry associated with the mac in the database.\n req = dict(mac=macaddr)\n instances = self.get_vms_for_this_req(**req)\n for vm in instances:\n LOG.info(_LI('Updating IP address: %(ip)s %(mac)s.'),\n {'ip': ipaddr, 'mac': macaddr})\n # Send request to update the rule.\n try:\n rule_info = dict(ip=ipaddr, mac=macaddr,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to 
update rules.\"))\n else:\n # Update the database.\n params = dict(columns=dict(ip=ipaddr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ipaddr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to agent.'))", "def set_ip_adresses(self):\n # unfold a config tree for the current suffix, if any\n for interface, details in self.interfaces.items():\n for k, v in details.items():\n if k == 'address':\n ip, prefix = address_to_ip_prefix(v)\n self.interfaces[interface]['ip_address'] = ip\n self.interfaces[interface]['ip_prefix'] = prefix\n break\n if interface == 'wan':\n self.ip_address = ip\n if interface == 'ha_sync':\n self.ha_sync_ip_address = ip", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def get_externalip(self):\n\n myip = \"\"\n for i in range(5):\n myip = self.fetch(random.choice(self.server_list))\n if myip != \"\":\n return myip\n else:\n continue\n return \"\"", "def set_ip(self, ip: str, host_addr: str) -> None:\n self.config[\"linkIp\"] = ip\n self.config[\"ngapIp\"] = ip\n self.config[\"gtpIp\"] = ip", "def format_host(host):\n\n host = strip_suffix(host, \".lan.urlab.be\")\n host = strip_suffix(host, \".lan\")\n host = strip_suffix(host, \".local\")\n host = strip_suffix(host, \"iPodtouch\")\n host = strip_suffix(host, \"-PC\")\n host = strip_suffix(host, \"-pc\")\n\n host = strip_prefix(host, \"pc-\")\n host = strip_prefix(host, \"PC-\")\n host = strip_prefix(host, \"DESKTOP-\")\n host = strip_prefix(host, \"LAPTOP-\")\n host = strip_prefix(host, \"iPod-de-\")\n host = strip_prefix(host, \"iPadde\")\n\n return host", "def calculate_trima_address(testMachine):\r\n _machineBase = int(testMachine/256)\r\n _machineRemainder = int(testMachine-(_machineBase*256))\r\n _machineBase = str(_machineBase)\r\n _machineRemainder = str(_machineRemainder)\r\n _address = \"172.21.\"+_machineBase+\".\"+_machineRemainder\r\n \r\n return _address", "def real_ip(self):\n if not hasattr(self, \"_real_ip\"):\n response = get(ICANHAZIP)\n self._real_ip = self._get_response_text(response)\n\n return self._real_ip", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def get_local_host_ip(self) -> str:", "def test_try_create_auto_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_auto_net_free.json'\n\n # Does get request\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, 
response.status_code)\n self.compare_values('10.0.1.2', response.data['ips'][0]['ip_formated'])", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def JP_V0_addr(self, addr):\n\t\tself.IP = addr + self.V[0]", "def elReplaceStaticIP(self, ipaddress, netmask=\"255.255.255.0\", gateway=None, nameservers=None):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n # sanity check\n normalizedStaticIp = NetworkConfigurationStaticParameters.normalizeStaticIp(ipaddress, netmask, gateway, nameservers)\n commandSection = self.sectionByName(\"command\")\n # several set\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--ip[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.ipaddress + r\"\\g<2>\",\n commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--netmask[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.netmask + r\"\\g<2>\",\n commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--gateway[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.gateway + r\"\\g<2>\",\n commandSection.string)\n if normalizedStaticIp.nameservers:\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + \",\".join(normalizedStaticIp.nameservers) + r\"\\g<2>\",\n commandSection.string)\n else:\n # remove option --nameserver\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*)--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*[^\\s]+(.*)$\",\n r\"\\g<1>\" + r\"\\g<2>\",\n commandSection.string)\n return self", "def replace_helmrepo_url_with_floating_address(dbapi, helmrepository_url):\n\n parsed_helm_repo_url = urlparse(helmrepository_url)\n sc_network = \\\n dbapi.network_get_by_type(constants.NETWORK_TYPE_CLUSTER_HOST)\n sc_network_addr_pool = \\\n dbapi.address_pool_get(sc_network.pool_uuid)\n sc_float_ip = 
sc_network_addr_pool.floating_address\n if is_valid_ipv6(sc_float_ip):\n sc_float_ip = '[' + sc_float_ip + ']'\n\n return \"http://{}:{}{}\".format(\n sc_float_ip,\n get_http_port(dbapi),\n parsed_helm_repo_url.path\n )", "def configure(node):\n script = []\n script.append(Statements.exec(\"hostname %s\" % node.getName()))\n script.append(Statements.createOrOverwriteFile(\n \"/etc/hostname\", [node.getName()]))\n script.append(Statements.exec(\n \"sed -i 's/127.0.0.1/127.0.0.1\\t%s/' /etc/hosts\" % node.getName()))\n return script", "def elReplaceHostname(self, hostname):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n hostname = re.escape(hostname) # precaution\n commandSection = self.sectionByName(\"command\")\n # change to hostname\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--hostname[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + hostname + r\"\\g<2>\",\n commandSection.string)\n return self", "def getHost():", "def getHost():", "def test_add_autoassigned_ipv6(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::1\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::3\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::2\", retries=3)" ]
[ "0.60582787", "0.6001809", "0.57579035", "0.5625545", "0.5491891", "0.5420629", "0.5379404", "0.5277771", "0.52422714", "0.52219576", "0.52120817", "0.52100813", "0.52078915", "0.5189471", "0.5156395", "0.51266915", "0.5107505", "0.5092903", "0.5082044", "0.5071935", "0.50623417", "0.5057758", "0.5056907", "0.50416505", "0.5035362", "0.50150806", "0.5003009", "0.5002411", "0.5002411", "0.4999194" ]
0.7036063
0
Validates the manifest file. Ensures that the required fields in the manifest are present and valid.
def validate_manifest(manifest_json):
    manifest_json = copy.deepcopy(manifest_json)

    for field in ["schemes", "host", "basePath", "info"]:
        if field not in manifest_json:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file.", fg="red").format(field),
                json=manifest_json)

    for field in ["contact", "title", "description", "x-21-total-price", "x-21-quick-buy", "x-21-category"]:
        if field not in manifest_json["info"]:
            raise exceptions.ValidationError(
                click.style(
                    "Field '{}' is missing from the manifest file under the 'info' section.",
                    fg="red").format(field),
                json=manifest_json)

    for field in {"name", "email"}:
        if field not in manifest_json["info"]["contact"]:
            raise exceptions.ValidationError(
                click.style(
                    "Field '{}' is missing from the manifest file under the 'contact' section.", fg="red")
                .format(field),
                json=manifest_json)

    for field in ["min", "max"]:
        if field not in manifest_json["info"]["x-21-total-price"]:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file under the "
                            "'x-21-total-price' section.", fg="red"),
                json=manifest_json)

    if len(manifest_json["schemes"]) == 0:
        raise exceptions.ValidationError(
            click.style(
                "You have to specify either HTTP or HTTPS for your endpoint under the "
                "`schemes` section.", fg="red"),
            json=manifest_json)

    valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}
    if manifest_json["info"]["x-21-category"].lower() not in valid_app_categories:
        valid_categories = ", ".join(valid_app_categories)
        raise exceptions.ValidationError(
            click.style("'{}' is not a valid category for the 21 marketplace. Valid categories are {}.",
                        fg="red").format(
                manifest_json["info"]["x-21-category"], valid_categories),
            json=manifest_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. 
Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and 
are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. 
Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def validate(self):\n errors = []\n if self.package_format:\n if not re.match('^[1-9][0-9]*$', str(self.package_format)):\n errors.append(\"The 'format' attribute of the package must \"\n 'contain a positive integer if present')\n\n if not self.name:\n errors.append('Package name must not be empty')\n # Must start with a lower case alphabetic character.\n # Allow lower case alphanummeric 
characters and underscores in\n # keymint packages.\n valid_package_name_regexp = '([^/ ]+/*)+(?<!/)'\n build_type = self.get_build_type()\n if not build_type.startswith('keymint'):\n # Dashes are allowed for other build_types.\n valid_package_name_regexp = '^[a-z][a-z0-9_-]*$'\n if not re.match(valid_package_name_regexp, self.name):\n errors.append(\"Package name '%s' does not follow naming \"\n 'conventions' % self.name)\n\n if self.version:\n if not re.match('^[0-9]+\\.[0-9_]+\\.[0-9_]+$', self.version):\n errors.append(\"Package version '%s' does not follow version \"\n 'conventions' % self.version)\n\n if self.maintainers is not None:\n # if not self.maintainers:\n # errors.append('Package must declare at least one maintainer')\n for maintainer in self.maintainers:\n try:\n maintainer.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n if not maintainer.email:\n errors.append('Maintainers must have an email address')\n\n if self.authors is not None:\n for author in self.authors:\n try:\n author.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n\n if errors:\n raise InvalidPackage('\\n'.join(errors))", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def read_manifest(self): # -> None:\n ...", "def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }" ]
[ "0.73052067", "0.6979145", "0.69215554", "0.68080264", "0.6786265", "0.6757262", "0.6450811", "0.6440165", "0.640244", "0.6351523", "0.63133943", "0.62944144", "0.62856257", "0.62499046", "0.62444276", "0.62436587", "0.6221382", "0.61910504", "0.6152198", "0.60909945", "0.60679615", "0.60588336", "0.60163915", "0.6001264", "0.5996636", "0.59722024", "0.59587604", "0.5956518", "0.59325206", "0.5914116" ]
0.741697
0
Gets the zerotier IP address from the given marketplace name
def get_zerotier_address(marketplace): logger.info("You might need to enter your superuser password.") address = zerotier.get_address(marketplace) if not address: join_cmd = click.style("21 join", bold=True, reset=False) no_zt_network = click.style( "You are not part of the {}. Use {} to join the market.", fg="red") raise UnloggedException(no_zt_network.format(marketplace, join_cmd)) return address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug(\"VIP '{0}': {1}\".format(vip_name, vip))\n return vip", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def address(self, name):\n return self.query(name).response.answer[0].items[0].address", "def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'", "def getIp(name):\n tmp = []\n ips = socket.getaddrinfo(socket.gethostbyname(name), None)\n for x in ips:\n tmp.append(x[4][0])\n\n return tmp", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_address(machine: Machine) -> str:\n default_route, _ = machine.run(\"ip route get 8.8.8.8\")\n return re.search(\" src ([0-9.]+) \", default_route).group(1)", "def get_public_ip(self, name=None):\n raise NotImplementedError", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def internet_address(self) -> str:\n return pulumi.get(self, \"internet_address\")", "def get_transport_address_by_name(transport_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from transport where name = '{}';\".format(transport_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_ip_address(self): # type: () -> t.Optional[str]\n if self.networks:\n network_name = get_docker_preferred_network_name(self.args)\n\n if not network_name:\n # Sort networks and use the first available.\n # This assumes all containers will have access to the same networks.\n network_name = sorted(self.networks.keys()).pop(0)\n\n ipaddress = self.networks[network_name]['IPAddress']\n else:\n ipaddress = self.network_settings['IPAddress']\n\n if not ipaddress:\n return None\n\n return ipaddress", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def get_local_host_ip(self) -> str:", "def get_ip_address(device):\n try:\n capwap_client_rcb = device.parse('show capwap client rcb')\n except SchemaEmptyParserError as e:\n log.error(e)\n return ''\n\n return 
capwap_client_rcb.get('mwar_ap_mgr_ip', '')", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_ip_address(self):\n raise NotImplementedError", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_ip(self):", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )" ]
[ "0.64503324", "0.63069993", "0.6197815", "0.618933", "0.61700016", "0.609886", "0.5992339", "0.5982672", "0.5972976", "0.5954624", "0.5945781", "0.5938082", "0.5936992", "0.5928978", "0.59271926", "0.59166557", "0.58961654", "0.5854242", "0.581978", "0.58141637", "0.5809872", "0.5806588", "0.5783619", "0.5768035", "0.5764843", "0.57551545", "0.5731219", "0.57216877", "0.57020044", "0.5693605" ]
0.6974309
0
Set mode wireframe only
def setDisplayMode(self, mode): return "Wireframe"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def wireframe_only(self):\n return self._wireframe_only", "def setSurfaceShadingMode(mode='flat'):\n sdict = {'flat':'FLAT','smooth':'SMOOTH'}\n dislin.shdmod(sdict[mode], 'SURFACE')", "def getDefaultDisplayMode(self):\n return \"Wireframe\"", "def add_wireframe_modifier(self):\n scene = self.set_as_active()\n\n # if the user selected a material, use it\n if w_var.cb_mat_wire:\n wireframe_mat = bpy.data.materials[w_var.mat_wire_name]\n\n # else, create a new one with the color selected\n else:\n color_wire = w_var.color_wire\n\n # separating rgb and alpha\n wireframe_color_rgb = color_wire[0:3]\n wireframe_color_alpha = color_wire[-1]\n wireframe_mat = bpy.data.materials.new('wireframe')\n\n renderengine = scene.wirebomb.data_renderengine\n \n if renderengine == 'CYCLES':\n wireframe_mat.use_nodes = True\n tree = wireframe_mat.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_transparent = tree.nodes.new('ShaderNodeBsdfTransparent')\n node_transparent.location = -300, 100\n\n node_diffuse = tree.nodes.new('ShaderNodeBsdfDiffuse')\n node_diffuse.location = -300, -100\n node_diffuse.inputs[0].default_value = wireframe_color_rgb + (1.0,)\n node_diffuse.color = wireframe_color_rgb\n node_diffuse.name = 'addon_wireframe_color' # referencing to this ID in the real-time change\n\n node_mixshader = tree.nodes.new('ShaderNodeMixShader')\n node_mixshader.location = 0, 50\n node_mixshader.inputs[0].default_value = wireframe_color_alpha\n node_mixshader.name = 'addon_wireframe_alpha' # referencing to this ID in the real-time change\n\n node_output = tree.nodes.new('ShaderNodeOutputMaterial')\n node_output.location = 300, 50\n\n # connecting the nodes\n tree.links.new(node_transparent.outputs[0], node_mixshader.inputs[1])\n tree.links.new(node_diffuse.outputs[0], node_mixshader.inputs[2])\n tree.links.new(node_mixshader.outputs[0], node_output.inputs[0])\n\n for node in tree.nodes:\n node.select = False\n\n # sets the viewport color\n wireframe_mat.diffuse_color = wireframe_color_rgb\n\n elif renderengine == 'BLENDER_RENDER':\n wireframe_mat.diffuse_color = wireframe_color_rgb\n wireframe_mat.use_transparency = True\n wireframe_mat.alpha = wireframe_color_alpha\n\n self.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n\n for obj in scene.objects:\n if obj.select:\n obj.data.materials.append(wireframe_mat)\n modifier_wireframe = obj.modifiers.new(name='Wireframe', type='WIREFRAME')\n modifier_wireframe.use_even_offset = False # Causes spikes on some models\n modifier_wireframe.use_replace = False\n modifier_wireframe.thickness = w_var.slider_wt_modifier\n\n # arbitrary high number because wire material is always added to end\n modifier_wireframe.material_offset = 12345\n\n # referencing to this ID in the real-time change\n modifier_wireframe.name = 'addon_wireframe'\n\n return wireframe_mat", "def resMode(mode): \n if mode==0:\n makeMesh(r0x, r0y)\n elif mode==1:\n makeMesh(r1x, r1y)\n elif (mode==2):\n makeMesh(r2x, r2y)", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def add_wireframe_freestyle(self):\n scene = self.set_as_active()\n previous_area = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n previous_layers = tuple(scene.layers)\n\n # can't enter edit mode on objects on inactive layers\n scene.layers = (True,)*20\n self.select('SELECT', {'MESH'}, 
objects_excluded={'ELSE'})\n\n for obj in scene.objects:\n if obj.select:\n scene.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.mark_freestyle_edge()\n bpy.ops.mesh.select_all(action='DESELECT')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n bpy.context.area.type = previous_area\n scene.layers = previous_layers\n\n scene.render.use_freestyle = True\n scene.render.layers.active = scene.render.layers[w_var.rlname]\n\n for n in scene.render.layers.active.freestyle_settings.linesets:\n scene.render.layers.active.freestyle_settings.linesets.remove(n)\n\n lineset = scene.render.layers.active.freestyle_settings.linesets.new('wireframe')\n lineset.select_edge_mark = True\n lineset.select_crease = False\n\n wire_color = w_var.color_wire\n wire_thickness = w_var.slider_wt_freestyle\n\n wire_color_rgb = wire_color[0:3]\n wire_color_alpha = wire_color[-1]\n\n linestyle = bpy.data.linestyles.new('wire_style')\n linestyle.color = wire_color_rgb\n linestyle.alpha = wire_color_alpha\n linestyle.thickness = wire_thickness\n\n scene.render.layers.active.freestyle_settings.linesets.active.linestyle = linestyle\n\n return linestyle", "def _sketch_mode(self):\r\n self._mode_select(1)", "def render_wireframe(self, **kwds):\n proj = self.projection()\n if self.ambient_dim()==3:\n return proj.render_wireframe_3d(**kwds)\n if self.ambient_dim()==2:\n return proj.render_outline_2d(**kwds)\n raise ValueError, \"render_wireframe is only defined for 2 and 3 dimensional polyhedra.\"", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)", "def setDrawingMode(self):\n pass", "def addWireframe(self, wireframe):\n self.wireframe = wireframe\n self.tf_wireframe = wireframe.copy()", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetPlaneMode(self, *args)", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def set_up_wireframe_modifier(self):\n scene = self.set_as_active()\n \n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clay:\n\n # adding clay material before wireframe material for material offset in wireframe modifier to be correct\n self.set_up_clay()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n # sets up renderlayer and adds wireframe modifier/material to affected meshes and saves wireframe material\n self.set_up_rlayer('wireframe')\n scene.wirebomb.data_material_wire = self.add_wireframe_modifier().name\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao:\n self.set_up_all_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def 
set_up_wireframe_freestyle(self):\n scene = self.set_as_active()\n \n # sets up renderlayer(s) (depending on 'Composited wireframing' checkbox) and freestyle wireframing\n # also saves freestyle linestyle name\n self.set_up_rlayer('wireframe', rlname_other='other')\n scene.wirebomb.data_freestyle_linestyle = self.add_wireframe_freestyle().name\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if w_var.cb_clay:\n self.set_up_clay()\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao and not w_var.cb_composited:\n self.set_up_all_ao()\n\n elif w_var.cb_composited:\n\n # sets up composition for wireframe and sets up ambient occlusion lighting if used\n self.comp_add_wireframe_freestyle()\n \n if scene.render.engine == 'CYCLES':\n scene.cycles.film_transparent = True\n\n else:\n scene.render.alpha_mode = 'TRANSPARENT'\n\n if w_var.cb_ao:\n self.set_up_world_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def enable_texture_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"TEXTURED\"\n return", "def set_mode_point():\n global DRAW_MODE\n DRAW_MODE=\"point\"", "def setCompositionMode(self, mode):\n self.paintMode = mode\n self.update()", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def SetEdgeMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_WireDivide_SetEdgeMode(self, *args)", "def enable_textured_solid_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"SOLID\"\n space.show_textured_solid = True\n return", "def plot_wireframe(Tfull):\n from mpl_toolkits.mplot3d import axes3d\n N = Tfull.shape[0]\n x = y = np.linspace(0, 1, N)\n X, Y = np.meshgrid(x,y)\n # Construct and return a function suitable for interactive demo\n def plot(elev=25, azim=50):\n fig = plt.figure(1, figsize=(14, 8))\n plt.clf()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_wireframe(X, Y, Tfull)\n ax.view_init(elev=elev, azim=azim)\n plt.axis('scaled')\n plt.xlabel('x (m)')\n plt.ylabel('y (m)')\n plt.title('T(x,y) on %dx%d grid' % (N,N))\n plot()\n return plot", "def setProtectSurfaces():\n dislin.shlsur()", "def _setmode(self, mode=None):\n if mode is None:\n return self._mode\n if mode not in [\"standard\", \"logo\", \"world\"]:\n return\n self._mode = mode\n if mode in [\"standard\", \"world\"]:\n self._angleOffset = 0\n self._angleOrient = 1\n else: # mode == \"logo\":\n self._angleOffset = self._fullcircle/4.\n self._angleOrient = -1", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def DualMode(self) -> bool:" ]
[ "0.8165864", "0.7190839", "0.67805", "0.6758721", "0.65666986", "0.6441041", "0.6434427", "0.6336882", "0.6314271", "0.6215526", "0.61872584", "0.6129271", "0.60960007", "0.6085161", "0.5989606", "0.5981274", "0.59671307", "0.5965656", "0.5920257", "0.58588403", "0.5820351", "0.5812003", "0.5810211", "0.5778697", "0.5736275", "0.5732092", "0.5725577", "0.5725093", "0.5725093", "0.5684997" ]
0.7538678
1
Register an asset required by a dashboard module. Some modules require special scripts or stylesheets, like the
def register_module_asset(self, asset): self._module_assets.append(asset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_xmodule_assets():\r\n sh('xmodule_assets common/static/xmodule')", "def assets():\n pass", "def assets():", "def script_info_assets(app, static_dir, testcss):\n InvenioAssets(app)\n\n blueprint = Blueprint(__name__, \"test_bp\", static_folder=static_dir)\n\n class Ext(object):\n def __init__(self, app):\n assets = app.extensions[\"invenio-assets\"]\n app.register_blueprint(blueprint)\n\n Ext(app)\n\n yield ScriptInfo(create_app=lambda: app)", "def add_assets_mapping(config, mapping):\n assets = config.registry.queryUtility(IAssets) or Assets()\n assets.update(mapping)\n config.registry.registerUtility(assets, IAssets)", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def register_scss(assets):\n assets.append_path(app.static_folder, app.static_url_path)\n assets.config['SASS_PATH'] = 'app/scss'\n\n bundle = Bundle('scss/client.scss',\n output='css/gen/client.%(version)s.css',\n depends=('**/*.scss'),\n filters=('scss', 'cssmin'))\n assets.register('scss_client', bundle)", "def test_import_system_asset(self):\n pass", "def register_module():\n\n # Course Dashboard\n tabs.Registry.register(\n base.OfflineAssignmentBase.DASHBOARD_NAV,\n base.OfflineAssignmentBase.DASHBOARD_TAB,\n base.OfflineAssignmentBase.DESCRIPTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler)\n\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.DASHBOARD_DEFAULT_ACTION, None)\n\n dashboard.DashboardHandler.add_nav_mapping(\n base.OfflineAssignmentBase.DASHBOARD_NAV,\n base.OfflineAssignmentBase.NAME,\n )\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.OFFLINE_ASSIGNMENT_DETAILS_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.get_assignment_scores\n )\n\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.SCORE_OFFLINE_ASSIGNMENT_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.get_bulk_score\n )\n\n dashboard.DashboardHandler.add_custom_post_action(\n base.OfflineAssignmentBase.SCORE_OFFLINE_ASSIGNMENT_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.post_bulk_score\n )\n\n # Course Staff Custom Handlers\n evaluate.EvaluationHandler.add_custom_get_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.LIST_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.get_list_offline\n )\n\n evaluate.EvaluationHandler.add_custom_get_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.EVALUATE_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.get_evaluate_offline\n )\n\n evaluate.EvaluationHandler.add_custom_post_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.POST_SCORE_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.post_score_offline\n )\n\n associated_js_files_handlers = [\n ('/modules/offline_assignments/editor/(.*)', sites.make_zip_handler(\n os.path.join(\n appengine_config.BUNDLE_ROOT,\n 'modules/offline_assignments/lib/ckeditor.zip'))),\n (\n settings.OfflineAssignmentRESTHandler.URI,\n settings.OfflineAssignmentRESTHandler\n )\n ]\n\n\n question_handlers = [\n (base.OfflineAssignmentBase.UNIT_URL,\n assignment.OfflineAssignmentHandler),\n (question.OfflineAssignmentRESTHandler.URI,\n question.OfflineAssignmentRESTHandler)]\n\n global custom_module\n custom_module = custom_modules.Module(\n base.OfflineAssignmentBase.NAME,\n base.OfflineAssignmentBase.DESCRIPTION,\n associated_js_files_handlers, question_handlers)\n\n custom_unit = 
custom_units.CustomUnit(\n base.OfflineAssignmentBase.UNIT_TYPE_ID,\n base.OfflineAssignmentBase.NAME,\n question.OfflineAssignmentRESTHandler,\n visible_url,\n cleanup_helper=delete_assignement,\n import_helper=import_assignment,\n is_graded=True)\n\n # Add custom unit details to course staff module\n course_staff.CourseStaff.add_custom_unit(\n base.OfflineAssignmentBase.UNIT_TYPE_ID,\n offline_course_staff.OfflineAssignmentsCourseStaffBase.LIST_ACTION)\n\n return custom_module", "def asset(atype, aname):\n if atype not in ('css', 'js'):\n raise template.TemplateSyntaxError('Type can only be one of css or js.')\n\n if aname not in ASSETS[atype]:\n raise ValueError('Invalid asset: %r' % aname)\n\n meta = ASSETS[atype][aname]\n\n return {\n 'USE_MINIFIED': USE_MINIFIED,\n 'type': atype,\n 'asset': aname,\n 'meta': meta,\n }", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def add_asset(urn: str, asset: str, validate_assets: bool) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.add_asset(asset)\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"add assets\")\n if validate_assets:\n _abort_if_non_existent_urn(\n graph,\n asset,\n \"add assets. Use --no-validate-assets if you want to turn off validation\",\n )\n for mcp in dataproduct_patcher.build():\n graph.emit(mcp)", "def add_latesettings_assets(self):\n\n # setting up static file serving\n assetmanager = self.comp('assetmanager')\n\n # add external asset mount point where we can copy public static files so they can be served by a separate traditional web server\n # presumably this directory is being served by a more traditional webserver, at this url we specify below\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_ExternalServer('external_assets', filepath = '${mewlofilepath}/public_assets', urlabs = 'http://127.0.0.1/mewlo/mewlo/public_assets' )\n )\n\n # add internal asset mount point where we will serve files internally; a route will be automatically created for any asset source attached to this mount point; we can choose the path prefix for urls served by the route\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_InternalRoute('internal_assets', urlpath='assets')\n )\n\n\n # now that we have some mount points, we can specify some files to be hosted on them\n # note that the ids for all asset sources MUST be unique (ATTN:TODO elaborate on this please)\n # first we mount the files in the staticfilesource/ directory as internal assets that we will serve internally via mewlo; the id will be used for alias creation, and for the route\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteinternal', mountid = 'internal_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n # then as a test, lets mount same files on the external mount point -- this will cause mewlo to physically copy the files to the external filepath, where presumably another web server can serve them\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteexternal', mountid = 'external_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n\n # remember that one should never refer to the assets by a hardcoded url or file path; always use the aliases created by these functions, which will take the form (where ID is the id of the asset source):\n # 
'asset_ID_urlrel' | 'asset_ID_urlabs' | 'asset_ID_filepath'\n # you can also use helper function to build these names, which would be better.", "def configure_ext_assets(app, xstatic):\n assets = Environment(app)\n coffee_lib = Bundle(\n 'coffee/lib/*.coffee',\n filters='coffeescript',\n output='gen/lib.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee_pages = Bundle(\n 'coffee/pages/*.coffee',\n filters='coffeescript',\n output='gen/pages.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee = Bundle(\n coffee_lib,\n coffee_pages,\n output='gen/app.js'\n )\n assets.register('coffee_app', coffee)\n\n coffee_spec = Bundle(\n 'coffee/spec/*.coffee',\n filters='coffeescript',\n output='gen/coffee_spec.js'\n )\n assets.register('coffee_spec', coffee_spec)\n\n vendor_js = Bundle(\n os.path.join(xstatic.path_for('jquery'), 'jquery.min.js'),\n 'vendor/pdfjs-' + app.config['X_PDFJS_VERSION'] + '-dist/build/pdf.js',\n 'vendor/jquery.jeditable.mini.js',\n 'vendor/jquery-ui-1.11.2/jquery-ui.min.js',\n output='gen/vendor_js.js',\n )\n assets.register('vendor_js', vendor_js)\n\n scss_bundle = Bundle(\n 'scss/site.scss',\n depends='**/*.scss',\n filters='pyscss',\n output='gen/app.css'\n )\n assets.register('scss_all', scss_bundle)\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n scss.config.LOAD_PATHS = [\n os.path.join(xstatic.path_for('bootstrap_scss'), 'scss'),\n os.path.join(this_dir, '../static/vendor/bootswatch-darkly'),\n ]", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def autoload():\r\n global _ASSETS_LOADED\r\n if _ASSETS_LOADED:\r\n return False\r\n\r\n # Import this locally, so that we don't have a global Django\r\n # dependency.\r\n from django.conf import settings\r\n\r\n for app in settings.INSTALLED_APPS:\r\n # For each app, we need to look for an assets.py inside that\r\n # app's package. We can't use os.path here -- recall that\r\n # modules may be imported different ways (think zip files) --\r\n # so we need to get the app's __path__ and look for\r\n # admin.py on that path.\r\n #if options.get('verbosity') > 1:\r\n # print \"\\t%s...\" % app,\r\n\r\n # Step 1: find out the app's __path__ Import errors here will\r\n # (and should) bubble up, but a missing __path__ (which is\r\n # legal, but weird) fails silently -- apps that do weird things\r\n # with __path__ might need to roll their own registration.\r\n try:\r\n app_path = import_module(app).__path__\r\n except AttributeError:\r\n #if options.get('verbosity') > 1:\r\n # print \"cannot inspect app\"\r\n continue\r\n\r\n # Step 2: use imp.find_module to find the app's assets.py.\r\n # For some reason imp.find_module raises ImportError if the\r\n # app can't be found but doesn't actually try to import the\r\n # module. So skip this app if its assetse.py doesn't exist\r\n try:\r\n imp.find_module('assets', app_path)\r\n except ImportError:\r\n #if options.get('verbosity') > 1:\r\n # print \"no assets module\"\r\n continue\r\n\r\n # Step 3: import the app's assets file. 
If this has errors we\r\n # want them to bubble up.\r\n import_module(\"%s.assets\" % app)\r\n #if options.get('verbosity') > 1:\r\n # print \"assets module loaded\"\r\n\r\n # Load additional modules.\r\n for module in getattr(settings, 'ASSETS_MODULES', []):\r\n import_module(\"%s\" % module)\r\n\r\n _ASSETS_LOADED = True", "def compile_static_assets(assets):\n assets.auto_build = True\n assets.debug = False\n\n css = Bundle(\n \"css/*.css\",\n # filters=\"less,cssmin\",\n output=\"gen/avantui.css\",\n # extra={\"rel\": \"stylesheet/less\"},\n )\n\n js = Bundle(\n \"js/*.js\",\n output='gen/avantui.js'\n )\n\n assets.register(\"avantui_css\", css)\n assets.register(\"avantui_js\", js)\n if app.config[\"ENV\"] == \"development\":\n css.build()\n js.build()\n return assets", "def test_import_test_asset(self):\n pass", "def test_create_system_asset(self):\n pass", "def includeme(config):\n\n config.add_translation_dirs('kotti_dashboard:locale')\n config.add_static_view('static-kotti_dashboard', 'kotti_dashboard:static')\n\n config.scan(__name__)", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", 
icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def asset_tag(request, key, **kwargs):\n theme = request.theme\n asset = theme.stacked_assets[key]\n settings = request.registry.settings\n should_compile = asbool(settings.get('pyramid_frontend.compile'))\n\n if should_compile:\n filename = theme.compiled_asset_path(key)\n url_path = '/compiled/' + theme.key + '/' + filename\n else:\n url_path = asset.url_path\n\n return literal(asset.tag(theme, url_path, production=should_compile,\n **kwargs))", "def test_import_software_asset(self):\n pass", "def assets_library_url(request):\n 
return {\n \"PATTERN_LIBRARY_URL\": settings.PATTERN_LIBRARY_URL,\n }", "def _add_static_files(self, req):\n add_script(req, self._get_jqplot('jquery.jqplot'))\n add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')\n # excanvas is needed for IE8 support\n add_script(req, self._get_jqplot('excanvas.min'))\n add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))", "def custom_asset(parser, token):\n \n library = global_asset_library()\n \n try:\n parts = token.split_contents()\n asset_type = None\n \n if len(parts) == 2:\n tag_name, asset_path = parts\n elif len(parts) == 3:\n tag_name, asset_path, asset_type = parts\n else:\n raise ValueError\n except ValueError:\n raise template.TemplateSyntaxError(\"%r tag invalid arguments\" % token.contents.split())\n \n for s in [ '\"', '\"' ]:\n if (asset_path.startswith(s) and asset_path.endswith(s)):\n asset_path = asset_path[len(s):-len(s)]\n break\n \n if asset_type is None:\n if asset_path in ASSET_TYPES:\n asset_type = None\n else:\n asset_type = asset_path[asset_path.rfind('.') + 1:]\n \n return CustomAssetNode(asset_path, asset_type, library)", "def register_dcc_resource_path(resources_path):\n\n pass", "def _create_assets(self):\n\n assets = Environment(self.app)\n # jQuery is served as a standalone file\n jquery = Bundle('js/jquery-*.min.js', output='gen/jquery.min.js')\n # JavaScript is combined into one file and minified\n js_all = Bundle('js/js_all/*.js',\n filters='jsmin',\n output='gen/app.min.js')\n # SCSS (Sassy CSS) is compiled to CSS\n scss_all = Bundle('scss/app.scss',\n filters='libsass',\n output='gen/app.css')\n assets.register('jquery', jquery)\n assets.register('js_all', js_all)\n assets.register('scss_all', scss_all)\n return assets", "def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)" ]
[ "0.6323401", "0.6234134", "0.5986547", "0.57688546", "0.57355887", "0.57000977", "0.5596467", "0.5571452", "0.55544966", "0.55511576", "0.5550094", "0.54957074", "0.548881", "0.54596376", "0.5429053", "0.537195", "0.5304316", "0.52905613", "0.52882594", "0.5239327", "0.5237541", "0.52305305", "0.5193852", "0.5167999", "0.5150966", "0.5135034", "0.5114002", "0.5109932", "0.5101455", "0.5091949" ]
0.70824754
0
Prepare this dashboard instance to run.
def _prepare(self): # Set configuration defaults and save to the project document self.config.setdefault('PAGINATION', True) self.config.setdefault('PER_PAGE', 25) # Create and configure the Flask application self.app = self._create_app(self.config) # Add assets and routes self.assets = self._create_assets() self._register_routes() # Add module assets and routes self._module_assets = [] for module in self.modules: try: module.register(self) except Exception as e: logger.error('Error while registering {} module: {}'.format( module.name, e)) logger.error('Removing module {} from dashboard.'.format( module.name)) self.modules.remove(module) # Clear dashboard and project caches. self.update_cache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = DashboardConfiguration.objects.filter(type=\"default\").first()\n\n # Add all players from dataset\n group = self.add_players(dashboard_configuration)\n\n # Add all games from the dataset\n self.add_games()\n\n # Create the games played for this group\n self.add_game_played(group)", "def _initialise_run(self) -> None:", "def prepare(self):\n pass", "def prepare(self):\n pass", "def prepare(self):\n pass", "def prepare(self):\n self.__plot_data = [[], []]\n self.final_residual = None\n self.time_value = None\n self.clear_folder_content(self.run_path())\n self.copy_folder_content(self.config_path('system'), self.run_path('system'), overwrite=True)\n self.copy_folder_content(self.config_path('constant'), self.run_path('constant'), overwrite=True)\n self.copy_folder_content(self.config_path('0'), self.run_path('0'), overwrite=True)\n return True", "def prepare(self, config, **kwargs):\n pass", "def initialise(self):\n self.set_up()", "async def prepare(self):\n pass", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def __init__(self, *args, **kwargs):\n super(TurntableCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('assetName', parts[1], True)\n self.setVar('step', parts[2], True)\n self.setVar('variant', parts[3], True)\n self.setVar('pass', parts[4], True)\n self.setVar('renderName', '{}-{}-{}'.format(\n self.var('assetName'),\n self.var('variant'),\n self.var('pass')\n ),\n True\n )", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... 
done with preparing the dataset.\")", "def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def prepare(self):", "def bootstrap(self):\n None", "def __init__(self, *args, **kwargs):\n super(ShotRenderCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n locationParts = parts[0].split(\"-\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('seq', locationParts[1], True)\n self.setVar('shot', parts[0], True)\n self.setVar('step', parts[1], True)\n self.setVar('pass', parts[2], True)\n self.setVar('renderName', '{}-{}'.format(\n self.var('step'),\n self.var('pass')\n ),\n True\n )", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def prepare(cls):", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def prepare(self):\n return self", "def __init__(self, config):\n super().__init__(config)\n\n # Prepare the timer.\n self.timer = 0\n\n # Set the current player index.\n self.current_player_index = 0\n # If we are the client, the server goes first.\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--client\":\n self.current_player_index = 1\n\n # Prepare the phase counter.\n self.__current_phase = Game.PHASE_PREPARE\n # Prepare the shot location store.\n self.__current_fire_location = None\n self.__current_fire_effect = None", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def __setup(self):\n\n backupFolder = self.config['destination']\n self.__createBackupFolder(backupFolder)\n\n # create the project based backup folder\n today = date.today()\n\n if 'projects' in self.config:\n for project in self.config['projects'].iterkeys():\n timestamp = datetime.now().strftime('%d-%H-%M-%S')\n backupDestination = os.path.join(backupFolder, project, str(today.year), today.strftime('%m'), timestamp)\n self.__createBackupFolder(backupDestination)\n self.config['projects'][project]['destination'] = backupDestination", "def _prepare(self):\n logging.warning('-> preparing EMPTY experiments...')", "def setup_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()\n if not os.path.exists(self.plaintext_directory):\n os.makedirs(self.plaintext_directory)\n\n if not os.path.exists(self.training_path):\n os.makedirs(self.training_path)\n\n if not os.path.exists(self.heldout_path):\n os.makedirs(self.heldout_path)\n\n prepare_data(self.paths)", "def startUp(self):\n pass", "def pre_start(self):\n self.make_runpath_dirs()", "def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"agent_1\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)" ]
[ "0.62227356", "0.61323386", "0.6124156", "0.6124156", "0.6124156", "0.603257", "0.6021469", "0.6008526", "0.59598756", "0.5919939", "0.5891619", "0.58891463", "0.5887354", "0.5885531", "0.58851296", "0.58174354", "0.57714", "0.57629365", "0.57341665", "0.57147014", "0.57033503", "0.56929004", "0.5688639", "0.56826", "0.56601846", "0.56572604", "0.56497276", "0.5635938", "0.5629412", "0.5626734" ]
0.6481295
0
Override this method for custom job titles. This method generates job titles. By default, the title is a pretty (but verbose) form of the job state point, based on the project schema.
def job_title(self, job): def _format_num(num): if isinstance(num, bool): return str(num) elif isinstance(num, Real): return str(round(num, 2)) return str(num) try: s = [] for keys in sorted(self._schema_variables()): v = job.statepoint()[keys[0]] try: for key in keys[1:]: v = v[key] except KeyError: # Particular key is present in overall continue # schema, but not this state point. else: s.append('{}={}'.format('.'.join(keys), _format_num(v))) return ' '.join(s) except Exception as error: logger.debug( "Error while generating job title: '{}'. " "Returning job-id as fallback.".format(error)) return str(job)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_job_title(self, job_name):\n return ''", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title(self) -> str:\n raise NotImplementedError", "def title(self) -> str:\n pass", "def job_title(self):\n if \"jobTitle\" in self._prop_dict:\n return self._prop_dict[\"jobTitle\"]\n else:\n return None", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))", "def get_title(self) -> str:\n pass", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def get_title():", "def getTaskTitle(self) -> unicode:\n ...", "def getTaskTitle(self) -> unicode:\n ...", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def _prettyfilename(self):\n return self.title", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def _defaultSyncTitle(self):\n return f'{self.grandparentTitle} - {self.parentTitle} - ({self.seasonEpisode}) {self.title}'", "def title(self):\n return self.definition.title", "def _defaultSyncTitle(self):\n return f'{self.parentTitle} - {self.title}'", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "def title(self):\n return self.__title", "def title(self):\n return self.__title" ]
[ "0.6774112", "0.6684482", "0.6545176", "0.65105885", "0.64988434", "0.6343814", "0.6343415", "0.6338762", "0.62752765", "0.6266623", "0.6256214", "0.6256214", "0.6229947", "0.6229947", "0.6226944", "0.61802864", "0.61802864", "0.6152143", "0.6079925", "0.60740346", "0.6057321", "0.6056076", "0.6050757", "0.60467494", "0.6034354", "0.60245293", "0.60205936", "0.60204035", "0.6003444", "0.6003444" ]
0.756392
0
Override this method for custom job subtitles. This method generates job subtitles. By default, the subtitle is a minimal unique substring of the job id.
def job_subtitle(self, job): return str(job)[:max(8, self._project_min_len_unique_id())]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)", "def getSubtitleURL(self):\n\n # If it is a movie, we use this methodology -\n try:\n IndexingParameters = [\"subtitleUrls\", 0, \"url\"]\n TitleParamters = [\n \"catalogMetadata\", \"catalog\", \"title\", \"episodeNumber\"]\n subRequestObject = requests.get(self.subtitleURLContainer)\n\n parsedJsonObject = json.loads(str(subRequestObject.text))\n SubsURL = parsedJsonObject[IndexingParameters[0]][\n IndexingParameters[1]][IndexingParameters[2]]\n if self.title == \"Amazonsubtitles\":\n try:\n self.title = parsedJsonObject[TitleParamters[0]][TitleParamters[1]][TitleParamters[2]] + \"_\" + str(\n parsedJsonObject[TitleParamters[0]][TitleParamters[1]][TitleParamters[3]])\n except:\n pass\n\n return SubsURL\n\n except:\n pass\n pass", "def get_subtitle_print(subs: List[Track]) -> List[str]:\n data = []\n if not subs:\n data.append(\"--\")\n for sub in subs:\n line_items = []\n\n # following sub.title tree checks and supports three different language and title scenarios\n # The second scenario is the recommended option to choose if you are open to choosing any\n # The third scenario should be used if you have nothing unique to state about the track\n # | Language | Track Title | Output |\n # | ------------ | ----------------------------- | --------------------------------------------- |\n # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |\n # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |\n # | es / Spanish | None | - Spanish, SubRip (SRT) |\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f\"{language}, {sub.title}\")\n else:\n line_items.append(language)\n\n line_items.append(sub.format.replace(\"UTF-8\", \"SubRip (SRT)\"))\n\n line = \"- \" + \", \".join(line_items)\n data += [\n (\" \" + x if i > 0 else x)\n for i, x in enumerate(textwrap.wrap(line, 64))\n ]\n return data", "def create_subtitle(self):\n label_subtitle = Label(self.frame, text=\"Projet Python 2020\", font=(\"Arial\", 25), bg='light blue',\n fg='white')\n label_subtitle.pack()", "def get_subtitle(annotation, sub_duration, video_clip, seen_annotations):\n if len(annotation[\"text\"]) == 0:\n return None\n\n annotation_txt = calculate_needed_subtitle_height(annotation, seen_annotations, video_clip)\n\n txt_clip = TextClip(annotation_txt, color=\"white\", fontsize=70, font='Sans Serif')\n txt_clip = txt_clip.set_position((\"center\", get_subtitle_offset(annotation, seen_annotations, video_clip)))\n txt_clip = txt_clip.set_start(float(annotation[\"time\"]) / 1000.0)\n txt_clip = txt_clip.set_duration(sub_duration)\n\n return txt_clip", "def get_job_title(self, job_name):\n return ''", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. 
\"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('source_path', help=\"Path to the video or audio file to subtitle\",\n nargs='?')\n parser.add_argument('-C', '--concurrency', help=\"Number of concurrent API requests to make\",\n type=int, default=DEFAULT_CONCURRENCY)\n parser.add_argument('-o', '--output',\n help=\"Output path for subtitles (by default, subtitles are saved in \\\n the same directory and name as the source path)\")\n parser.add_argument('-F', '--format', help=\"Destination subtitle format\",\n default=DEFAULT_SUBTITLE_FORMAT)\n parser.add_argument('-S', '--src-language', help=\"Language spoken in source file\",\n default=DEFAULT_SRC_LANGUAGE)\n parser.add_argument('-D', '--dst-language', help=\"Desired language for the subtitles\",\n default=DEFAULT_DST_LANGUAGE)\n parser.add_argument('-K', '--api-key',\n help=\"The Google Translate API key to be used. \\\n (Required for subtitle translation)\")\n parser.add_argument('--list-formats', help=\"List all available subtitle formats\",\n action='store_true')\n parser.add_argument('--list-languages', help=\"List all available source/destination languages\",\n action='store_true')\n\n parser.add_argument('--min_height', help=\"minimum height from 0 - 100%\", type=float, default=93)\n\n parser.add_argument('--max_height', help=\"maximum height from 0 - 100%\", type=float, default=99)\n\n parser.add_argument('--l_v', help=\"Light sensitive\", type=float, default=210)\n\n parser.add_argument('--debug', help=\"Allows to show cropped image on the desktop\", action='store_true', default=True)\n\n parser.add_argument('--cloud', help=\"Use google cloud compute to extract text\", action='store_true', default=False)\n\n parser.add_argument('--disable_time', help=\"Parse time function\", action='store_true')\n\n parser.add_argument('--all', help=\"Render all files\", action='store_true')\n\n args = parser.parse_args()\n\n if args.list_formats:\n print(\"List of formats:\")\n for subtitle_format in FORMATTERS:\n print(\"{format}\".format(format=subtitle_format))\n return 0\n\n if args.list_languages:\n print(\"List of all languages:\")\n for code, language in sorted(LANGUAGE_CODES.items()):\n print(\"{code}\\t{language}\".format(code=code, language=language))\n return 0\n\n if not validate(args):\n return 1\n\n try:\n if args.all:\n for file in os.listdir():\n # *.avi *.flv *.mkv *.mpg *.mp4 *.webm\n if file.endswith('.avi') or file.endswith('.flv') or file.endswith('.mkv') or file.endswith('.mpg') or file.endswith('.mp4') or file.endswith(\".webm\"):\n st = time.time()\n subtitle_file_path = generate_subtitles(\n source_path=file,\n dst_language=args.dst_language,\n output=args.output,\n debug=args.debug,\n cloud=args.cloud,\n disable_time=args.disable_time,\n min_height=args.min_height,\n max_height=args.max_height,\n l_v=args.l_v,\n )\n print(\"Subtitles file created at {} time consumer: {}\".format(subtitle_file_path, time.time() - st))\n else:\n st = time.time()\n subtitle_file_path = generate_subtitles(\n source_path=args.source_path,\n dst_language=args.dst_language,\n output=args.output,\n debug=args.debug,\n cloud=args.cloud,\n disable_time=args.disable_time,\n min_height=args.min_height,\n max_height=args.max_height,\n l_v=args.l_v,\n )\n print(\"Subtitles file created at {} time consumer: {}\".format(subtitle_file_path, time.time() - st))\n except KeyboardInterrupt:\n return 1\n\n return 0", "def _get_job_id(self) -> str:\n return 
self.split_name[2][3:]", "def write_subtitle(self, subtitle: str, break_page: bool, class_txt: str) -> str:\n if break_page:\n str_title = \"\"\"<h2 class=\"break-before\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n else:\n str_title = \"\"\"<h2 class=\\\"\"\"\" + class_txt + \"\"\"\\\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n self.html_doc = self.html_doc + str_title\n return self.html_doc", "def describe_text_translation_job(JobId=None):\n pass", "def subtitle(self):\n worksheet_type = self.options[\"worksheet_type\"].value\n return \"{} - {}\".format(WORKSHEET_OPTIONS[worksheet_type], super().subtitle)", "def friendly_id(self):\n id = f\"{self.annotator_id}_{self.document_title.split('_')[0]}\"\n\n try: # try making an sentence identifier if there is an in_sentence attrib\n sen_id = \",\".join(str(se.element_id + 1) for se in self.in_sentence)\n id += f\"_s{sen_id}\"\n except Exception as e:\n print(e)\n pass\n\n if isinstance(self, Event):\n id += f\"_{self.event_fulltype}\"\n elif isinstance(self, Participant) or isinstance(self, Filler):\n id += f\"_{self.role}\"\n\n text_ellips = (\n (self.text[:15] + \"..\" + self.text[-15:])\n if len(self.text) > 32\n else self.text\n )\n id += f\"-{text_ellips}\"\n return id", "def create_job_id() -> str:\n return str(uuid.uuid1())", "def _get_job_id(self):\n return uuid.uuid4().hex", "def longTitle(self, newLongTitle=None):\n pass", "def _job_id(resource_uuid: str) -> str:\n return resource_uuid if \".\" in resource_uuid else f\"{resource_uuid}.0\"", "def _generate_job_id():\n # CAIP job id can contains only numbers, letters and underscores.\n unique_tag = str(uuid.uuid4()).replace(\"-\", \"_\")\n return \"tf_cloud_train_{}\".format(unique_tag)", "def disable_subtitle(self):\n (\n _,\n __,\n part,\n ) = self._get_current_media()\n part.resetDefaultSubtitleStream()\n self._reset_playback()", "def getSubtitlesContainer(self):\n self.subtitleURLContainer = \"\"\n\n self.subtitleURLContainer += self.parametersDict['PreURL']\n\n for parameters in self.parametersDict:\n if parameters != \"PreURL\":\n self.subtitleURLContainer += \"&\"\n self.subtitleURLContainer += parameters\n self.subtitleURLContainer += \"=\"\n self.subtitleURLContainer += self.parametersDict[parameters]\n pass", "def get_subtitles(self, index: int):\n\n match = self.re_subs[index - 1]\n start = convert_subs_time(match[1])\n end = convert_subs_time(match[2])\n subtitles = match[3]\n subtitles = clean_text(subtitles)\n\n return (subtitles, start, end)", "def create_subtitles(self):\n\n result, selected_observations = self.selectObservations(MULTIPLE)\n if not selected_observations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selected_observations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"The observations with UNPAIRED state events will be removed from the plot<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n selected_observations = [x for x in selected_observations if x not in 
not_paired_obs_list]\n if not selected_observations:\n return\n\n parameters = self.choose_obs_subj_behav_category(selected_observations, 0)\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n export_dir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to save subtitles\",\n os.path.expanduser(\"~\"),\n options=QFileDialog(self).ShowDirsOnly)\n if not export_dir:\n return\n ok, msg = project_functions.create_subtitles(self.pj, selected_observations, parameters, export_dir)\n if not ok:\n logging.critical(msg)\n QMessageBox.critical(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def to_srt(self, subtitles):\n \n srt_data = ''\n subtitle_num = self.start_index\n for subtitle in subtitles:\n subtitle_num += 1\n \n offset = self.start_time\n \n start_time = self._ms_to_time(subtitle['start_time'] + offset)\n end_time = self._ms_to_time(subtitle['end_time'] + offset)\n \n content = subtitle['content'].replace('<br>', ' ')\n \n srt_data += str(subtitle_num) + '\\r\\n'\n srt_data += '%s --> %s' % (start_time, end_time) + '\\r\\n'\n srt_data += content + '\\r\\n'\n srt_data += '\\r\\n'\n \n self.end_index = subtitle_num\n \n return srt_data", "def replace_job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"replace_job_id\")", "def get_subtitles(self, title):\n return library.subtitles.get_subtitle_url(title)", "def schedule_text():", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def __init__(self, subtitle=None):\n self.subtitle = subtitle\n if not self.subtitle:\n self.parse_subtitles()", "def convert_to_tvr_subtitle(df: pd.DataFrame) -> pd.DataFrame:\n\tpass", "def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):\r\n\r\n _ = item.runtime.service(item, \"i18n\").ugettext\r\n\r\n # 1.\r\n html5_ids = get_html5_ids(item.html5_sources)\r\n possible_video_id_list = [item.youtube_id_1_0] + html5_ids\r\n sub_name = item.sub\r\n for video_id in possible_video_id_list:\r\n if not video_id:\r\n continue\r\n if not sub_name:\r\n remove_subs_from_store(video_id, item)\r\n continue\r\n # copy_or_rename_transcript changes item.sub of module\r\n try:\r\n # updates item.sub with `video_id`, if it is successful.\r\n copy_or_rename_transcript(video_id, sub_name, item, user=user)\r\n except NotFoundError:\r\n # subtitles file `sub_name` is not presented in the system. 
Nothing to copy or rename.\r\n log.debug(\r\n \"Copying %s file content to %s name is failed, \"\r\n \"original file does not exist.\",\r\n sub_name, video_id\r\n )\r\n\r\n # 2.\r\n if generate_translation:\r\n for lang, filename in item.transcripts.items():\r\n item.transcripts[lang] = os.path.split(filename)[-1]\r\n\r\n # 3.\r\n if generate_translation:\r\n old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()\r\n new_langs = set(item.transcripts)\r\n\r\n for lang in old_langs.difference(new_langs): # 3a\r\n for video_id in possible_video_id_list:\r\n if video_id:\r\n remove_subs_from_store(video_id, item, lang)\r\n\r\n reraised_message = ''\r\n for lang in new_langs: # 3b\r\n try:\r\n generate_sjson_for_all_speeds(\r\n item,\r\n item.transcripts[lang],\r\n {speed: subs_id for subs_id, speed in youtube_speed_dict(item).iteritems()},\r\n lang,\r\n )\r\n except TranscriptException as ex:\r\n item.transcripts.pop(lang) # remove key from transcripts because proper srt file does not exist in assets.\r\n reraised_message += ' ' + ex.message\r\n if reraised_message:\r\n item.save_with_metadata(user)\r\n raise TranscriptException(reraised_message)" ]
[ "0.6298904", "0.5891134", "0.57354397", "0.57253325", "0.57177824", "0.5635711", "0.5478623", "0.5465625", "0.5414711", "0.54018885", "0.53634965", "0.53630906", "0.535014", "0.5312166", "0.5256253", "0.5206248", "0.5183688", "0.5179891", "0.517678", "0.516628", "0.51632917", "0.5151589", "0.51470906", "0.51376855", "0.5127866", "0.5125466", "0.5065127", "0.50525135", "0.50362927", "0.50329655" ]
0.74129283
0
Override this method for custom job sorting. This method returns a key that can be compared to sort jobs. By default, the key is the natural sort order of the job title.
def job_sorter(self, job): key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL) return key(job)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def job_priority_key(self, job):\n raise NotImplemented", "def sort_key(self):\n ...", "def get_sort_key(self) -> str:\n return self.name", "def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def __hash__(self):\r\n return hash(f'{self.job_id},{self.job_size},{self.priority}')", "def sort(self, key_func):\n pass", "def cmp_to_key(mycmp): # Taken from Python 2.7's functools\n class K(object):\n __slots__ = ['obj']\n\n def __init__(self, obj, *args):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n def __hash__(self):\n raise TypeError('hash not implemented')\n return K", "def benchmark_sort_key(benchmark):\n if not \"label\" in benchmark:\n return \"\"\n return benchmark[\"label\"]", "def sort_key(self, order=None):\n\n # XXX: remove this when issue 5169 is fixed\n def inner_key(arg):\n if isinstance(arg, Basic):\n return arg.sort_key(order)\n else:\n return arg\n\n args = self._sorted_args\n args = len(args), tuple([inner_key(arg) for arg in args])\n return self.class_key(), args, S.One.sort_key(), S.One", "def cmp_to_key(mycmp):\n class K(object):\n __slots__ = ['obj']\n\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n __hash__ = None\n\n return K", "def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey", "def key(self):\n return key_for_name(self.name)", "def _grokker_sort_key(args):\n grokker, name, obj = args\n return priority.bind().get(grokker)", "def __cmp__(self,other):\n try:\n other_key = other._make_key()\n except AttributeError:\n return -1\n return cmp(self._make_key(), other_key)", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return request.param", "def cmp_to_key(mycmp):\n\n class K:\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 
0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n return K", "def _get_field_sort_key(self, field):\n if not field.is_relation:\n return -1\n return 0 if field.many_to_many else 1", "def sortby(self):\n ...", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def key(self, sorting):\n if(sorting & Sorting.NoSorting):\n return (lambda x: 1) # All elements get the same key\n\n if(sorting & Sorting.Date):\n return (lambda x: x.date)\n\n if(sorting & Sorting.Code):\n return (lambda x: x.code)\n\n if(sorting & Sorting.User):\n return (lambda x: x.name)\n\n if(sorting & Sorting.Priviledges):\n # Not having priviledges grants \"points\": the more points the higher in the sort\n return (lambda x: (x.filters & Filters.NonSubs) + (x.filters & Filters.NonMods))\n\n if(sorting & Sorting.TimesRequested):\n return (lambda x: x.times_requested)", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name", "def sortby(self):\n return self._sortby", "def sortKey(self):\n return 'filestore:{0}'.format(id(self.stage))", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def get_key(self):\n return self._determine_key()", "def key(self):\n return self.key_for(self.id)", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)" ]
[ "0.75236887", "0.74009395", "0.71635604", "0.7031687", "0.64361453", "0.6359381", "0.62978643", "0.6156635", "0.61132455", "0.6011168", "0.59698707", "0.59516037", "0.5946783", "0.592785", "0.5922575", "0.59023565", "0.58991647", "0.58991647", "0.5893522", "0.5864389", "0.5845774", "0.58386725", "0.58182067", "0.58018416", "0.5798572", "0.5794754", "0.57921124", "0.57608426", "0.57436067", "0.57374907" ]
0.78755414
0
Registers routes with the Flask application. This method configures context processors, templates, and sets up routes for a basic Dashboard instance. Additionally, routes declared by modules are registered by this method.
def _register_routes(self): dashboard = self @dashboard.app.after_request def prevent_caching(response): if 'Cache-Control' not in response.headers: response.headers['Cache-Control'] = 'no-store' return response @dashboard.app.context_processor def injections(): session.setdefault('enabled_modules', [i for i in range(len(self.modules)) if self.modules[i].enabled]) return { 'APP_NAME': 'signac-dashboard', 'APP_VERSION': __version__, 'PROJECT_NAME': self.project.config['project'], 'PROJECT_DIR': self.project.config['project_dir'], 'modules': self.modules, 'enabled_modules': session['enabled_modules'], 'module_assets': self._module_assets } # Add pagination support from http://flask.pocoo.org/snippets/44/ @dashboard.app.template_global() def url_for_other_page(page): args = request.args.copy() args['page'] = page return url_for(request.endpoint, **args) @dashboard.app.template_global() def modify_query(**new_values): args = request.args.copy() for key, value in new_values.items(): args[key] = value return '{}?{}'.format(request.path, url_encode(args)) @dashboard.app.errorhandler(404) def page_not_found(error): return self._render_error(str(error)) self.add_url('views.home', ['/']) self.add_url('views.settings', ['/settings']) self.add_url('views.search', ['/search']) self.add_url('views.jobs_list', ['/jobs/']) self.add_url('views.show_job', ['/jobs/<jobid>']) self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>']) self.add_url('views.change_modules', ['/modules'], methods=['POST'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# view_method = getattr(locals()[\"flask_views\"], custom_method)\n# docstring = getattr(view_method, \"__doc__\")\n# index_start = docstring.index(\"@app.route\")\n# index_end = index_start + len(\"@app.route\") + 1\n# custom_method_url = docstring[index_end:].replace(\" \",\"\").replace(\"\\n\",\"\")\n# # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke\n# self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._app.mongo))\n self._app.register_blueprint(ro_flask_views)", "def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def register_blueprints(self):\n # Local import due to flask/blueprint circular imports.\n from mmapi.views import api_bp\n self.app.register_blueprint(api_bp, url_prefix='/api')", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n 
'/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def register_blueprints_on_app(app):\n app.register_blueprint(views.main_pages)\n app.register_blueprint(views.main_api, url_prefix='/api')", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def register_routes(self, api):\n # Device Registration\n 
api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def init_app(app):\n app.register_blueprint(index_bl)\n app.register_blueprint(main_bl, url_prefix=\"/main\")\n app.register_blueprint(map_bl, url_prefix=\"/map\")\n app.register_blueprint(login_bl, url_prefix=\"/login\")\n app.register_blueprint(prof_bl, url_prefix=\"/profile\")\n app.register_blueprint(average_bl, url_prefix=\"/average\")", "def add_routes():\n\n # The Home page is accessible to anyone\n @app.route('/admin')\n @roles_required(['Admin', 'Agent'])\n def home_page():\n return render_template('./admin/home.html')\n\n # The Members page is only accessible to authenticated users\n @app.route('/admin/members')\n @roles_required(['Admin', 'Agent']) # Use of @login_required decorator\n def member_page():\n return render_template('./admin/members.html')\n\n # The Admin page requires an 'Admin' role.\n @app.route('/admin/dashboard')\n @roles_required('Admin') # Use of @roles_required decorator\n def admin_page():\n return render_template_string(\"\"\"\n {% extends \"admin_layout.html\" %}\n {% block content %}\n <h2>{%trans%}Admin Page{%endtrans%}</h2>\n <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>\n <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>\n <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>\n <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: [email protected] / Password1)</p>\n <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: [email protected] / Password1')</p>\n <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>\n {% endblock %}\n \"\"\")\n\n @app.route('/register')\n def register_page():\n return render_template('./register.html')\n \n @app.route('/payment')\n @login_required\n def payment_page():\n PP_CLIENT_ID = \"AQnE3_uZrT1Vf56AluXZIR1ir4gUYWAMmxquNRnRzGSVukHeGPzUvu5WsW4FtdYhqrHO06IQkKTr8zOh\"\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n plan, last_payment_at = detail.plan, detail.last_payment_at\n plan_id = ''\n if plan == 'free':\n return redirect('/enter-exposure')\n if plan == 'premium':\n plan_id = 'P-6WL802942Y8719627L4PXXFY'\n elif plan == 'business':\n plan_id = 'P-306056489A234290WL4PXXLI'\n return render_template('./payment.html', plan_id=plan_id, PP_CLIENT_ID=PP_CLIENT_ID)\n \n @app.route('/payment-complete')\n def payment_complete_page():\n add_payment(current_user.email)\n return redirect('/enter-exposure')\n\n 
@app.route('/enter-exposure')\n @login_required\n def enter_exposure_page():\n return render_template('./exposures.html', currencies=CURRENCIES)\n\n @app.route('/report', methods=['GET', 'POST'])\n @login_required\n def report_page():\n user = User.query.filter_by(email=current_user.email).first()\n if request.method == 'POST':\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, None, user.id),\n )\n report = Report.query.get_or_404(request.args.get('id'))\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, report.id, user.id),\n )\n\n @app.route('/suggestion-tool')\n @login_required\n def suggestion_tool_page():\n user = User.query.filter_by(email=current_user.email).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n report_id = request.args.get('report_id') or reports.pop().id\n scenario = request.args.get('scenario') or 1\n\n return render_template(\n './suggestion_tool.html', \n **handle_suggestions(report_id, scenario),\n )\n\n @app.route('/account')\n @login_required\n def account_page():\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n for i in range(len(reports)):\n reports[i].created = reports[i].created.strftime(\"%A, %d-%b-%Y %H:%M:%S GMT%z\")\n return render_template(\n './account.html',\n email=current_user.email,\n first_name=detail.first_name,\n last_name=detail.last_name,\n company_name=detail.company_name,\n plan=detail.plan,\n reports=reports,\n )\n\n @app.route('/contact')\n @login_required\n def contact_page(): \n return render_template(\n './contact.html' \n )\n\n @app.route('/')\n def index():\n return render_template('./splash.html')", "def register_blueprints(app):\n blueprints = {INDEX, DASHBOARD, COMMENT_SECTION}\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def routes(self, *routes):\n self.package.add_routes(*routes)\n for route_group in self.package.routes:\n self.application.make(\"router\").add(\n Route.group(load(route_group, \"ROUTES\", []), middleware=[\"web\"])\n )\n return self", "def register_blueprints():\n from app.routes import blog, client\n blueprints = [blog, client]\n\n for bp in blueprints:\n app.register_blueprint(bp)", "def add_routes(self):\n pass", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = 
'{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def create_routes():\n app_dir = os.path.dirname(os.path.abspath(__file__))\n controller_dir = os.path.join(app_dir, \"controllers\")\n routes = Mapper(directory=controller_dir)\n routes.connect(\"/\", controller=\"root\", action=\"index\")\n routes.connect(\"/body\", controller=\"root\", action=\"body\")\n routes.connect(\"/raise_exception\", controller=\"root\", action=\"raise_exception\")\n routes.connect(\"/raise_wrong_code\", controller=\"root\", action=\"raise_wrong_code\")\n routes.connect(\"/raise_custom_code\", controller=\"root\", action=\"raise_custom_code\")\n routes.connect(\"/raise_code_method\", controller=\"root\", action=\"raise_code_method\")\n routes.connect(\"/render\", controller=\"root\", action=\"render\")\n routes.connect(\"/path-params/{year:\\d+}/{month}/\", controller=\"root\", action=\"path_params\") # noqa: W605\n routes.connect(\"/render_exception\", controller=\"root\", action=\"render_exception\")\n routes.connect(\"/response_headers\", controller=\"root\", action=\"response_headers\")\n routes.connect(\"/identify\", controller=\"root\", action=\"identify\")\n return routes", "def register_blueprints(app):\n from .main import main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .submissions import submissions_blueprint\n app.register_blueprint(submissions_blueprint, url_prefix='/submissions')\n from .revisions import revisions_blueprint\n app.register_blueprint(revisions_blueprint, url_prefix='/revisions')", "def register_blueprints(app):\n app.register_blueprint(user)\n app.register_blueprint(messages)\n app.register_blueprint(auth, url_prefix='/auth')\n app.register_blueprint(tasks)\n 
app.register_blueprint(core)\n app.register_blueprint(errors)", "def register_blueprints(app):\n app.register_blueprint(general.general)\n app.register_blueprint(validate.validate, url_prefix='')\n\n # All done!\n app.logger.info(\"Blueprints registered\")", "def setup_routes(application):\n\n # Do the controller import here after the models have been loaded\n from app.controllers import view_controller, api_controller\n\n # Some helpers to make defining the routes a bit cleaner\n def get(path, rule, func, *args, **kwargs):\n kwargs['methods'] = ['GET']\n application.add_url_rule(path, rule, func, *args, **kwargs)\n\n def post(path, rule, func, *args, **kwargs):\n kwargs['methods'] = ['POST']\n application.add_url_rule(path, rule, func, *args, **kwargs)\n\n get('/', 'index', view_controller.index)\n get('/upload', 'upload', view_controller.upload)\n get('/search', 'search', view_controller.search)\n\n post('/api/publish', 'api_publish', api_controller.publish)\n get('/api/search/<text>', 'api_search', api_controller.search)\n get('/api/search/', 'api_search_empty', api_controller.search)\n get('/api/get/<int:id>', 'api_get_file', api_controller.get_file)", "def build_routes(config):\r\n\r\n config.add_route(\"home\", \"/\")\r\n config.add_route(\"dashboard\", \"/dashboard\")\r\n\r\n # Add routes for the combo loader to match up to static file requests.\r\n config.add_route('convoy', '/combo')\r\n\r\n JS_FILES = config.get_settings()['app_root'] + '/bookie/static/js/build'\r\n application = combo_app(JS_FILES)\r\n config.add_view(\r\n wsgiapp2(application),\r\n route_name='convoy')\r\n\r\n # auth routes\r\n config.add_route(\"login\", \"login\")\r\n config.add_route(\"logout\", \"logout\")\r\n config.add_route(\"reset\", \"{username}/reset/{reset_key}\")\r\n config.add_route(\"signup\", \"signup\")\r\n config.add_route(\"signup_process\", \"signup_process\")\r\n\r\n # celery routes\r\n config.add_route(\"celery_hourly_stats\", \"jobhourly\")\r\n\r\n # bmark routes\r\n config.add_route(\"bmark_recent\", \"recent\")\r\n config.add_route(\"bmark_recent_tags\", \"recent/*tags\")\r\n\r\n config.add_route(\"bmark_recent_rss\", \"rss\")\r\n config.add_route(\"bmark_recent_rss_tags\", \"rss/*tags\")\r\n\r\n config.add_route(\"bmark_readable\", \"bmark/readable/{hash_id}\")\r\n\r\n # user based bmark routes\r\n config.add_route(\"user_bmark_recent\", \"{username}/recent\")\r\n config.add_route(\"user_bmark_recent_tags\", \"{username}/recent/*tags\")\r\n\r\n config.add_route(\"user_bmark_rss\", \"{username}/rss\")\r\n config.add_route(\"user_bmark_rss_tags\", \"{username}/rss/*tags\")\r\n\r\n config.add_route(\"user_bmark_edit\", \"{username}/edit/{hash_id}\")\r\n config.add_route(\"user_bmark_edit_error\",\r\n \"{username}/edit_error/{hash_id}\")\r\n config.add_route(\"user_bmark_new\", \"{username}/new\")\r\n config.add_route(\"user_bmark_new_error\", \"{username}/new_error\")\r\n config.add_route(\r\n \"user_delete_all_bookmarks\",\r\n \"{username}/account/delete_all_bookmarks\")\r\n\r\n # config.add_route(\"bmark_delete\", \"/bmark/delete\")\r\n # config.add_route(\"bmark_confirm_delete\", \"/bmark/confirm/delete/{bid}\")\r\n\r\n # tag related routes\r\n config.add_route(\"tag_list\", \"tags\")\r\n config.add_route(\"tag_bmarks\", \"tags/*tags\")\r\n\r\n # user tag related\r\n config.add_route(\"user_tag_list\", \"{username}/tags\")\r\n config.add_route(\"user_tag_bmarks\", \"{username}/tags/*tags\")\r\n\r\n config.add_route(\"user_import\", \"{username}/import\")\r\n config.add_route(\"search\", 
\"search\")\r\n config.add_route(\"user_search\", \"{username}/search\")\r\n\r\n config.add_route(\"search_results\", \"results\")\r\n config.add_route(\"user_search_results\", \"{username}/results\")\r\n\r\n # matches based on the header\r\n # HTTP_X_REQUESTED_WITH\r\n # ajax versions are used in the mobile search interface\r\n config.add_route(\"search_results_ajax\", \"results/*terms\", xhr=True)\r\n config.add_route(\"search_results_rest\", \"results/*terms\")\r\n config.add_route(\"user_search_results_ajax\",\r\n \"{username}/results*terms\",\r\n xhr=True)\r\n config.add_route(\"user_search_results_rest\", \"{username}/results*terms\")\r\n\r\n config.add_route(\"redirect\", \"redirect/{hash_id}\")\r\n config.add_route(\"user_redirect\", \"{username}/redirect/{hash_id}\")\r\n\r\n config.add_route(\"user_account\", \"{username}/account\")\r\n config.add_route(\"user_export\", \"{username}/export\")\r\n config.add_route(\"user_stats\", \"{username}/stats\")\r\n\r\n #\r\n # NEW API\r\n #\r\n\r\n # stats\r\n config.add_route('api_bookmark_stats',\r\n '/api/v1/stats/bookmarks',\r\n request_method='GET')\r\n config.add_route('api_user_stats',\r\n '/api/v1/stats/users',\r\n request_method='GET')\r\n\r\n # ping checks\r\n config.add_route('api_ping',\r\n '/api/v1/{username}/ping',\r\n request_method='GET')\r\n config.add_route('api_ping_missing_user',\r\n '/api/v1/ping',\r\n request_method='GET')\r\n config.add_route('api_ping_missing_api',\r\n '/ping',\r\n request_method='GET')\r\n\r\n # auth related\r\n config.add_route(\"api_user_account\",\r\n \"/api/v1/{username}/account\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_user_account_update\",\r\n \"/api/v1/{username}/account\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_api_key\",\r\n \"/api/v1/{username}/api_key\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_reset_api_key\",\r\n \"/api/v1/{username}/api_key\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_reset_password\",\r\n \"/api/v1/{username}/password\",\r\n request_method=\"POST\")\r\n\r\n config.add_route(\"api_user_suspend_remove\",\r\n \"api/v1/suspend\",\r\n request_method=\"DELETE\")\r\n config.add_route(\"api_user_suspend\",\r\n \"api/v1/suspend\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_invite\",\r\n \"api/v1/{username}/invite\",\r\n request_method=\"POST\")\r\n\r\n # many bookmark api calls\r\n config.add_route(\"api_bmarks_export\", \"api/v1/{username}/bmarks/export\")\r\n\r\n # we have to search before we hit the bmarks keys so that it doesn't think\r\n # the tag is \"search\"\r\n config.add_route(\"api_bmark_search\", \"api/v1/bmarks/search/*terms\")\r\n config.add_route(\"api_bmark_search_user\",\r\n \"/api/v1/{username}/bmarks/search/*terms\")\r\n\r\n config.add_route('api_bmarks', 'api/v1/bmarks')\r\n config.add_route('api_bmarks_tags', 'api/v1/bmarks/*tags')\r\n config.add_route('api_bmarks_user', 'api/v1/{username}/bmarks')\r\n config.add_route('api_bmarks_user_tags', 'api/v1/{username}/bmarks/*tags')\r\n config.add_route('api_count_bmarks_user',\r\n 'api/v1/{username}/stats/bmarkcount')\r\n\r\n # user bookmark api calls\r\n config.add_route(\"api_bmark_add\",\r\n \"/api/v1/{username}/bmark\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_bmark_update\",\r\n \"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_extension_sync\", \"/api/v1/{username}/extension/sync\")\r\n\r\n config.add_route(\"api_bmark_hash\",\r\n 
\"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_bmark_remove\",\r\n \"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"DELETE\")\r\n\r\n config.add_route(\"api_tag_complete_user\",\r\n \"/api/v1/{username}/tags/complete\")\r\n config.add_route(\"api_tag_complete\",\r\n \"/api/v1/tags/complete\")\r\n\r\n # admin api calls\r\n config.add_route(\"api_admin_readable_todo\", \"/api/v1/a/readable/todo\")\r\n config.add_route(\r\n \"api_admin_readable_reindex\",\r\n \"/api/v1/a/readable/reindex\")\r\n config.add_route(\r\n \"api_admin_accounts_inactive\",\r\n \"/api/v1/a/accounts/inactive\")\r\n config.add_route(\r\n \"api_admin_accounts_invites_add\",\r\n \"/api/v1/a/accounts/invites/{username}/{count}\",\r\n request_method=\"POST\")\r\n config.add_route(\r\n \"api_admin_accounts_invites\",\r\n \"/api/v1/a/accounts/invites\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_imports_list\",\r\n \"/api/v1/a/imports/list\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_imports_reset\",\r\n \"/api/v1/a/imports/reset/{id}\",\r\n request_method=\"POST\")\r\n\r\n config.add_route(\r\n \"api_admin_users_list\",\r\n \"/api/v1/a/users/list\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_new_user\",\r\n \"/api/v1/a/users/add\",\r\n request_method=\"POST\")\r\n config.add_route(\r\n \"api_admin_del_user\",\r\n \"/api/v1/a/users/delete/{username}\",\r\n request_method=\"DELETE\")\r\n config.add_route(\r\n \"api_admin_bmark_remove\",\r\n \"/api/v1/a/bmark/{username}/{hash_id}\",\r\n request_method=\"DELETE\")\r\n\r\n config.add_route(\r\n \"api_admin_applog\",\r\n \"/api/v1/a/applog/list\",\r\n request_method=\"GET\")\r\n\r\n config.add_route(\r\n \"api_admin_non_activated\",\r\n \"/api/v1/a/nonactivated\",\r\n request_method=\"GET\")\r\n\r\n config.add_route(\r\n \"api_admin_delete_non_activated\",\r\n \"/api/v1/a/nonactivated\",\r\n request_method=\"DELETE\")\r\n\r\n # these are single word matching, they must be after /recent /popular etc\r\n config.add_route(\"user_home\", \"{username}\")\r\n\r\n return config", "def init_app():\n app = Flask(__name__)\n\n with app.app_context():\n # Import parts of our core Flask app\n from . 
import routes\n\n from .plotlydash.index import init_dashboard\n app = init_dashboard(app)\n\n return app", "def register_routes(\n config: Configurator,\n route_name_ext: str = \"x-pyramid-route-name\",\n root_factory_ext: str = \"x-pyramid-root-factory\",\n apiname: str = \"pyramid_openapi3\",\n route_prefix: t.Optional[str] = None,\n) -> None:\n\n def action() -> None:\n spec = config.registry.settings[apiname][\"spec\"]\n for pattern, path_item in spec[\"paths\"].items():\n route_name = path_item.get(route_name_ext)\n if route_name:\n root_factory = path_item.get(root_factory_ext)\n config.add_route(\n route_name,\n pattern=route_prefix + pattern\n if route_prefix is not None\n else pattern,\n factory=root_factory or None,\n )\n\n config.action((\"pyramid_openapi3_register_routes\",), action, order=PHASE1_CONFIG)", "def configure_blueprints(app):\n\n for blueprint in _blueprints:\n app.register_blueprint(blueprint)", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def add_app_routes(app):\n\n # Routes for demo pages to visit with a web browser\n @app.route('/')\n def index():\n return render_template('index.html')\n\n @app.route('/video_stream_demo')\n def video_stream_demo():\n \"\"\"Video streaming demo page.\"\"\"\n return render_template('video_stream_demo.html')\n\n @app.route('/image_capture_demo')\n def image_capture_demo():\n \"\"\"Image capture demo page.\"\"\"\n return render_template('image_capture_demo.html')\n\n\n\n # Routes to use to use for programmatic connectivity\n @app.route('/video_feed')\n def video_feed():\n \"\"\"Video streaming route.\"\"\"\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n @app.route('/image')\n def image():\n \"\"\"Image capture route.\"\"\"\n return Response(gen_image(Camera()),\n mimetype='image/jpeg')\n\n # TODO: Probably makes more sense to have a POST url \n # so it'll be easier to set multiple settings\n @app.route('/settings')\n def settings():\n \"\"\"Settings route\"\"\"\n stop_req = request.args.get('stop')\n frame_sleep_req = request.args.get('frame_sleep')\n\n global stop\n if stop_req == '1':\n stop = True\n elif stop_req == '0':\n stop = False\n\n global frame_sleep\n if frame_sleep_req:\n frame_sleep = int(frame_sleep_req)\n\n return jsonify({'message': 'Set settings: {}'.format(request.args)})\n\n\n return app" ]
[ "0.72350115", "0.6712779", "0.67111504", "0.66817683", "0.6653695", "0.6587917", "0.6505443", "0.64713275", "0.6450273", "0.63990045", "0.63630635", "0.63256997", "0.63007975", "0.6283785", "0.6241679", "0.6237398", "0.62150884", "0.61836624", "0.6177383", "0.6159926", "0.6128027", "0.61154926", "0.611158", "0.6100133", "0.6093866", "0.6023158", "0.6012855", "0.5919565", "0.5889659", "0.58879066" ]
0.7511353
0
Clear project and dashboard server caches. The dashboard relies on caching for performance. If the data space is altered, this method may need to be called before the dashboard reflects those changes.
def update_cache(self): # Try to update signac project cache. Requires signac 0.9.2 or later. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) try: self.project.update_cache() except Exception: pass # Clear caches of all dashboard methods members = inspect.getmembers(self, predicate=inspect.ismethod) for func in filter(lambda f: hasattr(f, 'cache_clear'), map(lambda x: x[1], members)): func.cache_clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cache():\n # TODO\n pass", "def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()", "def clear_cache(self):\n pass", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def clear(self, cacheDir):", "def clear_data_cache():\n load_glove.cache_clear()", "def clear_required_caches():\n\n return get_component(CachingPackage.COMPONENT_NAME).clear_required_caches()", "def clear_cache(self):\n requests.get(url=self.proxy_url+'/clear_cache')", "def clear_cache(self):\n self.part_cache.clear()", "def _clear_cache(self):\n\n self._cache = dict()", "def _clear_cache(self):\n self.cache = {}", "def clear_cache(self):\n local_app_data = os.getenv('LOCALAPPDATA')\n edge_root = os.path.join(local_app_data, 'Packages',\n 'Microsoft.MicrosoftEdge_8wekyb3d8bbwe')\n directories = ['AC', 'AppData']\n for directory in directories:\n path = os.path.join(edge_root, directory)\n try:\n shutil.rmtree(path)\n except Exception:\n pass", "def cache_clear(self):\n\t\tself.__cache = {}", "def clear_cache():\n cache = Cache()\n cache.reset()", "def clear_all(self):\n self.clear_redis()\n self.clear_cache()", "def clear(self):\n if self.__log:\n self.__logger.info(\"Cleared cache\")\n shutil.rmtree(self.cacheDir) # Remoeve the cache directory\n os.mkdir(self.cacheDir) # Create cache dir again\n self.__recentAccessed = [] # Reset recent accessed nodes", "def test_clear_cache(self):\n api_helpers.clear_cache()", "def clear_scache(cls) -> None:\n cls.scache = {}", "def clear_cache(self):\n self._cache = dict()", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def clear_dashboard(dashId):\n default_tables = {\n \"Counts\": {\n \"sizex\": 10,\n \"sizey\": 13,\n \"row\": 1,\n \"col\": 1\n },\n \"Top Campaigns\": {\n \"sizex\": 25,\n \"sizey\": 8,\n \"row\": 1,\n \"col\": 20\n },\n \"Recent Indicators\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 15,\n \"col\": 1\n },\n \"Recent Emails\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 23,\n \"col\": 1\n },\n \"Recent Samples\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 31,\n \"col\": 1\n },\n }\n try:\n for search in SavedSearch.objects(dashboard=dashId):\n if search.isDefaultOnDashboard:\n tempDict = default_tables[search.name]\n search.sizex = tempDict[\"sizex\"]\n search.sizey = tempDict[\"sizey\"]\n search.row = tempDict[\"row\"]\n search.col = tempDict[\"col\"]\n search.save()\n else:\n search.update(unset__col=1,unset__row=1,unset__sizex=1)\n except Exception as e:\n print e\n return {'success': False, \n 'message': \"An unexpected error occurred while resetting dash. 
Please refresh and try again\"}\n return {'success': True, \n 'message': \"Dashboard Reset\"}", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def clear(self):\n try:\n shutil.rmtree(self._cache_path)\n self._init_cache_path()\n except Exception:\n return", "def reset_cache():\n global _CACHE\n _CACHE.clear()", "def clear_cache(self):\n self.mongo_database.cache.delete_many({})", "def clear_cache(sender, **kwargs):\n# print \"Post save() -> clear cache\"\n cache.clear() # FIXME: This cleaned the complete cache for every site!", "def _clean_cache(self):\n del self._cache\n self._cache = {}", "def invalidateCaches(self):\n\n self._vertexCacheValid = False\n self._genusCacheValid = False\n self._vertexCharacteristicCacheValid = False\n self._coreCacheValid = False", "def clear_cache():\n sudo('service varnish restart')", "def _clear_caches(self):\n self._brushes = {}\n self._formats = {}" ]
[ "0.67237765", "0.67170703", "0.6651066", "0.65608287", "0.6541997", "0.64387864", "0.642453", "0.63758636", "0.6363097", "0.6251716", "0.62509453", "0.62315863", "0.6219807", "0.6218337", "0.6209813", "0.62015516", "0.61923134", "0.61808413", "0.6152699", "0.61314887", "0.6119369", "0.61038476", "0.6102192", "0.60880035", "0.6086108", "0.6078355", "0.6063535", "0.60626245", "0.60619646", "0.60574824" ]
0.72654325
0
Runs the command line interface. Call this function to use signac-dashboard from its command line.
def main(self): def _run(args): kwargs = vars(args) if kwargs.get('host', None) is not None: self.config['HOST'] = kwargs.pop('host') if kwargs.get('port', None) is not None: self.config['PORT'] = kwargs.pop('port') self.config['PROFILE'] = kwargs.pop('profile') self.config['DEBUG'] = kwargs.pop('debug') self.run() parser = argparse.ArgumentParser( description="signac-dashboard is a web-based data visualization " "and analysis tool, part of the signac framework.") parser.add_argument( '--debug', action='store_true', help="Show traceback on error for debugging.") parser.add_argument( '--version', action='store_true', help="Display the version number and exit.") subparsers = parser.add_subparsers() parser_run = subparsers.add_parser('run') parser_run.add_argument( '-p', '--profile', action='store_true', help='Enable flask performance profiling.') parser_run.add_argument( '-d', '--debug', action='store_true', help='Enable flask debug mode.') parser_run.add_argument( '--host', type=str, help='Host (binding address). Default: localhost') parser_run.add_argument( '--port', type=int, help='Port to listen on. Default: 8888') parser_run.set_defaults(func=_run) # This is a hack, as argparse itself does not # allow to parse only --version without any # of the other required arguments. if '--version' in sys.argv: print('signac-dashboard', __version__) sys.exit(0) args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) if not hasattr(args, 'func'): parser.print_usage() sys.exit(2) try: self.observer.start() args.func(args) except RuntimeWarning as warning: logger.warning("Warning: {}".format(warning)) if args.debug: raise sys.exit(1) except Exception as error: logger.error('Error: {}'.format(error)) if args.debug: raise sys.exit(1) finally: self.observer.stop() self.observer.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def cli():\n pass", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def main_cli():\n pass", "def main():\n CLI_APP.run()", "def cli():\r\n pass", "def cli():\n parser=argparse.ArgumentParser(\n description = 'Rotate through a given AWS account for per application keys. Keys are temporarily loaded into environment variables. Asks for a SSO cookie value.')\n parser.add_argument('role', help = 'Role to harvest session keys as')\n parser.add_argument(\n '-c', '--command', help = 'Custom command to run.', default = None)\n parser.add_argument('-a', '--application',\n help = 'Provide a specific application', default = None)\n parser.add_argument(\n '-l', '--list', help = 'Provide a list of applications. Lists should be one Application#,Application Name per line', default = None)\n parser.add_argument(\n '-p', '--awspx', help = 'Run awspx across all applications. Install from https://github.com/FSecureLABS/awspx', action=argparse.BooleanOptionalAction, default = False)\n parser.add_argument(\n '-s', '--scoutsuite', help = 'Run ScoutSuite across all applications. Install from https://github.com/nccgroup/ScoutSuite', action=argparse.BooleanOptionalAction, default = False)\n args=parser.parse_args()\n\n print(\"Please provide an SSO cookie value. Obtain from the dev console on a web browser, probably named something like x-amz-sso_authn\")\n token=input()\n\n return args.role, args.list, args.application, args.command, token, args.awspx, args.scoutsuite", "def cli():\n logger.debug('cli() called')", "def cli():\n\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass" ]
[ "0.73368174", "0.6704575", "0.66263586", "0.6620892", "0.65879875", "0.65856606", "0.6576689", "0.6504645", "0.64861155", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405" ]
0.70329654
1
Test the popxl simple addition example
def test_documentation_popxl_addition(self):
    filename = "simple_addition.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_add_two_numbers(self):\n self.assertEqual(add(5, 9), 14)", "def test_add2(self):\n self.assertEqual(5, add(10 , -5), \"should be 5\")", "def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")", "def test_add4(self):\n self.assertEqual(-15, add(-10 , -5), \"should be -15\")", "def test_add_numbers():\n assert add(3, 8) == 11", "def test_add_numbers(self):\n self.assertEqual(addNums(3, 8), 11)", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add3(self):\n self.assertEqual(-5, add(-10 , 5), \"should be -5\")", "def test_getSum_twoNumbers(self):\r\n self.assertEqual(17, Arith().add(8, 9))", "def test_add_numbers(self):\n a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_add_int(self):\n self.assertEqual(operations.add(3,4), 7)", "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def test_add(self):\n print('test_add')\n \n self.assertEqual(120, add(100, 20))\n self.assertNotEqual(3, add(10, 10))", "def test_two_plus_two():\n assert add.add(2, 2) == 4", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def test_addition(l1, l2):\n result = addTwoNumbers(l1, l2)\n assert result.val == '5'\n assert result.next.val == '8'\n assert result.next.next.val == '0'\n assert result.next.next.next.val == '1'", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def test_arithmetic(self):\n for test in [\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Int(5), right = sir.Int(6)), SymbolType.Integer),\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Bytes('05'), right = sir.Bytes('06')), SymbolType.Integer),\n ]:\n self._test(test)", "def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_addition(self):\n\n a1 = points.Point(3, -2, 5)\n a2 = vectors.Vector(-2, 3, 1)\n\n a3 = a1 + a2\n\n self.assertEqual(a3,\n tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 1, 6, 1))\n self.assertEqual(a3, points.Point(1, 1, 6))" ]
[ "0.65502137", "0.65502137", "0.65502137", "0.64431727", "0.63226366", "0.6308587", "0.628632", "0.6280519", "0.6253776", "0.6226606", "0.62078625", "0.61861867", "0.61754084", "0.6170412", "0.61692727", "0.615465", "0.6137472", "0.61353207", "0.6135082", "0.6111764", "0.6055483", "0.6052546", "0.60271466", "0.60074615", "0.5998734", "0.5931518", "0.59088784", "0.59009635", "0.5900211", "0.5895206" ]
0.70790774
0
Test the popxl basic subgraph example
def test_documentation_popxl_basic_subgraph(self):
    filename = "basic_graph.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def populate_graph(self):", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', 
values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def test_extract_graph(default_plugin_resolver):\n dpr = default_plugin_resolver\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n desired_nodes = {2, 5, 6}\n nx_extracted_graph = nx.Graph()\n nx_extracted_graph.add_weighted_edges_from([(2, 5, 5), (5, 6, 10), (6, 2, 11)])\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n desired_nodes_wrapped = dpr.wrappers.NodeSet.PythonNodeSet(desired_nodes)\n extracted_graph = dpr.wrappers.Graph.NetworkXGraph(nx_extracted_graph)\n MultiVerify(\n dpr, \"subgraph.extract_subgraph\", graph, desired_nodes_wrapped\n ).assert_equals(extracted_graph)", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_setup(self):\n self.setup()\n print(\"Nodes in graph\")\n for node in self.graph.graph.nodes:\n print(node)\n print(\"Edges in graph\")\n for edge in self.graph.graph.edges(data=True):\n print(edge)", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def graph(self):\n ...", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> 
'.join([f\"{v}\" for v in p]))", "def test_get_vertex_from_subvertex(self):\n subvertices = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n\n subvert1 = PartitionedVertex(None, \"\")\n subvert2 = PartitionedVertex(None, \"\")\n\n graph_mapper = GraphMapper()\n vert = TestVertex(10, \"Some testing vertex\")\n\n vertex_slice = Slice(0, 1)\n graph_mapper.add_subvertex(subvert1, vertex_slice, vert)\n vertex_slice = Slice(2, 3)\n graph_mapper.add_subvertex(subvert2, vertex_slice, vert)\n\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert1))\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert2))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[0]))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[1]))", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def test_dot(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_Tree():", "def main(dot_file):\n global SUBGRAPHS, PARENTS\n graph = graph_from_dot(dot_file)\n SUBGRAPHS = {}\n PARENTS = {}\n extract_subgraphs([graph])\n \n for (name, subgraph) in SUBGRAPHS.items():\n nodes = extract_nodes(subgraph)\n for node in nodes:\n (name_function, result, function_call_line) = analyse_label_function_calls(node)\n if name_function is not None:\n (label_node1, label_node2, bb) = create_labels(node, result, function_call_line)\n node.set_label(label_node1)\n nodes_to_update = get_nodes_to_update(subgraph, graph.get_name())\n update_nodes(nodes_to_update, bb)\n nodes.append(create_new_node(subgraph, node, label_node2, bb))\n update_edges(subgraph, graph.get_name(), bb)\n create_new_edge(graph, node.get_name(), SUBGRAPHS[name_function])\n recreate_subgraphs_name()\n export_graph(graph, \"main_output\", \"png\")\n export_graph(graph, \"main_output\", \"dot\")\n return graph", "def testGraphExtract(self):\n graph = Graph2()\n graph.parseFile(TESTFILE)", "def __init__(self, prefix, downstream, upstream, root):\n super(SubGraph, self).__init__(prefix, downstream, upstream, root)", "def test_k_core(default_plugin_resolver):\n dpr = default_plugin_resolver\n k = 2\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n nx_k_core_graph = nx.Graph()\n nx_k_core_graph.add_weighted_edges_from(\n [(2, 5, 5), (5, 6, 10), (6, 2, 11),]\n )\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n k_core_graph = dpr.wrappers.Graph.NetworkXGraph(nx_k_core_graph)\n 
MultiVerify(dpr, \"subgraph.k_core\", graph, k).assert_equals(k_core_graph)", "def __test(graph): \n \n if not isinstance(graph, basegraph):\n raise TypeError(\"Expected type was Graph.\")\n \n print \"### iPATH TEST DATA STRUCTURE\"\n print \"### Data Type: Graph ({})\".format(str(graph.__class__.__bases__[0].__name__))\n print \"### Implementation: {}\".format(str(graph.__class__.__name__))\n \n print \"\\n*** ADD NODE ***\\n\" \n for i in range(10):\n print \"add_node({})\".format(str(i)) \n graph.add_node(i) \n \n print \"\\n*** ADD ARC ***\\n\" \n for i in range(10):\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 1), str(2 * (i + 1)))\n graph.add_arc(i, i + 1, 2 * (i + 1))\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 2), str(2 * (i + 2)))\n graph.add_arc(i, i + 2, 2 * (i + 2))\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE NODE ***\\n\" \n print \"remove_node(5)\"\n graph.remove_node(5)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE ARC ***\\n\" \n print \"remove_arc(7, 8)\" \n graph.remove_arc(7, 8)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** INCIDENT ARCS ***\\n\" \n for node in graph.get_nodes():\n print \"Incident Arcs of {}\\t{}\\n\".format(str(node), str(graph.get_incident_arcs(node._id)))\n \n print \"\\n*** ADJACENCY ***\\n\" \n for i in range(10):\n for j in range(10):\n if graph.are_adjacent(i, j) == True:\n print \"Adjacency Between ({}, {}): True\\n\".format(str(i), str(j))\n \n print \"\\n*** NODES ***\\n\" \n print \"numNodes: {}\\n\".format(str(graph.get_num_nodes())) \n print \"Nodes: {}\\n\".format(str(graph.get_nodes())) \n \n print \"\\n*** ARCS ***\\n\" \n print \"numArcs: {}\\n\".format(str(graph.get_num_arcs())) \n print \"Arcs: {}\\n\".format(str(graph.get_arcs())) \n \n print \"\\n*** SEARCH BFS ***\\n\" \n for i in range(10): \n print \"bfs({})\".format(str(i))\n Lbfs = graph.bfs(i)\n for n in Lbfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n*** SEARCH DFS ***\\n\" \n for i in range(9):\n print \"dfs({})\".format(str(i))\n Ldfs = graph.dfs(i)\n for n in Ldfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n### END OF TEST ###\\n\"" ]
[ "0.75176567", "0.62960446", "0.60611916", "0.6042744", "0.6038377", "0.5965643", "0.5938931", "0.58985436", "0.587293", "0.5843227", "0.58157665", "0.5796282", "0.5735608", "0.5665891", "0.5604234", "0.5579183", "0.5560783", "0.5559308", "0.5536412", "0.5530756", "0.5525885", "0.548392", "0.5461244", "0.545128", "0.5449947", "0.5442631", "0.542125", "0.54199004", "0.5406626", "0.54005134" ]
0.8505168
0
Test the popxl replication example
def test_documentation_popxl_replication(self):
    filename = "replication.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_replicate_pg_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def test_replicate_mariadb_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py 
--replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_deployment(self):\n pass", "def test_backup_restore_with_xdcr(self):\n rest_src = RestConnection(self.backupset.cluster_host)\n rest_dest = RestConnection(self.servers[1])\n\n try:\n rest_src.remove_all_replications()\n rest_src.remove_all_remote_clusters()\n kwargs = {}\n if self.input.param(\"enforce_tls\", False):\n kwargs[\"demandEncryption\"] = 1\n trusted_ca = rest_dest.get_trusted_CAs()[-1][\"pem\"]\n kwargs[\"certificate\"] = trusted_ca\n rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,\n self.backupset.cluster_host_password, \"C2\", **kwargs)\n rest_dest.create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(10)\n repl_id = rest_src.start_replication('continuous', 'default', \"C2\")\n if repl_id is not None:\n self.log.info(\"Replication created successfully\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n tasks = self._async_load_all_buckets(self.master, gen, \"create\", 0)\n\n reps = rest_src.get_replications()\n start_time = datetime.datetime.now()\n while reps[0][\"status\"] != \"running\" or reps[0][\"changesLeft\"] > 0:\n if (datetime.datetime.now() - start_time).total_seconds() > 600:\n self.fail(\"Timed out waiting for replications\")\n self.sleep(10, \"Waiting for replication...\")\n reps = rest_src.get_replications()\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n for task in tasks:\n task.result()\n finally:\n rest_dest.delete_bucket()", "def test_clone_scenario(self):\n pass", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_system(self):\n pass", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_check_replication_ok(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result, [(STATUS_OK, 'OK')])", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def 
test_redis_increase_replica_count_usual_case():", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def replication(self, replication):\n self._replication = replication", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)", "def test_connection_duplication():", "def test_ipcrm():\n IPCComm.ipcrm()", "def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):\n src_rse1_name = 'XRD1'\n src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)\n src_rse2_name = 'XRD2'\n src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)\n jump_rse_name = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)\n dst_rse_name = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)\n\n all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]\n did = did_factory.upload_test_file(src_rse1_name)\n\n # Copy replica to a second source. To avoid the special case of having a unique last replica, which could be handled in a special (more careful) way\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n\n # Submit transfers to FTS\n # Ensure a replica was created on the intermediary host with epoch tombstone\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.SUBMITTED\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['tombstone'] == datetime(year=1970, month=1, day=1)\n assert replica['state'] == ReplicaState.COPYING\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # Fake an existing unused source with raking of 0 for the second source.\n # The ranking of this source should remain at 0 till the end.\n\n @transactional_session\n def __fake_source_ranking(*, session=None):\n models.Source(request_id=request['id'],\n scope=request['scope'],\n name=request['name'],\n rse_id=src_rse2_id,\n dest_rse_id=request['dest_rse_id'],\n ranking=0,\n bytes=request['bytes'],\n url=None,\n is_using=False). 
\\\n save(session=session, flush=False)\n\n __fake_source_ranking()\n\n # The intermediate replica is protected by its state (Copying)\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.COPYING\n\n # Wait for the intermediate replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # ensure tha the ranking was correct for all sources and intermediate rses\n assert __get_source(request_id=request['id'], src_rse_id=src_rse1_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=src_rse2_id, **did).ranking == 0\n # Only group_bulk=1 part of the path was submitted.\n # run submitter again to copy from jump rse to destination rse\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Wait for the destination replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)\n\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=jump_rse_id, **did)\n\n # 3 request: copy to second source + 2 hops (each separately)\n # Use inequalities, because there can be left-overs from other tests\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 3\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_common_submit_transfer_total') >= 3\n # at least the failed hop\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0\n finally:\n\n @transactional_session\n def _cleanup_all_usage_and_limits(rse_id, *, session=None):\n session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()\n session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()\n\n _cleanup_all_usage_and_limits(rse_id=jump_rse_id)", "def test_documentation_popxl_autodiff(self):\n filename = \"autodiff.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def testExecute(self):\n client = ICSClientFactory(self.config, 'slave', 'master') \n\n #Test reading and writing coils\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == False \n assert reply[1] == False\n reply = client.execute(cst.WRITE_SINGLE_COIL, 10, 1, output_value=1)\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == True\n assert reply[1] == False\n reply = client.execute(cst.WRITE_MULTIPLE_COILS, 10, 2,\n output_value=[0,1])\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[1] == True\n assert reply[0] == False\n \n #Test reading and writing input regs\n #reply = client.execute(cst.READ_INPUT_REGISTERS, 30002, 1)\n #print \"BRDEBUG: Reply: \", reply\n #assert reply[0] == 17\n\n #Test reading and setting holding regs\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 2)\n #print \"BRDEBUG: Reply\", reply\n assert reply[0] == 16752 \n assert reply[1] == 0 \n reply = client.execute(cst.WRITE_SINGLE_REGISTER, 40003, 1,\n output_value = 
10)\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 1)\n #print \"BRDEBUG: Reply\", reply\n assert reply[0] == 10", "def empty_test_case():\n # Mirror server\n empty_test_path = os.path.dirname(os.path.realpath(__file__)) + \"/empty.rpl\"\n test_config = {'ROOT_ADDR': '127.0.0.10',\n '_SOCKET_FAMILY': socket.AF_INET}\n return scenario.parse_file(empty_test_path)[0], test_config", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)" ]
[ "0.6849515", "0.5858241", "0.567002", "0.5621691", "0.5603418", "0.5590965", "0.55826133", "0.55337054", "0.55278075", "0.5499861", "0.5477127", "0.539578", "0.53619903", "0.5334775", "0.5324242", "0.53177917", "0.53116417", "0.53056175", "0.52885664", "0.5232968", "0.5212479", "0.518361", "0.5143064", "0.51113", "0.50958025", "0.5082429", "0.500379", "0.5002982", "0.4990406", "0.49888358" ]
0.8200137
0
Test the popxl create multiple subgraph example
def test_documentation_popxl_create_multi_subgraph(self):
    filename = "create_multi_graphs_from_same_func.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def populate_graph(self):", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def create_nodes(self):", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def gen_graph(self):", "def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = 
Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, 
{4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert 
pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def graph(self):\n ...", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def main(dot_file):\n global SUBGRAPHS, PARENTS\n graph = graph_from_dot(dot_file)\n SUBGRAPHS = {}\n PARENTS = {}\n extract_subgraphs([graph])\n \n for (name, subgraph) in SUBGRAPHS.items():\n nodes = extract_nodes(subgraph)\n for node in nodes:\n (name_function, result, function_call_line) = analyse_label_function_calls(node)\n if name_function is not None:\n (label_node1, label_node2, bb) = create_labels(node, result, function_call_line)\n node.set_label(label_node1)\n nodes_to_update = get_nodes_to_update(subgraph, graph.get_name())\n update_nodes(nodes_to_update, bb)\n nodes.append(create_new_node(subgraph, node, label_node2, bb))\n update_edges(subgraph, graph.get_name(), bb)\n create_new_edge(graph, node.get_name(), SUBGRAPHS[name_function])\n recreate_subgraphs_name()\n export_graph(graph, \"main_output\", \"png\")\n export_graph(graph, \"main_output\", 
\"dot\")\n return graph", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def create_four_subplots():\n pass", "def sub_graph_merging(self):\n raise NotImplementedError()", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n 
Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def test_build_poset_lattice():\n lattice = build_poset_lattice(all_games_gen(2))\n assert len(lattice.edges()) == 36", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))", "def generate_pristine_graphene(x_dim, y_dim, filename1):\n y_number = round(y_dim / 1.228)\n x_number = int(x_dim / 2.127)\n x_addition = (x_dim / 2.127 ) % 1\n list_of_coords = []\n a = 0\n b = 0\n c = 0\n list_of_coords = fill_row(list_of_coords, y_number, a,b,c, [], 5, prev = False)\n for i in range(1,x_number):\n if (i == x_number-1):\n if (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n fill_hexagon(list_of_coords, -1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n if (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n elif (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n list_x_steps = [0, 0.33, 0.66, 1]\n x_step = min(list_x_steps, key=lambda x:abs(x-x_addition))\n if (x_step == 0.33):\n list_of_coords = fill_row(list_of_coords, y_number, 0, 0, 0, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, 0, 0, [0, 1, 2, 3, 4], full=6, prev=False)\n elif (x_step == 0.66):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = False)\n elif (x_step == 1):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n writepdb3(list_of_coords, filename1)\n print('done.')\n return list_of_coords" ]
[ "0.79100144", "0.65580076", "0.64052117", "0.63540345", "0.6348667", "0.61608034", "0.6133355", "0.5854858", "0.5819954", "0.5817296", "0.5729944", "0.57231635", "0.5651493", "0.5646747", "0.5616531", "0.5610424", "0.56057763", "0.5573422", "0.55470526", "0.5529936", "0.55223936", "0.548946", "0.54719025", "0.5463685", "0.542794", "0.54104775", "0.5367909", "0.5331161", "0.53173673", "0.531653" ]
0.8254092
0
Test the code loading example
def test_documentation_popxl_code_loading(self):
    filename = "code_loading.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_example(decorated_example):\n import visual_coding_2p_analysis", "def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scripts = os.listdir(example_dir)\n for script in example_scripts:\n if script.endswith('.py'):\n fname = os.path.join(example_dir, script)\n try:\n subprocess.check_call(['python', fname])\n except subprocess.CalledProcessError:\n raise AssertionError('Example {} failed'.format(fname))", "def test_load_simple_module():\n loader = Loader()\n main_fname = loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def test_script(self) -> None:\n main()", "def test_module(self):\n pass", "def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0", "def _test():\n import doctest", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def test_documentation_popxl_nested_code_loading(self):\n filename = \"code_loading_nested.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test():\n import doctest\n from . 
import locate\n return doctest.testmod(locate)", "def test_main():\n # Setup\n # Exercise\n # Verify", "def __main() :\n launchTests()", "def runtest(self):", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test():\n pass", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)", "def test(self):\n pass", "def tests():", "def test():\n loader = unittest.TestLoader()\n suite = loader.discover(os.path.dirname(__file__))\n runner = unittest.TextTestRunner()\n runner.run(suite)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_functional(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n self.assertEqual(\n data['items'][0]['fullName'],\n 'example.example'\n )", "def setUp(self):\n self.example = Example()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_examples(fname):\n app = use_app()\n app.start_timer(0, app.quit)\n if \"OLD\" in fname:\n with pytest.warns(FutureWarning):\n runpy.run_path(fname)\n else:\n try:\n runpy.run_path(fname)\n except ImportError as e:\n if \"Numpy required to use images\" in str(e):\n pytest.skip(\"numpy unavailable: skipping image example\")" ]
[ "0.7195194", "0.7059802", "0.6890149", "0.6869557", "0.6869557", "0.6869557", "0.6869557", "0.67875314", "0.6767249", "0.6749942", "0.67483723", "0.67235684", "0.6716485", "0.6689319", "0.66765344", "0.66539055", "0.66304076", "0.6627514", "0.65707266", "0.6563701", "0.6548684", "0.65423787", "0.6523631", "0.6505159", "0.6482269", "0.6477886", "0.64554393", "0.6444179", "0.64405406", "0.6438618" ]
0.74833703
0
Test the nested code loading example
def test_documentation_popxl_nested_code_loading(self):
    filename = "code_loading_nested.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_example(decorated_example):\n import visual_coding_2p_analysis", "def inner_test():\n pass", "def inner_test():\n pass", "def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test_loading_document(self):", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_simple(self):\n bento_info = \"\"\"\\\nName: foo\n\nLibrary:\n Packages: foo, foo.bar\n Modules: fubar\n\"\"\"\n self._test_run(bento_info)", "def test_module(self):\n pass", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def _test():\n import doctest", "def test_001(settings, inspector):\n sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')\n\n inspector.inspect(sourcepath)\n\n inspector.reset()\n\n assert inspector._CHILDREN_MAP == {}\n assert inspector._PARENTS_MAP == {}\n assert inspector.children(sourcepath) == set([])\n assert inspector.parents(sourcepath) == set([])", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def test_module(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"nested_folder\",\n \"another.html\",\n ),\n \"\",\n )\n\n content = self._get_fake_project_nested_module()\n\n expected = textwrap.dedent(\n '''\\\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n\n \"\"\"A module that shows every type of documentable class / method / function.\n\n Attributes:\n ATTRIBUTE_VALUE (float):\n Some number.\n\n \"\"\"\n\n\n ATTRIBUTE_VALUE = 14.3\n\n\n class MyKlass(object):\n \"\"\"A class that does something.\n\n Multi-line information here.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"asdfasdf\"\n\n def __init__(self, value):\n \"\"\"Create this instance.\"\"\"\n # A comment that should show up in the unittest's results\n super(MyKlass, self).__init__()\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n\n class ParentClass(object):\n \"\"\"The outter class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"tttt\"\n\n class NestedClass(object):\n \"\"\"A class within a class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"zzzzzzzzzzzzz\"\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n\n def _set_private_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 1\n\n\n def set_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 1'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter", "def test_documentation_popxl_nested_session_contexts(self):\n filename = \"nested_session_contexts.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test(self):\n pass", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_simple_module():\n loader = Loader()\n main_fname = 
loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def test_main():\n # Setup\n # Exercise\n # Verify", "def fixture_example_data():\n import_example_data()", "def test_001_basic(settings, inspector):\n sources = [\n os.path.join(settings.sample_path, 'main_basic.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),\n os.path.join(settings.sample_path, 'main_with_subimports.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n ]\n sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')\n\n inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)\n\n parents = inspector.parents(sourcepath)\n assert parents == set([\n os.path.join(settings.sample_path, 'main_depth_import-1.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-2.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),\n os.path.join(settings.sample_path, 'main_with_subimports.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n ])", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def runtest(self):", "def test_compute_glycemic_load(self):\n pass", "def test_functional(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n self.assertEqual(\n data['items'][0]['fullName'],\n 'example.example'\n )", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)" ]
[ "0.6656562", "0.6570933", "0.6570933", "0.6489614", "0.6259519", "0.6246904", "0.61843383", "0.61584216", "0.61110884", "0.61107814", "0.6097114", "0.6087612", "0.60835487", "0.60752654", "0.60712975", "0.6059714", "0.60356414", "0.60127175", "0.60122466", "0.601197", "0.5992272", "0.5991382", "0.59841245", "0.59821856", "0.59772164", "0.5974786", "0.5959991", "0.5928582", "0.59175533", "0.58958703" ]
0.83801514
0
Test the nested Session contexts example
def test_documentation_popxl_nested_session_contexts(self):
    filename = "nested_session_contexts.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_session():", "def test_resource(data_manager):\n sessions = set([])\n with data_manager.dal():\n context1 = current_context._get_current_object()\n session = context1.sqlalchemy\n assert isinstance(session, orm.Session)\n sessions.add(session)\n\n with data_manager.dal():\n context2 = current_context._get_current_object()\n assert context2 != context1\n session = context2.sqlalchemy\n assert isinstance(session, orm.Session)\n sessions.add(session)\n\n # Make sure we have two unique sessions\n assert len(sessions) == 2", "def test_existing_session_cookie(self):\n\n with self.app_sess1 as c:\n ret1 = c.get('/')\n ret2 = c.get('/')\n self.assertEqual(ret1.data, ret2.data)", "def test_session_promotion(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def session(db):\n db.session.begin_nested()\n\n yield db.session\n\n db.session.rollback()", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)", "def test_childSession(self):\n session = self.mdk.session()\n session.setProperty(\"other\", 123)\n session._context.tick()\n session._context.tick()\n session._context.tick()\n session.setTimeout(13.0)\n session2 = self.mdk.derive(session.externalize())\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)\n self.assertEqual(session2.getRemainingTime(), None)\n self.assertSessionHas(session2, session2._context.traceId, [1],\n other=123)", "def db_subsession(session):\n try:\n with session.begin_nested():\n yield\n except:\n logger.exception(\"Problem with DB sub-session, rolling back.\")", "def test_do_login(self):\r\n\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n self.assertNotIn(CURR_USER_KEY, session)\r\n do_login(u1)\r\n self.assertEqual(session[CURR_USER_KEY], u1.id)", "def session(self):", "def test_distinct_sessions_cookie(self):\n\n sess1 = None\n sess2 = None\n with self.app_sess1 as c:\n sess1 = c.get('/').data\n\n with self.app_sess2 as c:\n sess2 = c.get('/').data\n\n self.assertNotEqual(sess1, sess2)", "def test_client_custom_session():\n c_session = requests.Session()\n client = ConfigureClients(custom_session=c_session)\n assert client.session == c_session", "def test_joinSession(self):\n session = self.mdk.session()\n session.setProperty(\"key\", 456)\n 
session.setProperty(\"key2\", [456, {\"zoo\": \"foo\"}])\n session2 = self.mdk.join(session.externalize())\n self.assertSessionHas(session2, session._context.traceId, [1, 0],\n key=456, key2=[456, {\"zoo\": \"foo\"}])", "def session(request):\n session = get_test_db_session()\n request.cls.session = session\n return session", "def test_session_auth_token(self):\n\n sess1 = None\n sess2 = None\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret = c.get('/', headers=test_header)\n sess1 = ret.data\n\n with self.app_sess2 as c:\n ret = c.get('/', headers=test_header)\n sess2 = ret.data\n\n self.assertEqual(sess1, sess2)", "def test_find_where_multiple_infos(server, session):\n\n for session in server.sessions:\n session_id = session.get('session_id')\n session_name = session.get('session_name')\n find_where = server.find_where(\n {'session_id': session_id, 'session_name': session_name}\n )\n\n assert find_where == session\n assert isinstance(find_where, Session)\n\n # session.find_where\n for window in session.windows:\n window_id = window.get('window_id')\n window_index = window.get('window_index')\n\n find_where = session.find_where(\n {'window_id': window_id, 'window_index': window_index}\n )\n\n assert find_where == window\n assert isinstance(find_where, Window)\n\n # window.find_where\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n pane_tty = pane.get('pane_tty')\n\n find_where = window.find_where(\n {'pane_id': pane_id, 'pane_tty': pane_tty}\n )\n\n assert find_where == pane\n assert isinstance(find_where, Pane)", "def test_get_session(self):\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.db'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.cache'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.file'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.cached_db'):\n self._run_expired_session_test_for_engine()", "def session(self, context: InjectionContext = None) -> \"ProfileSession\":", "def test_modify_detached_session(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n session = DetachedSession(self.env, 'john')\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])", "def test_sessions():\n CHECKS = (check_correct_usage, check_expiration, check_bad_cookie, check_various_session_sizes)\n for no_datastore in (False, True):\n if no_datastore:\n test_db = 'without'\n else:\n test_db = 'with'\n for cot in (0, 10*1024, 2**30):\n if cot == 0:\n test_cookie = 'no data stored in cookies'\n elif cot == 2**30:\n test_cookie = 'data only stored in cookies'\n else:\n test_cookie = 'store data in cookies when its encoded size<=%dB' % cot\n for check in CHECKS:\n logger.debug('\\n\\n' + '*'*50)\n logger.debug('Running %s %s datastore and %s' % (check.__name__, test_db, test_cookie))\n yield check, no_datastore, cot", "def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")", "def test_server_get_session(self):\n server, client = loopback()\n session = server.get_session()\n 
assert isinstance(session, Session)", "def session_context(pytestconfig, request, tmp_env):\n ctx = Context.only()\n\n # Temporary, empty local directory for local data\n session_tmp_dir = Path(request.config._tmp_path_factory.mktemp(\"data\"))\n\n # Set the cache path according to whether pytest --local-cache was given. If True,\n # pick up the existing setting from the user environment. If False, use a pytest-\n # managed cache directory that persists across test sessions.\n ctx.cache_path = (\n ctx.local_data.joinpath(\"cache\")\n if request.config.option.local_cache\n # TODO use pytestconfig.cache.mkdir() when pytest >= 6.3 is available\n else Path(pytestconfig.cache.makedir(\"cache\"))\n )\n\n # Other local data in the temporary directory for this session only\n ctx.local_data = session_tmp_dir\n\n platform_name = \"message-ix-models\"\n\n # Add a platform connected to an in-memory database\n # NB cannot call Config.add_platform() here because it does not support supplying a\n # URL for a HyperSQL database.\n # TODO add that feature upstream.\n ixmp_config.values[\"platform\"][platform_name] = {\n \"class\": \"jdbc\",\n \"driver\": \"hsqldb\",\n \"url\": f\"jdbc:hsqldb:mem://{platform_name}\",\n }\n\n # Launch Platform and connect to testdb (reconnect if closed)\n mp = Platform(name=platform_name)\n mp.open_db()\n\n ctx.platform_info[\"name\"] = platform_name\n\n yield ctx\n\n ctx.close_db()\n ixmp_config.remove_platform(platform_name)", "def load_session(session):\n def inner():\n web.ctx.session = session\n return inner", "def test_client_get_session(self):\n server, client = loopback()\n session = client.get_session()\n assert isinstance(session, Session)", "def test_get_activities_from_recursive_contexts(self):\n from .mockers import context_query\n from .mockers import create_context\n from .mockers import subscribe_contextA, create_contextA, user_status_contextA\n from .mockers import subscribe_contextB, create_contextB, user_status_contextB\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context, permissions=dict(read='public', write='restricted', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextA, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextB, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.admin_subscribe_user_to_context(username, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextB)\n self.create_activity(username, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextB)\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n 
self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username_not_me), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 3)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextB['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[2].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[2].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[2].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])", "def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)", "def test_scoring_logic():\n app = create_ctfd()\n with app.app_context():\n admin = login_as_user(app, name=\"admin\", password=\"password\")\n\n register_user(app, name=\"user1\", email=\"[email protected]\", password=\"password\")\n client1 = login_as_user(app, name=\"user1\", password=\"password\")\n register_user(app, name=\"user2\", email=\"[email protected]\", password=\"password\")\n client2 = login_as_user(app, name=\"user2\", password=\"password\")\n\n chal1 = gen_challenge(app.db)\n flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')\n chal1_id = chal1.id\n\n chal2 = gen_challenge(app.db)\n flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')\n chal2_id = chal2.id\n\n # user1 solves chal1\n with freeze_time(\"2017-10-3 03:21:34\"):\n with client1.session_transaction() as sess:\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client1.post('/chal/{}'.format(chal1_id), data=data)\n\n # user1 is now on top\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user1'\n\n # user2 solves chal1 and chal2\n with freeze_time(\"2017-10-4 03:30:34\"):\n with client2.session_transaction() as sess:\n # solve chal1\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client2.post('/chal/{}'.format(chal1_id), data=data)\n # solve chal2\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client2.post('/chal/{}'.format(chal2_id), data=data)\n\n # user2 is now on top\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user2'\n\n # user1 solves chal2\n with freeze_time(\"2017-10-5 03:50:34\"):\n with client1.session_transaction() as sess:\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client1.post('/chal/{}'.format(chal2_id), data=data)\n\n # user2 should still be on top because they solved chal2 first\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user2'\n destroy_ctfd(app)", "def test_find_where(server, session):\n # server.find_where\n for session in server.sessions:\n session_id = session.get('session_id')\n\n assert server.find_where({'session_id': session_id}) == session\n assert isinstance(server.find_where({'session_id': session_id}), Session)\n\n # session.find_where\n for window in 
session.windows:\n window_id = window.get('window_id')\n\n assert session.find_where({'window_id': window_id}) == window\n assert isinstance(session.find_where({'window_id': window_id}), Window)\n\n # window.find_where\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n\n assert window.find_where({'pane_id': pane_id}) == pane\n assert isinstance(window.find_where({'pane_id': pane_id}), Pane)" ]
[ "0.67018175", "0.6668321", "0.6427891", "0.63258743", "0.62819177", "0.6264992", "0.6244206", "0.6197762", "0.6128129", "0.60963386", "0.59784377", "0.59195083", "0.5882602", "0.58783644", "0.5873574", "0.58591706", "0.5849194", "0.584282", "0.5837098", "0.58088905", "0.58034945", "0.5788038", "0.5782187", "0.5770325", "0.5761921", "0.57494193", "0.5720195", "0.5711418", "0.5710981", "0.56825006" ]
0.8149533
0
Test the popxl call_with_info example
def test_documentation_popxl_call_with_info(self):
    filename = "call_with_info.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hxlinfo():\n run_script(hxlinfo_main)", "def test_get_info(self):\n pass", "def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)", "def verify_call(obj):\n\tassert obj.tag == 'OMOBJ'\n\tattr = obj[0]\n\t\n\tassert attr.tag == 'OMATTR'\n\tpairs, application = attr\n\t\n\tassert application.tag == 'OMA'\n\tsymbol, args = application\n\t\n\tassert symbol.tag == 'OMS'\n\tassert symbol.get('cd') == \"scscp1\"\n\tassert symbol.get('name') == \"procedure_call\"\n\t\n\tassert args.tag == 'OMA'\n\tassert len(args) > 0\n\tname_symbol = args[0]\n\t\n\tassert name_symbol.tag == 'OMS'\n\tcd = name_symbol.get('cd')\n\tproc_name = name_symbol.get('name')\n\t\n\t#2. Now handle the extra information\n\tassert pairs.tag == 'OMATP'\n\tassert len(pairs) % 2 == 0\n\t\n\textras = {}\n\tcall_id = None\n\treturn_type = None\n\t\n\tfor i in range(0, len(pairs), 2):\n\t\tsymbol = pairs[i]\n\t\tassert symbol.tag == 'OMS'\n\t\tassert symbol.get('cd') == \"scscp1\"\n\t\tname = symbol.get('name')\n\t\textras[name] = pairs[i+1]\n\t\t\n\t\tif name == 'call_id':\n\t\t\tassert call_id is None\n\t\t\tcall_id = pairs[i+1].text\n\t\t\tprint(call_id)\n\t\telif name.startswith('option_return_'):\n\t\t\tassert return_type is None\n\t\t\treturn_type = ReturnTypes[name[14:]]\n\t\n\t#Some information is mandatory\n\tassert call_id is not None\n\tassert return_type is not None\n\t\n\treturn cd, proc_name, call_id, return_type, args[1:], extras", "def info(self, *args, **kwargs):", "def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)", "def get_info(self, info):\r\n pass", "def setInfo(*args):", "def getInfo():", "def info() -> None:", "def test_get_info_function() -> None:\n current_directory = Path.cwd()\n with zipfile.ZipFile(\n current_directory / 'app' / 'tests' / 'files' / 'oneFile.zip') as zip_object:\n res = get_info_about_file(zip_object, 'dotnetfx.exe')\n assert res == {'path': 'dotnetfx.exe', 'size': 21823560}", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def hxlinfo_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n parser = make_args(\n 'Display JSON-formatted metadata for a data source (does not have to be HXLated).',\n hxl_output=False\n )\n\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n json.dump(hxl.input.info(args.infile or stdin, make_input_options(args)), stdout, indent=2, ensure_ascii=False)\n\n return EXIT_OK", "def test_print_info(clarisse):\n info 
= \"test print info\"\n assert bool(clarisse.print_info(info)) is False", "def test_get_cell_info(self):\n expected = { 'Experiment': \"20220101_EGS1_12345AA\",\n 'Cell': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef',\n 'Pool': '12345AA0018',\n 'Date': '20220101',\n 'Number': '1234',\n 'Slot': '1-A1-A1',\n 'CellID': 'AAA66666',\n 'Checksum': 'deadbeef',\n 'Project': '12345',\n 'Base': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef/'\n '20220101_EGS1_12345AA_12345AA0018_AAA66666_deadbeef',\n 'Files in pass': 'unknown',\n 'Files in fail': 1,\n 'Files in fast5 fail': 1,\n '_counts': [\n {'_barcode': '.', '_label': 'All passed reads', '_part': 'pass', 'total_reads': 200},\n {'_barcode': '.', '_label': 'Passed and lambda-filtered reads', '_part': 'nolambda'},\n {'_barcode': '.', '_label': 'All failed reads', '_part': 'fail'} ],\n '_blobs': ['../../__blob__'],\n '_duplex' : [ ['Duplex pairs', 1],\n ['from total passing reads', 200],\n ['% of passing reads', '1.00%'] ],\n '_filter_type': 'none',\n '_final_summary': {'is_rna': False},\n '_nanoplot': '../../__nanoplot__',\n }\n\n\n got = get_cell_info( experiment = \"20220101_EGS1_12345AA\",\n cell = \"12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef\",\n cell_content = { '.': dict( fast5_pass = ['x.fast5'],\n fastq_fail = ['y.fastq'],\n fast5_fail = ['y.fast5'] ) },\n counts = { ('.','pass'): dict(total_reads = 200),\n ('.','fail'): dict(),\n ('.','nolambda'): dict() },\n fin_summary = dict(is_rna = False),\n blobs = ['__blob__'],\n nanoplot = '__nanoplot__',\n duplex = 1,\n fast5_meta = dict() )\n\n if VERBOSE:\n pprint(got)\n\n self.assertEqual( type(got), OrderedDict )\n self.assertEqual( dict(got), expected )", "def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)", "def _handle_info_response(self, resp, info, prev_info):\r\n if info.line_num != prev_info.line_num:\r\n return\r\n\r\n if resp['calltip']:\r\n info.editor.show_calltip('Arguments', resp['calltip'],\r\n signature=True,\r\n at_position=prev_info.position)\r\n\r\n if resp['name']:\r\n self.send_to_inspector.emit(\r\n resp['name'], resp['argspec'],\r\n resp['note'], resp['docstring'],\r\n not prev_info.auto)", "def test_get_patch_info_returns(self):\n # This test assumes IIQ isn't installed, thus the pile of errors that'll\n # occur shouldn't prevent us from getting a PatchInfo object\n fake_log = MagicMock()\n patch_info = versions.get_patch_info('bogus-patch.tgz', fake_log)\n\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))\n self.assertEqual(patch_info.iiq_dir, '')", "def rpc_info():", "def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), 
call('dummy_2')],\n mocked_get.call_args_list)", "def svn_info_invoke_receiver(svn_info_receiver_t__obj, void_baton, char_path, svn_info_t_info, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def after_call_used(self, function_info, subscript, call_code, return_value, code_reference):\n # pylint: disable=too-many-arguments\n if function_info == ('pandas.io.parsers', 'read_csv'):\n operator_context = OperatorContext(OperatorType.DATA_SOURCE, function_info)\n return_value = self.execute_inspection_visits_no_parents(operator_context, code_reference,\n return_value, function_info)\n if function_info == ('pandas.core.groupby.generic', 'agg'):\n operator_context = OperatorContext(OperatorType.GROUP_BY_AGG, function_info)\n return_value = self.execute_inspection_visits_no_parents(operator_context, code_reference,\n return_value.reset_index(), function_info)\n elif function_info == ('pandas.core.frame', 'dropna'):\n operator_context = OperatorContext(OperatorType.SELECTION, function_info)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif function_info == ('pandas.core.frame', '__getitem__'):\n # TODO: Can this also be a select\n if self.select:\n self.select = False\n # Gets converted to Selection later?\n operator_context = OperatorContext(OperatorType.SELECTION, function_info)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif isinstance(return_value, MlinspectDataFrame):\n operator_context = OperatorContext(OperatorType.PROJECTION, function_info)\n return_value['mlinspect_index'] = range(1, len(return_value) + 1)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif isinstance(return_value, MlinspectSeries):\n operator_context = OperatorContext(OperatorType.PROJECTION, function_info)\n return_value = self.execute_inspection_visits_unary_operator_series(operator_context, code_reference,\n return_value,\n function_info)\n elif function_info == ('pandas.core.frame', 'groupby'):\n description = self.code_reference_to_description[code_reference]\n return_value.name = description # TODO: Do not use name here but something else to transport the value\n if function_info == ('pandas.core.frame', 'merge'):\n operator_context = OperatorContext(OperatorType.JOIN, function_info)\n return_value = self.execute_inspection_visits_join_operator_df(operator_context, code_reference,\n return_value,\n function_info)\n\n self.input_data.pop()\n\n return return_value", "def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 
'Response body is : ' + response.data.decode('utf-8'))", "def info(): # noqa: E501\n return 'do some magic!'", "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "def test_ctcpQuery_USERINFO(self):\n self.client.userinfo = \"info\"\n self.client.ctcpQuery_USERINFO(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods, [(\"ctcpMakeReply\", (\"Wolf\", [(\"USERINFO\", \"info\")]))]\n )", "def test_rpcCall(self):\n pass" ]
[ "0.6383911", "0.5714342", "0.55663514", "0.5375583", "0.532167", "0.52700174", "0.5258182", "0.5240253", "0.51907164", "0.5187251", "0.51472026", "0.50939924", "0.5084021", "0.50806963", "0.507654", "0.5028689", "0.50142485", "0.49954024", "0.4964053", "0.49308", "0.49162632", "0.4889757", "0.48548278", "0.48462722", "0.4829606", "0.48205596", "0.48201934", "0.48180994", "0.47867846", "0.4786442" ]
0.82054126
0
Test the popxl basic repeat example
def test_documentation_popxl_repeat_0(self):
    filename = "repeat_graph_0.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def populate(self, pop_size):\n for _ in range(pop_size):\n sample = next(self._exp_source)\n self._add(sample)", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(self):\n return self._repeat", "async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.1:\r\n CURRENTFOXPOP -= 1", "def test_x_repeating(name, ipset_x_repeating):\n with pytest.raises(ValueError):\n interpolation.interpolate(*ipset_x_repeating, kind=name, **IPARGS.get(name, {}))", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n 
CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.9:\r\n CURRENTFOXPOP -= 1", "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def rabbitGrowth():\r\n # you need this line for modifying global variables\r\n global CURRENTRABBITPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTRABBITPOP):\r\n if random.random() <= (1 - (CURRENTRABBITPOP/MAXRABBITPOP)):\r\n CURRENTRABBITPOP += 1", "def rabbitGrowth():\r\n # you need this line for modifying global variables\r\n global CURRENTRABBITPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTRABBITPOP):\r\n if random.random() <= (1 - (CURRENTRABBITPOP/MAXRABBITPOP)):\r\n CURRENTRABBITPOP += 1", "def repeat(self, repeats):\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)", "def test_op_repeat(self) -> None:\n op_base = OpIncrForTest()\n kwargs_per_step_to_add = [\n dict(key_in=\"data.val.a\", key_out=\"data.val.b\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.c\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.d\"),\n dict(key_in=\"data.val.d\", key_out=\"data.val.d\"),\n ]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", incr_value=3)\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 14)\n\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.d\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n\n sample_dict[\"data.val.e\"] = 48\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.e\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n self.assertEqual(sample_dict[\"data.val.e\"], 42)", "def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)", "def test_make_pop(self, pop_size, cell_number, microcell_number):\n for i in [0, 1]:\n pe.Parameters.instance().use_ages = i\n # Population is initialised with no households\n pop_params = {\"population_size\": pop_size,\n \"cell_number\": cell_number,\n \"microcell_number\": microcell_number}\n test_pop = ToyPopulationFactory.make_pop(pop_params)\n\n total_people = 0\n count_non_empty_cells = 0\n for cell in test_pop.cells:\n for microcell in cell.microcells:\n total_people += len(microcell.persons)\n if len(cell.persons) > 0:\n count_non_empty_cells += 1\n # Test there is at least one non-empty cell\n self.assertTrue(count_non_empty_cells >= 1)\n # Test that everyone in the population has been assigned a\n # microcell\n self.assertEqual(total_people, pop_size)\n\n # Test a population class object is returned\n self.assertIsInstance(test_pop, pe.Population)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def MakeRepeat1(self,content):\n return self.register(Repeat1(content,reg=self))", "def test_pop(self):\n sched = Schedule()\n inst_map = 
InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 100, sched)\n self.assertEqual(inst_map.pop(\"tmp\", 100), sched)\n self.assertFalse(inst_map.has(\"tmp\", 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(\"tmp\"), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def runSimulation(numSteps):\n\n rabbit_pop = []\n fox_pop = [] \n \n for steps in range(numSteps):\n rabbitGrowth()\n foxGrowth()\n rabbit_pop.append(CURRENTRABBITPOP)\n fox_pop.append(CURRENTFOXPOP)\n \n return (rabbit_pop, fox_pop)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)" ]
[ "0.6505946", "0.6401242", "0.58164656", "0.54440105", "0.53839743", "0.5373807", "0.53707576", "0.53479195", "0.5337674", "0.5326383", "0.5316677", "0.53121865", "0.53121865", "0.5300285", "0.5297226", "0.52901715", "0.5276409", "0.5268856", "0.5196304", "0.5196304", "0.5173073", "0.51597667", "0.51544595", "0.5149219", "0.51400036", "0.51352286", "0.5117778", "0.5116364", "0.51089984", "0.51067406" ]
0.6540377
0
Test the popxl getting / setting tensor data example
def test_documentation_popxl_get_set_tensors(self):
    filename = "tensor_get_write.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_predictor():", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)", "def test_documentation_popxl_adv_get_write(self):\n filename = \"tensor_get_write_adv.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_machine_learning():", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n 
images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def test_add_get_tensor_2D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 2D tensors of all data types\n data_2D = mock_data.create_data((10, 10))\n add_get_arrays(dataset, data_2D)", "def test_meteo():\n test_path = tempfile.mkdtemp()\n x_train, metadata = meteo(test_path)\n try:\n assert x_train.shape == (11, 6)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_training(self):\n\t\tpass", "def setUp(self):\n output = np.zeros((1, 5, 2))\n target = np.zeros((1, 5, 2))\n # first channel\n output[0, 0] = [10, 4]\n target[0, 0] = [10, 0]\n # second channel\n output[0, 1] = [10, 18]\n target[0, 1] = [10, 10]\n # third channel\n output[0, 2] = [0, 0]\n target[0, 2] = [0, -1]\n # fourth channel\n output[0, 3] = [40, 40]\n target[0, 3] = [30, 30]\n # fifth channel\n output[0, 4] = [20, 10]\n target[0, 4] = [0, 10]\n\n gt_instances = InstanceData()\n gt_instances.keypoints = target\n gt_instances.keypoints_visible = np.array(\n [[True, True, False, True, True]])\n\n pred_instances = InstanceData()\n pred_instances.keypoints = output\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict()\n }\n\n self.data_batch = [data]\n self.data_samples = [data_sample]", "def test_get_iris_setosa_data(self):\n iris = get_iris_setosa_data()\n self.assertEqual(len(iris.data), 150)\n self.assertEqual(len(iris.labels), 150)", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def test_income():\n test_path = tempfile.mkdtemp()\n x_train, metadata = income(test_path)\n try:\n assert x_train.shape == (44, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_gen():\n tpot_obj = TPOTClassifier()\n\n pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)\n\n assert len(pipeline) > 1\n assert pipeline[0].ret == Output_DF", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)" ]
[ "0.6889487", "0.65411484", "0.62041", "0.61115885", "0.60792667", "0.60625935", "0.5973008", "0.5798675", "0.579797", "0.57839084", "0.5775764", "0.5711267", "0.57093495", "0.5704983", "0.5672286", "0.5643146", "0.5622259", "0.5604334", "0.5597279", "0.5581926", "0.558076", "0.5573886", "0.55467963", "0.55462253", "0.5545013", "0.55389446", "0.5538212", "0.5531791", "0.55170566", "0.55123717" ]
0.7280651
0
Test the popxl autodiff op
def test_documentation_popxl_autodiff(self):
    filename = "autodiff.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_pressure_increasing_check_some_decreasing(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_g_asignar_rol(self):", "def test_open_fill(self):", "def test31_clear_air():\n assert not is_precip_mode(31), 'VCP 31 is not precip'", "def test_modexp(self):\n self.assertEqual(MathFunctions.modexp(2, 5, 7), 4)\n self.assertEqual(MathFunctions.modexp(2, 10, 8), 0)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def test_analytical_vs_numerical():\n pass", "def test_devide_int(self):\n self.assertEqual(operations.devide(8,4), 2)", "def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_without_orographic_enhancement(self):\n input_cube = self.precip_cube.copy()\n input_cube.rename(\"air_temperature\")\n input_cube.units = \"K\"\n plugin = CreateExtrapolationForecast(input_cube, self.vel_x, self.vel_y)\n result = plugin.extrapolate(10)\n expected_result = np.array(\n [[np.nan, np.nan, np.nan], [np.nan, 1, 2], [np.nan, 1, 1], [np.nan, 0, 2]],\n dtype=np.float32,\n )\n expected_result = np.ma.masked_invalid(expected_result)\n expected_forecast_period = np.array([600], dtype=np.int64)\n # Check we get the expected result, and the correct time coordinates.\n self.assertArrayEqual(\n 
np.ma.getmask(expected_result), np.ma.getmask(result.data)\n )\n self.assertArrayAlmostEqual(expected_result.data, result.data.data)\n self.assertArrayAlmostEqual(\n result.coord(\"forecast_period\").points, expected_forecast_period\n )\n self.assertEqual(result.coord(\"forecast_period\").units, \"seconds\")\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n input_cube.coord(\"time\").points,\n )\n self.assertEqual(\n result.coord(\"time\").points, input_cube.coord(\"time\").points + 600\n )", "def test_one_pop(data_: tuple, _is_pop: bool):\n x_bar = cls.get_mean(data_)\n s_x = cls.get_stdev(data_, is_population=_is_pop)\n n_x = cls.get_n(data_)\n return (x_bar - h0) / (s_x / sqrt(n_x))", "def test_get_meta_range(self):\n pass", "def test_with_orographic_enhancement(self):\n plugin = CreateExtrapolationForecast(\n self.precip_cube,\n self.vel_x,\n self.vel_y,\n orographic_enhancement_cube=self.oe_cube,\n )\n result = plugin.extrapolate(10)\n expected_result = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, 1.03125, 1.0],\n [np.nan, 1.0, 0.03125],\n [np.nan, 0, 2.0],\n ],\n dtype=np.float32,\n )\n expected_result = np.ma.masked_invalid(expected_result)\n expected_forecast_period = np.array([600], dtype=np.int64)\n # Check we get the expected result, and the correct time coordinates.\n self.assertArrayEqual(\n np.ma.getmask(expected_result), np.ma.getmask(result.data)\n )\n self.assertArrayAlmostEqual(expected_result.data, result.data.data)\n self.assertArrayAlmostEqual(\n result.coord(\"forecast_period\").points, expected_forecast_period\n )\n self.assertEqual(result.coord(\"forecast_period\").units, \"seconds\")\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n self.precip_cube.coord(\"time\").points,\n )\n self.assertEqual(\n result.coord(\"time\").points, self.precip_cube.coord(\"time\").points + 600\n )", "def testspec(arr: list[int]) -> None:\n\n print(50*'-')\n print(arr)\n print_rem(arr)\n rev_dupes(arr)\n print(arr)", "def test_FlexCrop1(self):", "def clean_data(self, opz):\n# pdb.set_trace()\n mask = (opz['Opzetstuk Noord (°)']<-1) | (opz['Opzetstuk Noord (°)']>100)\n opz = opz.drop(opz.loc[mask].index)\n opz['open'] = opz[\"Opzetstuk Noord (°)\"].apply(lambda x: 1 if x < 80 else 0)\n #Deze klopt niet. We hebben het moment nodig van opengaan en het moment van dichtgaat. Moment van openen is: wanneer de verandering van de aantal graden >1 graad is. Moment van sluiten is de laatste verandering totdat het niet meer veranderd. 
Zie ook code van Pieter in C#.\n opz['diff'] = opz['open'].diff()\n beweegt=opz[opz['diff']!=0]\n return beweegt", "def c_test_population_function(self, function):\r\n return 1", "def test_get_range(self):\n pass", "def test_170329_notimp(self):\n spc = parser(get_file('PTSDY2_notimp.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('CATEGORICAL', 'MRGL')\n self.assertAlmostEqual(outlook.geometry.area, 110.24, 2)", "def testMedicationsImmunosupp(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"immunosupp\")\n\n self.util.boolPropertyTest(self, attr, \"immunosupp\")", "def test_wb(self):\n df = dep.read_wb(get_path('wb.txt'))\n self.assertAlmostEquals(df['precip'].max(), 162.04, 2)", "def test21_precip():\n assert is_precip_mode(21), 'VCP 21 is precip'", "def test_for_more_flags(self):\n shorter = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", True, False)\n longer = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", True, True)\n\n self.assertGreater(longer, shorter)", "def test_4_4_1_1(self):\n pass", "def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)" ]
[ "0.49731633", "0.49007177", "0.4868027", "0.48571473", "0.48517647", "0.48516244", "0.4770387", "0.47574338", "0.474696", "0.4732291", "0.47281486", "0.47268423", "0.47249767", "0.47073162", "0.46844417", "0.46804345", "0.46610695", "0.4651014", "0.4639932", "0.46340024", "0.46131676", "0.45845267", "0.45754358", "0.45751292", "0.45408538", "0.45391086", "0.45374107", "0.4537361", "0.45317215", "0.45218405" ]
0.6394983
0
Test the popxl in sequence context manager
def test_documentation_popxl_in_sequence(self): filename = "in_sequence.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called()\n\n # pop object default value\n batch.pop_object()\n mock_obj.pop.assert_called_with(-1)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop object at index\n batch.pop_object(10)\n mock_obj.pop.assert_called_with(10)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference default value\n batch.pop_reference()\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(-1)\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference at index\n batch.pop_reference(9)\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(9)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def test_close():\n while True:\n yield", "def test_pop(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 100, sched)\n self.assertEqual(inst_map.pop(\"tmp\", 100), sched)\n self.assertFalse(inst_map.has(\"tmp\", 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(\"tmp\"), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check", "def tearDown(self):\n del self.pop", "def pop():", "def test_push_pop(values):\n test_stack = stack.Stack()\n\n for value in values:\n test_stack.push(value)\n\n for expected_value in reversed(values):\n value = test_stack.pop()\n assert value == expected_value\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.pop()", "def test_pop_on_empty_raises_error(sample_priorityq):\n with pytest.raises(IndexError):\n sample_priorityq.pop()", "def test_pop_gate(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(XGate(), 100, sched)\n self.assertEqual(inst_map.pop(XGate(), 100), sched)\n self.assertFalse(inst_map.has(XGate(), 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(XGate()), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def test_pop_no_args(self):\r\n msg_list = messages.MessageList()\r\n # Adds 5 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n msg_list.push(messages.StringMessage(\"d\"))\r\n msg_list.push(messages.StringMessage(\"e\"))\r\n\r\n self.assertEqual(msg_list.length(), 5)\r\n popped = msg_list.pop()\r\n self.assertEqual(msg_list.length(), 4)\r\n self.assertEqual(popped.msg, \"e\")\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n 
self.assertRaises(IndexError, msg_list.pop)", "def test_open_fill(self):", "def test_peek_empty():\n test_stack = stack.Stack()\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.peek()", "def test_ignore_close():\n try:\n yield\n except GeneratorExit:\n yield", "def test_pop_on_small_stack(small_stack):\n assert small_stack.pop().val == 3\n assert small_stack.pop().val == 2\n assert small_stack._size == 1", "def _pop(self, actual_call):\n try:\n expected_call, mock_result = self._queue.popleft()\n except IndexError as ex:\n error = UnexpectedCall(\n \"Queue is empty. call: {0}\"\n .format(actual_call)\n )\n self._store_pop_error(error)\n raise error\n if actual_call != expected_call:\n error = UnexpectedCall(\n \"Call does not match expectation. actual: {0}; expected: {1}\"\n .format(actual_call, expected_call)\n )\n self._store_pop_error(error)\n raise error\n # let it raise if the result is an exception or exception type.\n return mock_result()", "def testSeq(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n 'apple',\n self.mr.seq\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_pop_left_check_head(dq_3):\n dq_3.pop_left()\n assert dq_3._dll.head.data == 4", "def test_pop_returns_value_of_tail(dq_3):\n assert dq_3.pop() == 'ragtime'", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def testPushPopItem(self):\n test_queue = multi_process.MultiProcessingQueue()\n\n for item in self._ITEMS:\n test_queue.PushItem(item)\n\n test_queue.SignalEndOfInput()\n test_queue_consumer = test_lib.TestQueueConsumer(test_queue)\n test_queue_consumer.ConsumeItems()\n\n self.assertEqual(test_queue_consumer.number_of_items, len(self._ITEMS))", "def test_peek_single(values, expected_value):\n test_stack = stack.Stack()\n for value in values:\n test_stack.push(value)\n\n returned_value = test_stack.peek()\n\n assert returned_value == expected_value\n\n for value in reversed(values):\n assert test_stack.pop() == value", "def test_sequence(self):\n seq_name = 'test_seq'\n\n with self.dbh.sequence_recreate(seq_name):\n try:\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 1)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 2)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 3)\n except Exception:\n self.dbh.rollback()\n raise", "def test_valueInQueue(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper.pop()\n self.assertEqual(expected, actual)", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def test_populator_aborts_early():\n o1, o2 = MediaBag(media=1), MediaBag(media=2)\n\n def multi_get(*keys):\n raise AssertionError('tried calling multi_get')\n\n results = media.build_populator('id', multi_get)([o1, o2])\n assert results == [o1, o2]", "def test_generator_cleanup():\n try:\n yield 1\n finally:\n print('cleanup')", "def pop_write(self):\n ...", "def test_pushpop2_dir(self):\n TempfileManager.push()\n os.mkdir(tempdir + 'pushpop2')\n TempfileManager.add_tempfile(tempdir + 'pushpop2')\n\n TempfileManager.push()\n os.mkdir(tempdir + 'pushpop2a')\n TempfileManager.add_tempfile(tempdir 
+ 'pushpop2a')\n TempfileManager.pop()\n if not os.path.exists(tempdir + 'pushpop2'):\n self.fail(\"pop() clean out all files\")\n if os.path.exists(tempdir + 'pushpop2a'):\n self.fail(\"pop() failed to clean out files\")\n\n TempfileManager.pop()\n if os.path.exists(tempdir + 'pushpop2'):\n self.fail(\"pop() failed to clean out files\")", "def popitem(self): # real signature unknown; restored from __doc__\n pass" ]
[ "0.58948493", "0.5882581", "0.5770228", "0.54232585", "0.54092556", "0.54075843", "0.5373329", "0.5303253", "0.53030944", "0.5287886", "0.52534574", "0.5221736", "0.52018183", "0.5189823", "0.5175281", "0.5172834", "0.5172073", "0.51521003", "0.51421815", "0.5127545", "0.51249427", "0.51089585", "0.5103546", "0.5087489", "0.5045468", "0.5040717", "0.5031918", "0.5026089", "0.50086796", "0.4999565" ]
0.65936136
0
Test the popxl remote variable
def test_documentation_popxl_remote_var(self): filename = "remote_variable.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def _platformix_get(self, context, fake_reply, prop):\r\n if hasattr(self.host, prop):\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)", "def test_check_replication_unknown_valueerror2(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def remote_status():", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def remote_installed(self):\n gxp_dir = os.environ[\"GXP_DIR\"]\n flag = os.path.join(gxp_dir, \"REMOTE_INSTALLED\")\n if dbg>=2: \n ioman.LOG(\"checking remote flag %s/%s\\n\" % (gxp_dir, flag))\n if os.path.exists(flag):\n if dbg>=2: ioman.LOG(\"exists, remotely installed\\n\")\n return 1\n else:\n if dbg>=2: ioman.LOG(\"does not exit, locally installed\\n\")\n return 0", "def remote():\n pass", "def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)", "def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def hasGridProxy():\n import os\n from subprocess import Popen, PIPE\n \n arguments = 'dirac-proxy-info --checkvalid'\n arguments = ['dirac-command'] + arguments.split()\n logger.verbose ( 'hasGridProxy:use Popen(%s)' % arguments)\n\n p = Popen(arguments, stdout=PIPE, stderr=PIPE)\n (cout, cerr) = p.communicate()\n #\n if 0 != p.returncode: return False\n #\n if py3 :\n cout = cout.decode ( 'utf-8' ) if cout 
else cout \n cerr = cerr.decode ( 'utf-8' ) if cerr else cerr \n # \n\n if 'expired' in cout : return False\n if 'Insane' in cout : return False\n if 'Error' in cout : return False\n #\n return 0 == p.returncode and cout and not cerr", "def test_set_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set %s/one 'bye'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"bye\\n\", self.output.getvalue())", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0", "def on_pullVar(self):\n if not os.path.exists(grapher.binPath):\n mess = \"!!! ERROR: rndBin path not found, check user pref !!!\"\n self.mainUi._defaultErrorDialog(mess, self.mainUi)\n else:\n tmpPath = os.path.join(self.grapher.userBinPath, 'tmp')\n tmpFile = os.path.join(tmpPath, 'varBuffer.py')\n varDict = pFile.readPyFile(tmpFile)\n for var in sorted(varDict.keys()):\n if var.startswith('selVar_'):\n newItem = self.on_addVar()\n self.setItem(newItem, **varDict[var])", "def SEMIHook(p):\n x = p['sy']['pop']()\n if (x == ';'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def check_remote_rpm_install(self, rpm_package_name, host):\n results = run_remote_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)\n self.assertEqual(results, rpm_package_name)", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_existing_value(self):\n var_name = \"PICCOLO_TEST_2\"\n initial_value = \"hello\"\n new_value = \"goodbye\"\n\n os.environ[var_name] = initial_value\n\n with set_env_var(var_name=var_name, temp_value=new_value):\n self.assertEqual(os.environ.get(var_name), new_value)\n\n self.assertEqual(os.environ.get(var_name), initial_value)", "def test_ipcrm():\n IPCComm.ipcrm()", "def nremote(self):", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def is_remote(self): # -> Any | bool:\n ...", "def get_xcom(**context):\n ti = context['ti']\n data = ti.xcom_pull(task_ids='xcom_from_bash', key='return_value')\n logging.info(data)", "def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n '[email protected]:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n '[email protected]:/example/remote'))" ]
[ "0.62325144", "0.53577113", "0.53224456", "0.5209967", "0.51961607", "0.5094487", "0.50844604", "0.5058393", "0.5020116", "0.49631917", "0.49456343", "0.49408296", "0.4912187", "0.48899758", "0.48812777", "0.48232034", "0.47519946", "0.46955076", "0.46878415", "0.46760944", "0.46738386", "0.46679908", "0.46611106", "0.4651345", "0.4638157", "0.46368185", "0.4623075", "0.4617728", "0.46174634", "0.46088743" ]
0.70022154
0
Test the popxl remote rts variable
def test_documentation_popxl_remote_rts_var(self): filename = "remote_rts_var.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_ipcrm():\n IPCComm.ipcrm()", "def TestRetract(portVXM,portArd):\n commandString = \"F\"\n portVXM.write(commandString)\n commandString = \"PM-3,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L3,R \"\n portVXM.write(commandString)\n\tresp='abs'\n\twhile( '^' not in resp ):\n\t resp = portVXM.read(1000)\n\t\n\tprint \"Moving to Standard Operation.\"\t\n commandString = \"PM-2,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L0,R \"\n portVXM.write(commandString)\n\tt0 = time.time() \n\tt = time.time() \n\tportArd.flushInput()\n\tresp='abs'\n\t\n #while( ('21,1,1' not in resp) and ((t-t0)>30.0)):\n while( not('21,0,1\\n\\r21,1,1\\n\\r21,0,0\\n\\r' in resp) ):\n\t resp = portArd.read(10000)\n\t t = time.time()\n\n\t#print \"CONDITION: \\t\" ,('21,1,1' not in resp),'\\t'\n\tportVXM.write(\"D,\")\n resp = portVXM.read(1000)\n\tXpos, Zpos = GetXZ(portVXM)\n localtime = time.asctime(time.localtime(time.time()))\n\tprint \"Source Fully Retracted at (X,Y) : \",Xpos, \"\\t\", Zpos ,\"\\t\", \"at localtime: \", localtime\n\tprint abs(t-t0) , \"\\t Seconds \\r\"\n\tWaitUntilReady(portVXM)\n\tportVXM.write(\"C,IA1M-0,IA3M-0,R \")\n\tportVXM.write(\"Q, \")\n\tportArd.flush()", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def TestResponse(port):\n\tcommandString = \"F\"\n\tport.write(commandString)\n\tcommandString = \"PM3,C,I1M500,I3M-500,I3M500,I1M-500,R\"\n\tport.write(commandString)\n\tWaitUntilReady(port)\n\tport.write(\"R\")\n\tresp=WaitUntilReady(port)\n\tcount=0\n\tprint(\"starting loop:\")\n\twhile('^' in resp):\n \tport.write(\"X\")\n\t\txpos=port.read(9)\n\t\tprint(xpos)\n\t\tport.write(\"R\")\n\t\ttime.sleep(5)\n\t\tresp=WaitUntilReady(port)\n\t\tcount = count+1\n\t\tprint(count)", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def remote_status():", "def test_rpcCall(self):\n pass", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def test_check_replication_unknown_valueerror2(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def test_returnCar(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect(ADDRESS)\n self.assertTrue(Functions.returnCar(s, '38'))", "def test_emirp_check():\r\n pass", "def 
test_rsp_unknown_status(self):\n\n def handle(event):\n return 0xFFF0, event.modification_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_connect():\n PV_name = \"TEST:TEST.VAL\"\n casput(PV_name, 1)\n assert caget(PV_name) == 1", "def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0", "def testAllRead(self):\n import time,copy\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n ptnames = [ pt['name'] for pt in pts ]\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n value = filter(lambda x: x['name']==pt, pts)[0]['value']\n #assert value == reply[ptnames.index(pt)]\n received = reply[ptnames.index(pt)]\n if not value == received: \n print pt, ' was %s but should be %s'%(str(received),str(value))", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def test_rsp_failure(self):\n\n def handle(event):\n return 0x0112, None\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0112\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_set_get_value_1(self):\n value = 23.0\n port = cn.Port(value)\n self.assertEqual(port.value, value)", "def test_check_query_response(self, node_id):\n\n print('\\n### Testing query node status RESPONSE ###')\n print('Remember that node_id must be the same 3 characters string that in test_query_node_id(node_id)')\n\n received_bytes = self.serport.readline()\n if received_bytes == b'E\\r\\n':\n print(\"You received Error Msg!\")\n print(f'Did not receive correct query status response from node {node_id}')\n print(f'Query again the node {node_id} if required')\n return False\n\n elif (len(received_bytes) == 13) and (received_bytes[0:8] == b'#B' + node_id.encode() + b'06V'):\n supply_voltage = received_bytes.decode()[8:13]\n 
print(f\"supply_voltage of {node_id} is {supply_voltage}\")\n print(\"response from the remote node SUCCESS\")\n return True\n else:\n print(f'Did not receive correct query status response from node {node_id}')\n print(f'Query again the node {node_id} if required')\n return False", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply, \"Expected:\", expectedValue\n assert reply[0] == expectedValue", "def test_rsp_unknown_status(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test\"\n ds.SOPClassUID = DisplaySystem\n ds.SOPInstanceUID = \"1.2.3.4\"\n return 0xFFF0, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_rangetocidr_command(runner):\n\n result = runner.invoke(command, ['rangetocidr', '127.0.0.0', '128.0.0.3'])\n assert result.exit_code == 0\n\n assert '127.0.0.0/8' in result.output\n assert '128.0.0.0/30' in result.output", "def test(self,version=''):\n p5cmd = ['srvinfo', 'lexxvers']\n try:\n res = self.nsdchat_call(p5cmd,5)\n p5_version = singlevalue(res)\n if (p5_version >= str(version)):\n return True\n return False\n except subprocess.TimeoutExpired:\n print(\"Could not connect to the archiware p5 server.\\nPlease review\"\n \"the connection and firewall settings.\")\n raise", "def test_set_get_value_2(self):\n value = (1,)\n port = cn.Port()\n port.value = value\n self.assertEqual(port.value, value)", "def test_ipcrm_not_isntalled(): # pragma: windows\n IPCComm.ipcrm()", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3" ]
[ "0.6584408", "0.6459491", "0.5550423", "0.54029787", "0.53768945", "0.5342253", "0.528288", "0.52809453", "0.5237307", "0.51818335", "0.5181425", "0.50585806", "0.5056922", "0.50432426", "0.503633", "0.50314456", "0.50298536", "0.50265086", "0.5013454", "0.500096", "0.49899554", "0.49847114", "0.49791014", "0.49777964", "0.4974934", "0.49557754", "0.4948672", "0.4935801", "0.49343306", "0.4926512" ]
0.6953812
0
Test the popxl rts variable
def test_documentation_popxl_rts_var(self): filename = "rts_var.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_robax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(const_number)\n self.assertEqual(robax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(10)\n self.assertIsInstance(robax, Exception)", "def test_get_extax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(const_number)\n self.assertEqual(extax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(10)\n self.assertIsInstance(extax, Exception)", "def has_regvar(*args):\n return _ida_frame.has_regvar(*args)", "def PHook(p):\n x = p['sy']['pop']()\n if (x == '.'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def test_get_robax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. 
Test will not run.'\n sys.exit()\n robax = rapid_jointtarget.get_robax_tostring(const_jtar)\n self.assertEqual(robax, 'RobAx: [Rax_1,Rax_2,Rax_3,Rax_4,Rax_5,Rax_6] = [0,0,0,10,0,0]')", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_pslx_34_002(self, testf=\"pslx_34_002.pslx\"):\n BlatPslCases.test_psl_34_002(self, \"pslx_34_002.pslx\", pslx=True)", "def test_is_rock_valid():\n\n\tassert game.is_val('rock') is True # muze byt jenom 'True', vsechny True stejne ID i False", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def test_prodcode_4(self):\n self.assertEqual(prodcode.functio(), \"production value\")", "def test_03_pass(self):\n if x==1:\n pass", "def test_02_pass(self):\n if x==1:\n pass", "def check():", "def test_get_extax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n extax = rapid_jointtarget.get_extax_tostring(const_jtar)\n self.assertEqual(extax, 'Extax: [Eax_a,Eax_b,Eax_c,Eax_d,Eax_e,Eax_f] = [9E9,9E9,9E9,9E9,9E9,9E9]')", "def test_func(self):\n rol_nu = rol_get_huidige(self.request)\n return rol_nu == Rollen.ROL_RCL", "def test_01_pass(self):\n if x==1:\n pass", "def test_01_pass(self):\n if x==1:\n pass", "def TestRetract(portVXM,portArd):\n commandString = \"F\"\n portVXM.write(commandString)\n commandString = \"PM-3,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L3,R \"\n portVXM.write(commandString)\n\tresp='abs'\n\twhile( '^' not in resp ):\n\t resp = portVXM.read(1000)\n\t\n\tprint \"Moving to Standard Operation.\"\t\n commandString = \"PM-2,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L0,R \"\n portVXM.write(commandString)\n\tt0 = time.time() \n\tt = time.time() \n\tportArd.flushInput()\n\tresp='abs'\n\t\n #while( ('21,1,1' not in resp) and ((t-t0)>30.0)):\n while( not('21,0,1\\n\\r21,1,1\\n\\r21,0,0\\n\\r' in resp) ):\n\t resp = portArd.read(10000)\n\t t = time.time()\n\n\t#print \"CONDITION: \\t\" ,('21,1,1' not in resp),'\\t'\n\tportVXM.write(\"D,\")\n resp = portVXM.read(1000)\n\tXpos, Zpos = GetXZ(portVXM)\n localtime = time.asctime(time.localtime(time.time()))\n\tprint \"Source Fully Retracted at (X,Y) : \",Xpos, \"\\t\", Zpos ,\"\\t\", \"at localtime: \", localtime\n\tprint abs(t-t0) , \"\\t Seconds \\r\"\n\tWaitUntilReady(portVXM)\n\tportVXM.write(\"C,IA1M-0,IA3M-0,R \")\n\tportVXM.write(\"Q, \")\n\tportArd.flush()", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def test_variablepresentations_get(self):\n pass", "def test_ge(self):\n # Success\n script = self.write_script(\"\"\"\n variable = 5\n if variable >= 5:\n check = 130\n else:\n check = 0\n end\n \"\"\")\n 
check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 130)\n\n # Failure\n script = self.write_script(\"\"\"\n variable = 5\n if variable >= 8:\n check = 130\n else:\n check = 0\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 0)", "def test_le(self):\n # Success\n script = self.write_script(\"\"\"\n variable = 10\n if variable <= 10:\n check = 80\n else:\n check = 10\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 80)\n\n # Failure\n script = self.write_script(\"\"\"\n variable = 10\n if variable <= 8:\n check = 80\n else:\n check = 10\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 10)", "def test_getx(self):\n point = (1,2)\n x = utils.getx(point)\n self.assertEqual(1, x)", "def test_MINX_pass(self):\n self.assertTrue(self.mod.minx.isset)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)" ]
[ "0.5512495", "0.54203445", "0.5234334", "0.5186235", "0.513492", "0.5132137", "0.5124492", "0.5119325", "0.5085518", "0.50540435", "0.50286376", "0.5028228", "0.50170285", "0.50151104", "0.50103855", "0.4989811", "0.49670818", "0.49645856", "0.49455714", "0.493474", "0.493474", "0.49287537", "0.49034783", "0.49002767", "0.48985684", "0.48946097", "0.4867602", "0.48639008", "0.48600543", "0.48600543" ]
0.6135992
0
Test the popxl mnist with replication example
def test_documentation_popxl_mnist_replication_train(self): filename = "mnist_rts.py --replication-factor 2" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) 
is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! 
don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. 
Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def replicateExample(self):\n\n C, E = self.getAtomStrainMatches(matches = 5000)\n self.removeByAtomStrain(keep = 5000)\n r = self.getAtomStrainRatio(const = C, exp = E)\n self.indexSortInterfaces(index = np.argsort(r))", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = 
os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def create_mnistm(X):\r\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\r\n for i in range(X.shape[0]):\r\n bg_img = rand.choice(background_data)\r\n d = mnist_to_img(X[i])\r\n d = compose_image(d, bg_img)\r\n X_[i] = d\r\n return X_", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def test_generate_nb_testing(self):\n pass", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_generate_nb(self):\n pass", "def mnist_noniid(dataset, num_users):\n # num_shards, num_imgs = 2*num_users, int(dataset.data.size()[0]/2/num_users) # choose two number from a set with num_shards, each client has 2*num_imgs images\n # idx_shard = [i for i in range(num_shards)]\n # dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n # idxs = np.arange(dataset.data.size()[0])\n # labels = dataset.train_labels.numpy()\n #\n # # sort labels\n # idxs_labels = np.vstack((idxs, labels))\n # idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]\n # idxs = idxs_labels[0,:]\n #\n # # divide and assign\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, 2, replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate((dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users\n\n label_list = dataset.targets.numpy()\n minLabel = min(label_list)\n numLabels = 
len(dataset.classes)\n\n dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n for i in range(0, len(label_list)):\n tmp_target_node = int((label_list[i] - minLabel) % num_users)\n if num_users > numLabels:\n tmpMinIndex = 0\n tmpMinVal = math.inf\n for n in range(0, num_users):\n if (n) % numLabels == tmp_target_node and len(dict_users[n]) < tmpMinVal:\n tmpMinVal = len(dict_users[n])\n tmpMinIndex = n\n tmp_target_node = tmpMinIndex\n dict_users[tmp_target_node] = np.concatenate((dict_users[tmp_target_node], [i]), axis=0)\n return dict_users", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)" ]
[ "0.664523", "0.6479431", "0.6476196", "0.64748347", "0.64239633", "0.64103997", "0.6363367", "0.6277", "0.62729543", "0.61764646", "0.60306984", "0.596759", "0.5963632", "0.5930057", "0.58732194", "0.58112574", "0.58030224", "0.5776231", "0.57753116", "0.56806064", "0.5629193", "0.5615691", "0.5602601", "0.55982304", "0.55744094", "0.5548192", "0.55394477", "0.55367374", "0.5528279", "0.5498519" ]
0.7531117
0
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train(self): filename = "mnist_rts.py --replication-factor 2 --rts" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with 
tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def run(prefix):\n # run_tests.assert_folder_is_empty(prefix=prefix)\n xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n r = run_tests.run_cmd(prefix,\n args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n pdb_name = 'm00_poor.pdb', mtz_name='')\n assert os.path.isfile('1-20.npy')", "def test_machine_learning():", "def try4():\n path = '/Users/mayankkejriwal/git-projects/bioExperiments/tsne_python/'\n mnist = path+'mnist2500_X.txt'\n X = numpy.loadtxt(mnist)\n labels = numpy.loadtxt(path+\"mnist2500_labels.txt\")\n Y = tsne.tsne(X, 2, 50, 20.0)\n pylab.scatter(Y[:,0], Y[:,1], 20, labels)\n pylab.show()", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 
most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def test_active_inference_SPM_1b(self):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()" ]
[ "0.69393843", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", "0.5852096", "0.5836193", "0.5835081", "0.5788991", "0.57672447", "0.5757812", "0.5727704", "0.5723906", "0.5711445" ]
0.6889731
1
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train_test(self): filename = "mnist_rts.py --replication-factor 2 --rts --test" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with 
tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def run(prefix):\n # run_tests.assert_folder_is_empty(prefix=prefix)\n xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n r = run_tests.run_cmd(prefix,\n args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n pdb_name = 'm00_poor.pdb', mtz_name='')\n assert os.path.isfile('1-20.npy')", "def test_machine_learning():", "def try4():\n path = '/Users/mayankkejriwal/git-projects/bioExperiments/tsne_python/'\n mnist = path+'mnist2500_X.txt'\n X = numpy.loadtxt(mnist)\n labels = numpy.loadtxt(path+\"mnist2500_labels.txt\")\n Y = tsne.tsne(X, 2, 50, 20.0)\n pylab.scatter(Y[:,0], Y[:,1], 20, labels)\n pylab.show()", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 
most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def test_active_inference_SPM_1b(self):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()" ]
[ "0.6889731", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", "0.5852096", "0.5836193", "0.5835081", "0.5788991", "0.57672447", "0.5757812", "0.5727704", "0.5723906", "0.5711445" ]
0.69393843
0
Sets the errors of this MigrateListingResponse.
def errors(self, errors): self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n \n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def errors(self):\n return self._errors", "def errors(self):\n return self.__errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def error(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.ERROR))", "def errors(self):\n raise NotImplementedError", "def getErrorsList(self):\n return self.__errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors(self):\n return self._properties.get(\"errors\")", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def validation_errors(self):\n return self._validation_errors", "def getErrors(self):\n return self.errors", "def mark_error(self):\r\n self.status = ERROR", "def mark_failed(self):\n self.status = self.FAILED\n self.traceback = self._format_traceback()\n self.save(update_fields={'status', 'traceback', 'updated_at'})", "def set_error(self, name, value):\n self.errors[name] = value", "def errors(self) -> List[Error]:", "def __set_errors_json(self, error_count_by_operation, errors_by_operation):\n message = \"{0} error/s reported.\".format(error_count_by_operation)\n log_file_path = self.logger.file_logger.log_file_path\n message += \" The latest {0} error/s are shared in detail. To view all errors, review this log file on the machine: {1}\".format(len(errors_by_operation), log_file_path) if error_count_by_operation > 0 else \"\"\n return {\n \"code\": Constants.PatchOperationTopLevelErrorCode.SUCCESS if error_count_by_operation == 0 else Constants.PatchOperationTopLevelErrorCode.ERROR,\n \"details\": errors_by_operation,\n \"message\": message\n }", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return self.__transaction_errors", "def set_error(self, index: int) -> None:\n ...", "def set_limit(self, errors):\n self.limit = errors", "def Errors(self):\n return self._get_attribute('errors')", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def pin_errors(self):\n for m in range(self.stage_width_list[-1]):\n error, _ = rqrmilib.calculate_submodel_error(self._get_native_object(), self.probe, len(self)-1, m)\n if error < 0: error = 0\n self.error_list[m] = int(error)\n self.rqrmi_state_changed = True\n return self.error_list", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def error_count(self, error_count):\n\n self._error_count = error_count", "def setError(self,err):\n self.error = err" ]
[ "0.6449945", "0.586166", "0.5679023", "0.56335896", "0.5626413", "0.56153715", "0.56153715", "0.55020136", "0.54732645", "0.5438516", "0.5403028", "0.53804886", "0.5344446", "0.53443104", "0.532449", "0.5315556", "0.5290784", "0.52629125", "0.52527165", "0.52467006", "0.5245283", "0.52259946", "0.52225095", "0.52133447", "0.518861", "0.5150667", "0.51299673", "0.5128954", "0.5104128", "0.505946" ]
0.63594306
1
Sets the inventory_item_group_key of this MigrateListingResponse.
def inventory_item_group_key(self, inventory_item_group_key): self._inventory_item_group_key = inventory_item_group_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def with_group_key(self, group_key):\n self.group_key = group_key\n return self", "def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n self.inventory[key] = host_dict\n return", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def group_id(self, group_id):\n\n self._group_id = group_id", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def item_group_href(self, item_group_href):\n\n self._item_group_href = item_group_href", "def group_identifier(self, group_identifier):\n\n self._group_identifier = group_identifier", "def set_group(self, group):\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def group(self, group):\n\n self._group = group", "def item_group_type(self, item_group_type):\n\n self._item_group_type = item_group_type", "def group(self, group):\n self._group = group", "def set_group(self, group: str) -> None:\n self.group = group", "def set_group(self, id_: str, player: str, group: list):\n self._groups[id_] = {\n 'player': player,\n 'group': group\n }", "def setitem_key_value(self):\n raise NotImplementedError", "def set(self, name_group, key, value):\n self.psettings.beginGroup(name_group)\n self.psettings.setValue(key, value)\n self.closeGroup()", "def instance_group(self, instance_group):\n if instance_group is None:\n raise ValueError(\"Invalid value for `instance_group`, must not be `None`\")\n\n self._instance_group = instance_group", "def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group", "def inventory_reference_id(self, inventory_reference_id):\n\n self._inventory_reference_id = inventory_reference_id", "def signing_group_id(self, signing_group_id):\n\n self._signing_group_id = signing_group_id", "def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self", "def group_oid(self, group_oid):\n\n self._group_oid = group_oid", "def signing_group_name(self, signing_group_name):\n\n self._signing_group_name = signing_group_name", "async def async_set_multiroom_group(self, multiroom_group):\n self._multiroom_group = multiroom_group", "def setGroup(self, group):\n\t\tself.config.GROUP = group" ]
[ "0.61608106", "0.5747907", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.54646176", "0.5375903", "0.53397626", "0.53107274", "0.53079605", "0.53079605", "0.53079605", "0.5276046", "0.5275524", "0.5234625", "0.51415646", "0.5023584", "0.50226706", "0.49493527", "0.49384618", "0.49356917", "0.49240038", "0.48625207", "0.47992325", "0.4719829", "0.47021872", "0.4699198" ]
0.8215857
0
Sets the inventory_items of this MigrateListingResponse.
def inventory_items(self, inventory_items): self._inventory_items = inventory_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(self, inventory):\n\n self._inventory = inventory", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def items(self, items: List[InlineResponse200Items]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be `None`\") # noqa: E501\n\n self._items = items", "def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data", "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def set_ingredients(self, ingredients: [Ingredient]):\n self.ingredients = ingredients", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def inventory(self):\n return self._inventory", "def inventory_id(self, inventory_id):\n if inventory_id is None:\n raise ValueError(\"Invalid value for `inventory_id`, must not be `None`\") # noqa: E501\n\n self._inventory_id = inventory_id", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "def update(self):\n try:\n data = self.api.get_inventory(self.site_id)\n inventory = data[\"Inventory\"]\n except KeyError:\n _LOGGER.error(\"Missing inventory data, skipping update\")\n return\n except (ConnectTimeout, HTTPError):\n _LOGGER.error(\"Could not retrieve data, skipping update\")\n return\n\n self.data = {}\n self.attributes = {}\n\n for key, value in inventory.items():\n self.data[key] = len(value)\n self.attributes[key] = {key: value}\n\n _LOGGER.debug(\"Updated SolarEdge inventory: %s, %s\", self.data, self.attributes)", "def get_users_inventory_with_http_info(self, **kwargs):\n\n all_params = ['inactive', 'size', 'page', 'filter_item_name', 'filter_item_id', 'filter_username', 'filter_group', 'filter_date']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_users_inventory\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'inactive' in params:\n query_params.append(('inactive', params['inactive']))\n if 'size' in params:\n query_params.append(('size', params['size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n if 'filter_item_name' in params:\n query_params.append(('filter_item_name', params['filter_item_name']))\n if 'filter_item_id' in params:\n query_params.append(('filter_item_id', params['filter_item_id']))\n if 'filter_username' in params:\n query_params.append(('filter_username', params['filter_username']))\n if 'filter_group' in params:\n query_params.append(('filter_group', params['filter_group']))\n if 'filter_date' in params:\n query_params.append(('filter_date', params['filter_date']))\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n 
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/inventories', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageResourceUserInventoryResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def populate_variants(self, inventory=None):\n self.variants = list()\n\n option_combos = self.generate_option_combos()\n\n for combo in option_combos:\n self.variants.append(Variant(\n self.style_number,\n option_combo=combo,\n inventory=inventory))", "def set_all(self, value):\n self.__items = value", "def __init__(self, items: List[InlineResponse200Items]=None): # noqa: E501\n self.swagger_types = {\n 'items': List[InlineResponse200Items]\n }\n\n self.attribute_map = {\n 'items': 'items'\n }\n self._items = items", "def setitems(self, items):\n self.clear()\n # FIXME: this allows you to pass in an OrderedDict as well :-)\n self.update(items)", "def get_items_from(self, inventory=False):\n # if no outer inventory is provided, assume own inventory is needed\n if not inventory:\n inventory = self.inventory\n # get items normally\n items_ = MetaBeing.get_items_from(self, inventory)\n # return items in question\n return items_", "def add_item_to_user_inventory_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'user_inventory_add_request']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_item_to_user_inventory\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `add_item_to_user_inventory`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'user_inventory_add_request' in params:\n body_params = params['user_inventory_add_request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/users/{id}/inventory', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InvoiceResource',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def inventory_reference_id(self, inventory_reference_id):\n\n self._inventory_reference_id = 
inventory_reference_id", "async def update_items(self):\n items = self.steam_api.get_game_items()\n\n with open(\"Dota/items.json\", 'w') as f:\n json.dump(items, f, ensure_ascii=True, indent=4)", "async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})", "def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")", "def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)", "def set_invocation_metadata(self, items: Tuple[Tuple[str, str]]):\n self._invocation_metadata = items", "def SetItems(self, items: Union[Iterable, dict]):\n if not items:\n return\n if isinstance(items, dict):\n items = [[key, str(value)] for key, value in items.items()]\n if self._sorted:\n items = sorted(items, key=lambda x: x[1])\n self._items = [key for key, _ in items]\n super().SetItems([value for _, value in items])\n else:\n if self._sorted:\n self._items = tuple(sorted(items))\n else:\n self._items = tuple(items)\n super().SetItems([str(v) for v in self._items])\n self.SetSelection(0)", "async def update(self) -> None:\n data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)\n self._update(data)", "def equip_items(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItems/\"))", 
"def items(self, items: List[RadioStation]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be `None`\") # noqa: E501\n\n self._items = items", "def Inventory(cls):\r\n l = ServerSet()\r\n rs = cls.find()\r\n for server in rs:\r\n l.append(server)\r\n return l", "def items(self) -> 'ItemsView[str, str]':\n return _EntityFixupItems(self)" ]
[ "0.64316744", "0.5630393", "0.56029606", "0.53314924", "0.52890193", "0.52213633", "0.5127491", "0.5053964", "0.50323474", "0.50142586", "0.49879345", "0.4986131", "0.48904306", "0.48881936", "0.48744634", "0.4842486", "0.48069787", "0.466679", "0.4665811", "0.46414807", "0.4606507", "0.45951387", "0.4577093", "0.45438474", "0.45054206", "0.45002583", "0.447945", "0.44677284", "0.44659328", "0.4442392" ]
0.78284794
0
Sets the listing_id of this MigrateListingResponse.
def listing_id(self, listing_id): self._listing_id = listing_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_listing(request, listing_id):\n listing = get_object_or_404(Listing, pk=listing_id)\n\n listing.delete()\n messages.success(\n request,\n 'Your listing has been removed from the database.')\n\n return redirect(reverse('addlisting'))", "def update(self, amz_listing):\n amz_listing.sku = self.asin\n amz_listing.title = self.title\n amz_listing.brand = self.brand\n amz_listing.model = self.model\n amz_listing.upc = self.upc\n amz_listing.quantity = self.quantity\n amz_listing.url = self.url\n amz_listing.salesrank = self.salesrank\n amz_listing.offers = self.offers\n amz_listing.hasprime = self.prime\n\n # Only update price if price information is provided\n if self._tag.xpath('.//Offers'):\n amz_listing.price = self.price", "def view_and_edit_listing(request, listing_id):\n categories = Category.objects.all()\n listing = get_object_or_404(Listing, pk=listing_id)\n\n if request.method == 'POST':\n editform = AddListingForm(\n request.POST,\n request.FILES,\n instance=listing)\n if editform.is_valid():\n listing.save()\n messages.success(\n request,\n 'Thank you. Your listing has been updated')\n return redirect(reverse('addlisting'))\n else:\n editform = AddListingForm(instance=listing)\n\n context = {\n 'editform': editform,\n 'listing': listing,\n 'categories': categories\n }\n return render(request, 'editlisting.html', context)", "def listing_show(listing_id):\n\n listing = Listing.query.get_or_404(listing_id)\n return (jsonify(listing=listing.serialize(isDetailed=True)), 200)", "def set_id_number(self, id_number):\n self.id_number = id_number", "def set_id(self, id_):\n\n self.id_ = id_", "def sportsbook_id(self, sportsbook_id):\n\n self._sportsbook_id = sportsbook_id", "def setID(self, idf):\n self.id = idf", "def deleteListing(id):\n try:\n # Call delete_one() on listings collection\n db.listings.delete_one({\"_id\": id})\n return redirect(url_for(\"main.landingPage\"))\n except (ValueError):\n # Return custom 500 error page, set status code to 500\n return render_template(\"500.html\"), 500", "def listing_create():\n listing_data = request.json.get(\"listing\")\n form = ListingCreateForm(data=listing_data)\n\n if form.validate():\n listing = Listing.create(form)\n db.session.commit()\n # TODO: reevaluate error with a try and except later\n return (jsonify(listing=listing.serialize(isDetailed=True)), 201)\n else:\n errors = []\n for field in form:\n for error in field.errors:\n errors.append(error)\n return (jsonify(errors=errors), 400)", "def set_id(self, id):\n self.data['id'] = id", "def add_comment(request, listing_id):\n if request.method == \"POST\":\n try:\n listing = Listing.objects.get(pk=listing_id)\n except Listing.DoesNotExist:\n return render(request, \"auctions/errors.html\", {\"error_message\":\n \"something went wrong, the id url argument is not valid\"})\n\n CommentForm = modelform_factory(Comment, exclude=(\"commenter\",\"listing\"))\n # validate and save from the formdata to the database\n form = CommentForm(request.POST)\n try:\n comment = form.save(commit=False)\n comment.commenter = request.user\n comment.listing = listing\n comment.save()\n except:\n # if something went wrong with comment form \n return render(request, \"auctions/errors.html\", {\"error_message\":\n \"something went wrong with the submission of your comment, try again\"})\n\n return redirect(reverse(\"single_listing\", \n args=[listing.title]) +f\"?id={listing.id}\")", "def loan_id(self, loan_id):\n\n self._loan_id = loan_id", "def get(self, request, listing_id, format=None):\n 
try:\n fav_listing = FavoriteListing.objects.get(user_id=request.user.id, listing_id=listing_id)\n except FavoriteListing.DoesNotExist:\n fav_listing = None\n\n response = {}\n\n # this should not be necessary since we have declared above that the person who access this url needs IsAuthenticated permission\n # if self.request.user.is_authenticated:\n response['status'] = 'ok'\n if fav_listing:\n fav_listing.delete()\n response['message'] = 'Listing removed from favorites!'\n else:\n fav_listing = FavoriteListing(user_id=request.user,\n listing_id=Listing.objects.get(id=listing_id))\n fav_listing.save()\n response['message'] = 'Listing added to favorites!'\n # else:\n # response['status'] = 'error'\n # response['message'] = 'You must be authenticated for this.'\n\n return Response(response)", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def recipe_id(self, recipe_id):\n\n self._recipe_id = recipe_id", "def building_id(self, building_id):\n if self.local_vars_configuration.client_side_validation and building_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `building_id`, must not be `None`\") # noqa: E501\n\n self._building_id = building_id", "def instance_id(self, instance_id):\n\n self._instance_id = instance_id", "def feed_id(self, feed_id):\n\n self._feed_id = feed_id", "def id(self, _id):\n self.metadata[\"id\"] = _id", "def set_id(self, id):\n self.__id = id", "def batch_id(self, batch_id):\n\n self._batch_id = batch_id", "def status_id(self, status_id):\n\n self._status_id = status_id", "def id_status(self, id_status):\n self._id_status = id_status", "def close_bid(request, listing_id): \n try:\n listing = Listing.objects.get(pk=listing_id) \n except Listing.DoesNotExist:\n return JsonResponse({\"success\":False})\n\n if request.user == listing.seller:\n listing.isActive = False\n listing.save()\n return JsonResponse({\"success\":True})\n\n return JsonResponse({\"success\":False})", "def update(self, listing):\n q = QSqlQuery()\n\n # Add the product group if it isn't there already\n q.exec_(\"INSERT OR IGNORE INTO ProductGroups(ProductGroupName) VALUES('{}')\".format(listing.productgroup))\n q.exec_(\"SELECT ProductGroupId FROM ProductGroups WHERE ProductGroupName='{}'\".format(listing.productgroup))\n q.first()\n\n productgroupId = q.value(0)\n\n # Get the category association\n q.exec_(\"SELECT CategoryId FROM ProductGroups WHERE ProductGroupId={}\".format(productgroupId))\n q.first()\n\n categoryId = q.value(0)\n\n # Add the merchant name\n merchname = re.sub(r\"'\", \"\\'\\'\", listing.merchant) # SQL uses two single quotes to escape a single quote...\n q.exec_(\"INSERT OR IGNORE INTO Merchants(MerchantName) VALUES('{}')\".format(merchname))\n q.exec_(\"SELECT MerchantId FROM Merchants WHERE MerchantName='{}'\".format(merchname))\n q.first()\n\n merchantId = q.value(0)\n\n tracking = 0\n myprice = 0\n mycost = 0\n fbafees = 0\n monthlyvolume = 0\n\n # Check if the listing has already been added to the database\n q.exec_(\"SELECT * FROM Products WHERE Asin='{}'\".format(listing.asin))\n q.first()\n if q.isValid():\n record = q.record()\n # The listing is already in the database. 
Add it's current values to the observation table\n q.prepare(\"INSERT INTO Observations(Asin, Timestamp, SalesRank, Offers, Prime, Price, MerchantId) \"\n \"VALUES(?, ?, ?, ?, ?, ?, ?)\")\n q.addBindValue(record.value('Asin'))\n q.addBindValue(record.value('Timestamp'))\n q.addBindValue(record.value('SalesRank'))\n q.addBindValue(record.value('Offers'))\n q.addBindValue(record.value('Prime'))\n q.addBindValue(record.value('Price'))\n q.addBindValue(record.value('MerchantId'))\n q.exec_()\n\n # Grab values that we don't want to overwrite\n q.exec_(\"SELECT Tracking, MyPrice, MyCost, FBAFees, MonthlyVolume FROM Products WHERE Asin='{}'\".format(\n listing.asin))\n q.first()\n tracking = q.value(0)\n myprice = q.value(1)\n mycost = q.value(2)\n fbafees = q.value(3)\n monthlyvolume = q.value(4)\n\n # Calculate the CRank\n crank = self.getListingRank(categoryId, listing.salesrank, listing.offers, listing.prime)\n\n # Determine if it is a private label product\n if (fuzz.partial_ratio(listing.merchant.lower(), listing.title.lower()) > 80) or \\\n (fuzz.partial_ratio(listing.merchant.lower(), listing.make.lower()) > 80):\n privatelabel = True\n else:\n privatelabel = False\n\n time = QDateTime.currentDateTimeUtc().toTime_t()\n\n q.prepare(\n 'INSERT OR REPLACE INTO Products(Tracking, CRank, Timestamp, Asin, ProductGroupId, CategoryId, SalesRank, Offers,'\n 'Prime, Price, MerchantId, Title, Url, PrivateLabel, Manufacturer, PartNumber, Weight, ItemLength,'\n 'ItemWidth, ItemHeight, MyPrice, MyCost, FBAFees, MonthlyVolume, UPC) '\n 'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')\n\n fields = [tracking, crank, time, listing.asin, productgroupId, categoryId, listing.salesrank, listing.offers,\n listing.prime, listing.price, merchantId, listing.title, listing.url, privatelabel,\n listing.make, listing.model, listing.weight / 100, listing.length / 100,\n listing.width / 100, listing.height / 100, myprice, mycost, fbafees, monthlyvolume, listing.upc]\n\n for field in fields:\n q.addBindValue(field)\n\n q.exec_()\n\n if q.lastError().type() != QSqlError.NoError:\n print('Could not insert record: ' + q.lastError().text())\n\n self.select()", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id", "def id(self, id):\n\n self._id = id" ]
[ "0.54151386", "0.50424653", "0.49489492", "0.49403444", "0.454123", "0.4485277", "0.4483447", "0.44773117", "0.44772324", "0.44687784", "0.4437293", "0.44051337", "0.4390934", "0.4387617", "0.43608093", "0.43608093", "0.4329542", "0.430929", "0.4308864", "0.4278942", "0.42528984", "0.42509404", "0.423614", "0.42245173", "0.4179956", "0.41413036", "0.41024202", "0.40986386", "0.40986386", "0.40986386" ]
0.8304697
0
Sets the marketplace_id of this MigrateListingResponse.
def marketplace_id(self, marketplace_id): self._marketplace_id = marketplace_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_AWSMarketplaceId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMarketplaceId', value)", "def registration_marketplace_id(self, registration_marketplace_id):\n\n self._registration_marketplace_id = registration_marketplace_id", "def listing_id(self, listing_id):\n\n self._listing_id = listing_id", "def feed_id(self, feed_id):\n\n self._feed_id = feed_id", "def sportsbook_id(self, sportsbook_id):\n\n self._sportsbook_id = sportsbook_id", "def get_marketplace(self, marketplace_id):\n return MarketplaceResource(self._config).get(marketplace_id)", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def team_id(self, team_id):\n\n self._team_id = team_id", "def branding_theme_id(self, branding_theme_id):\n\n self._branding_theme_id = branding_theme_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def financial_offer_id(self, financial_offer_id):\n\n self._financial_offer_id = financial_offer_id", "def set_id(self, id_):\n\n self.id_ = id_", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def put_place_by_id(place_id):\n place_obj = storage.get(\"Place\", place_id)\n if place_obj is None:\n abort(404)\n json_obj = request.get_json()\n if not request.json:\n return jsonify(\"Not a JSON\"), 400\n ignore = [\"id\", \"user_id\", \"city_id\", \"created_at\", \"updated_at\"]\n for key, value in json_obj.items():\n if key not in ignore:\n setattr(place_obj, key, value)\n place_obj.save()\n updated_place = place_obj.to_dict()\n return jsonify(updated_place), 200", "def put_place(place_id):\n place = storage.get('Place', place_id)\n if place is None:\n abort(404)\n kwargs = request.get_json()\n if kwargs is None:\n return ('Not a JSON', 400)\n for k, v in kwargs.items():\n setattr(place, k, v)\n place.save()\n return (jsonify(place.to_json()), 200)", "def set_id(self, id):\n self.data['id'] = id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def market(self, market):\n self._market = market", "def _set_id(self):\n raise NotImplementedError()", "def warehouse_id(self, warehouse_id):\n\n self._warehouse_id = warehouse_id", "def set_available_places_for_run(\n self,\n run_id: str,\n actual_available_places: int,\n listed_available_places: int,\n ) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n UPDATE runs\n SET actual_available_places = ?, listed_available_places = ?\n WHERE run_id = ?\n \"\"\",\n (\n actual_available_places,\n listed_available_places,\n run_id,\n ),\n )\n conn.commit()\n return None", "def update_place(place_id):\n place = storage.get(Place, place_id)\n\n if place is None:\n abort(404)\n\n put_data = request.get_json()\n if not put_data:\n abort(400, 'Not a JSON')\n\n for k, v in put_data.items():\n if k not in ['id', 'user_id', 'city_id', 'created_at',\n 'updated_at']:\n setattr(place, k, v)\n place.save()\n storage.save()\n return make_response(jsonify(place.to_dict()), 200)", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def flavor_id(self, flavor_id):\n self._flavor_id = flavor_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def stock_id(self, stock_id):\n\n 
self._stock_id = stock_id" ]
[ "0.68738204", "0.60930204", "0.5767067", "0.5177802", "0.5172527", "0.49900728", "0.49834523", "0.49834523", "0.49834523", "0.49834523", "0.49759004", "0.49217004", "0.4853357", "0.48184267", "0.47949788", "0.47403908", "0.4719058", "0.46994156", "0.46824563", "0.4669242", "0.4631705", "0.4620464", "0.460385", "0.45889878", "0.45663267", "0.45663267", "0.4558081", "0.4558081", "0.4558081", "0.45579296" ]
0.7607272
0
Sets the status_code of this MigrateListingResponse.
def status_code(self, status_code): self._status_code = status_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_code(self, status_code):\n allowed_values = [1, 100, 101, 102, 103, 104, 105] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0}), must be one of {1}\" # noqa: E501\n .format(status_code, allowed_values)\n )\n\n self._status_code = status_code", "def extract_status(self, status_headers):\n self.status = status_headers.get_statuscode()\n if not self.status:\n self.status = '-'", "def setStatus(self, status):\n self.__status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def status_code(self, status_code):\n allowed_values = [\"DRAFT\", \"IN_PROGRESS\", \"CREATED\", \"COMPLETED\", \"PARTIAL\", \"FAILED\", \"REFUNDED\", \"CANCELLED\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0}), must be one of {1}\" # noqa: E501\n .format(status_code, allowed_values)\n )\n\n self._status_code = status_code", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n self._status = status" ]
[ "0.62368333", "0.61369103", "0.59862417", "0.5981169", "0.5981169", "0.5981169", "0.5980721", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.59049493" ]
0.6872091
0
Sets the warnings of this MigrateListingResponse.
def warnings(self, warnings): self._warnings = warnings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warnings(self):\n return self.__warnings", "def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings", "def setwarnings(self, on):\n # diese Funktion macht eigentlich nichts, ist aber wegen der Kombatibilitaet vorhanden\n print(f\"setwarnings: {on}\")", "def warnings(self) -> List[Error]:\n return self._get_warnings()", "def get_warnings(self):\n pass", "def warnings(self) -> List[Error]:", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warnings(self):\n return self.warning_buffer.warnings", "def warning(self, warning):\n pass", "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))", "def warning_spoilers(self, warning_spoilers):\n\n self._warning_spoilers = warning_spoilers", "def show_warnings(self):\n for w in self.warnings:\n w()", "def allow_warnings(self):\n return self._allow_warnings", "def set_warning(warning):\n impl.set_warning(**locals())", "async def setwarns(self, ctx, user: discord.Member, warnings: int = None):\r\n server = ctx.message.guild\r\n await self._create_warn(server, user)\r\n dataIO.save_json(self.JSON, self.data)\r\n if not warnings:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings == 0:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings <= 0:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n if warnings >= 5:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] = warnings\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been set to **{}**\".format(user.name, warnings))", "def warnings_active(self) -> List[Error]:", "def warning(self) -> 'outputs.AnyResponse':\n return pulumi.get(self, \"warning\")", "def get_warning(self) -> List[str]:\n return []", "def get_warning(self) -> List[str]:\n return []", "def eval_warnings(self):\n\n # Ensure the minimum number of warnings were raised.\n assert len(self.war) >= len(self.warn_msgs)\n\n # Test the warning messages, ensuring each attribute is present.\n testing.eval_warnings(self.war, self.warn_msgs)\n return", "async def warnings(self, ctx):\n server = ctx.message.server\n server_id = server.id\n if not (server_id in self.warnlist2 and self.warnlist2[server_id]):\n await self.bot.say(\"No users are currently punished.\")\n return\n\n def getmname(mid):\n member = discord.utils.get(server.members, id=mid)\n if member:\n if member.nick:\n return '%s (%s)' % (member.nick, member)\n else:\n return str(member)\n else:\n return '(member not present, id #%d)'\n\n headers = ['Member', 'Warning Number', 'Moderator', 'Reason']\n table = []\n disp_table = []\n now = time.time()\n for member_id, data in 
self.warnlist2[server_id].items():\n\n #if not member_id.isdigit():\n #continue\n print (\"704\")\n member_name = getmname(data['User'])\n warnnum = data['Warning Number']\n punisher_name = getmname(data['Mod'])\n reason = data['Reason']\n table.append((member_name, warnnum, punisher_name, reason))\n\n #for _, name, warnum, mod, reason in sorted(table, key=lambda x: x[0]):\n disp_table.append((member_name, warnnum, punisher_name, reason))\n\n for page in pagify(tabulate(disp_table, headers)):\n await self.bot.say(box(page))", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass" ]
[ "0.6353443", "0.6246493", "0.61963874", "0.6170872", "0.61554074", "0.60583675", "0.6048729", "0.6048729", "0.5980356", "0.5967424", "0.5896825", "0.58848745", "0.5873911", "0.5829192", "0.5811515", "0.56476164", "0.56447476", "0.56372", "0.562612", "0.5593209", "0.5593209", "0.55876017", "0.548944", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909", "0.5425909" ]
0.74362904
0
Return 'WHERE' clause that implements kwds_filter constraints.
def _build_where_clause(**kwds_filter): clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if nonstringiter(val):\n clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))\n for x in val:\n params.append(x)\n else:\n clause.append(key + '=?')\n params.append(val)\n\n clause = ' AND '.join(clause) if clause else ''\n return clause, params", "def get_where_clause(self, params: Dict) -> str:\n return ''", "def get_where_clause(self, feature, params=()):\n where_clause = []\n for pk in self.pk_cols:\n params += (feature[pk],)\n where_clause.append(pk + \" = (?)\")\n\n where_clause = \" WHERE \" + \" AND \".join(where_clause)\n return where_clause, params", "def where(self, *wheres, **kw):\n if wheres: # arbitrary expressions\n self._whereskw.update(kw)\n for where in wheres:\n self._wheres.append(where)\n else:\n # plain x=<val> expressions\n self._kw.update(kw)\n return self", "def _getSQLWhere(self, inputTable, queryMeta):\n\t\tsqlPars = {}\n\t\tinputPars = dict((p.name, p.value) for p in inputTable.iterParams())\n\t\treturn base.joinOperatorExpr(\"AND\",\n\t\t\t[cd.asSQL(inputPars, sqlPars, queryMeta)\n\t\t\t\tfor cd in self.condDescs]), sqlPars", "def where(self, cond):\n return self.filter(lambda x: _(x).contains(cond))", "def where(condition):\r\n return ('', []) if condition.clause == '' else (f'WHERE {condition.clause}', list(condition.params))", "def where(condition):\n return partial(filter, condition)", "def _extract_where(self, query) :\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# discard the insert information\n\t\tif self.n.sparql.insert in query :\n\t\t\tdel query[self.n.sparql.insert]\n\t\t\n\t\t# discard the delete information\n\t\tif self.n.sparql.delete in query :\n\t\t\tdel query[self.n.sparql.delete]\n\t\t\n\t\t# build the where clause with outlined variables\n\t\treturn self.python_to_SPARQL_long(query)", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"where\")", "def where(self, condition):\n raise NotImplementedError(\"This should have been implemented.\")", "def sqlwhere(dictionary, grouping=' AND '):\n return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)", "def condition_filter(self, filter_id):\r\n return filters.Filter(self, filter_id)", "def build_where_statement(attr_converter):\n statements = []\n values = []\n # iterate over the querystring params\n for attr, val in request.args.iteritems():\n try:\n statements.append(attr_converter[attr]['converter'](\n attr_converter[attr]['column']))\n values.append(val) # add after the possible keyerror\n except KeyError:\n # TODO: handle invalid params somehow or continue to ignore\n pass\n if statements:\n return 'WHERE '+' AND '.join(statements), values\n return '', []", "def where(cls, *lst, **dct):\n cls.runtime.set_where(lst, dct)\n return cls", "def _build_where(where):\n # Build limit\n if 'rn' in where:\n limit_sql = 'limit 1 offset {}'.format(where['rn'])\n else:\n limit_sql = ''\n # Build where\n where_sql = ''\n where_values = []\n for key, value in where.items():\n if isinstance(value, list):\n if len(value) == 1:\n where_sql += ' and {} = {}'.format(key, value[0])\n else:\n where_sql += ' and {} in {}'.format(key, tuple(value))\n elif key == '*': # Literal where clause\n 
where_sql += ' and {}'.format(value)\n elif key == 'rn': # Ignore\n pass\n else:\n if value is None:\n where_sql += ' and {} is Null'.format(key)\n else:\n where_sql += ' and {} = ?'.format(key)\n where_values.append(value)\n if len(where_sql) > 0:\n where_sql = 'where ' + where_sql[5:]\n # Done\n return where_sql, where_values, limit_sql", "def where(self, predicate=lambda row: True):\n where_table = Table(self.columns)\n where_table.rows = list(filter(predicate, self.rows))\n return where_table", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def where(self, column, *args):\n\n operator, value = self._extract_operator_value(*args)\n\n if value is None:\n value = \"\"\n elif value is True:\n value = \"1\"\n elif value is False:\n value = \"0\"\n\n if inspect.isfunction(column):\n builder = column(self.new())\n self._wheres += (\n (QueryExpression(None, operator, SubGroupExpression(builder))),\n )\n elif isinstance(value, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, operator, SubSelectExpression(value))),\n )\n else:\n self._wheres += ((QueryExpression(column, operator, value, \"value\")),)\n return self", "def _blacklisted_pairings_filter_query(self):\n if self._restrict_exceptions_list:\n blacklisted_filter_sql = sql.SQL('is_blacklisted IS TRUE')\n else:\n blacklisted_filter_sql = sql.SQL('TRUE')\n return blacklisted_filter_sql", "def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query", "def where_raw(self, query: str, bindings=()):\n self._wheres += ((QueryExpression(query, \"=\", None, \"value\", raw=True)),)\n return self", "def AddWhereTerms(self, where_cond_pairs, **kwargs):\n where_cond_pairs = where_cond_pairs or []\n\n for cond, args in where_cond_pairs:\n assert _IsValidWhereCond(cond), cond\n assert cond.count('%s') == len(args), cond\n self.where_conds.append(cond)\n self.where_args.extend(args)\n\n for col, val in sorted(kwargs.items()):\n assert _IsValidColumnName(col), col\n eq = True\n if col.endswith('_not'):\n col = col[:-4]\n eq = False\n\n if isinstance(val, set):\n val = list(val) # MySQL inteface cannot handle sets.\n\n if val is None or val == []:\n op = 'IS' if eq else 'IS NOT'\n self.where_conds.append(col + ' ' + op + ' NULL')\n elif isinstance(val, list):\n op = 'IN' if eq else 'NOT IN'\n # Sadly, MySQLdb cannot escape lists, so we flatten to multiple \"%s\"s\n self.where_conds.append(\n col + ' ' + op + ' (' + PlaceHolders(val) + ')')\n self.where_args.extend(val)\n else:\n op = '=' if eq else '!='\n self.where_conds.append(col + ' ' + op + ' %s')\n 
self.where_args.append(val)", "def _validate_select_where(self):\r\n #check that there's either a = or IN relationship with a primary key or indexed field\r\n equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)]\r\n token_comparison = any([w for w in self._where if isinstance(w.value, Token)])\r\n if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison:\r\n raise QueryException('Where clauses require either a \"=\" or \"IN\" comparison with either a primary key or indexed field')\r\n\r\n if not self._allow_filtering:\r\n #if the query is not on an indexed field\r\n if not any([w.index for w in equal_ops]):\r\n if not any([w.partition_key for w in equal_ops]) and not token_comparison:\r\n raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the querset')", "def sql_filter(my_table='', colName='', var='', **kw):\n\tif (my_table=='') or (colName=='') or (var==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tsql = my_table+\".\"+colName+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql,clauseTables=[])", "def where(self, cond, other, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.where)(\n self, cond=cond, other=other, **kwargs\n )", "def where(self, table, what='*', order=None, group=None, limit=None, \n offset=None, _test=False, **kwargs):\n where = self._where_dict(kwargs) \n return self.select(table, what=what, order=order, \n group=group, limit=limit, offset=offset, _test=_test, \n where=where)", "def condition_filters(self):\r\n return filters.Filters(self)" ]
[ "0.70142585", "0.68189335", "0.66024506", "0.6276264", "0.6107378", "0.6103338", "0.6057521", "0.5986232", "0.59493124", "0.5914506", "0.5914506", "0.5914506", "0.58369356", "0.5758533", "0.569798", "0.56843215", "0.56451374", "0.5600097", "0.55459034", "0.5538299", "0.5530821", "0.55130196", "0.5501634", "0.54928416", "0.5474489", "0.5445694", "0.5443927", "0.54369766", "0.54351234", "0.5434608" ]
0.7027632
0
Normalize value for use as SQLite column name.
def _normalize_column(column):
    if not isinstance(column, str):
        msg = "expected column of type 'str', got {0!r} instead"
        raise TypeError(msg.format(column.__class__.__name__))
    column = column.strip()
    column = column.replace('"', '""')  # Escape quotes.
    if column == '':
        column = '_empty_'
    return '"' + column + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, value):\n return str(value)", "def _normalize_expanded_field(value):\n\n value = value.strip()\n value = re.sub(r'\\s{2,}', ' ', value)\n value = re.sub(r'/{2,}', '/', value)\n value = re.sub(r'\\\\{2,}', '\\\\\\\\', value)\n value = re.sub(r'-{2,}', '-', value)\n value = re.sub(r'\\*{2,}', '*', value)\n value = re.sub(r'\\.{2,}', '.', value)\n value = value.upper()\n\n return value", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def normalize(val):\n \n if val.find('-') != -1:\n val = val.replace('-','_')\n\n return val", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def normalize(value):\n value = value.lower()\n for normalized, compare in _NORMALIZE.iteritems():\n if value in compare:\n return normalized\n return value.upper()", "def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value", "def normalize(item):\n item = item.lower().strip().rstrip('_')\n return item", "def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name", "def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value", "def _normalize(self, key, value):\n\n # None value should not be converted by normalizer\n if value is None:\n return None\n\n normalize_func = getattr(self, 'normalize_{0}'.format(key),\n lambda x: x)\n\n return normalize_func(value)", "def safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)", "def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value", "def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name", "def normalize_name(field_name):\n fixes = (\n (r\"/\", \"_per_\"),\n (r\"%\", \"_pct_\"),\n (r\"\\W\", \"_\"),\n (r\"^_+\", \"\"), # remove '_' if field_name begins with '_'\n (r\"_+$\", \"\"),\n (r\"__+\", \"_\"),\n )\n result = field_name.strip().lower() or None\n # result = field_name.strip().upper() or None\n if result:\n if result.endswith(\"?\"):\n if not re.match(r\"is[_\\W]\", result):\n result = \"is_\" + result\n for pattern, replacement in fixes:\n result = re.sub(pattern, replacement, result)\n return result", "def _valid_column(column_name):\n return str(column_name)", "def normalize_username(value):\n return value.lower()", "def _normalize_package_name(self, name):\n return Prepared.normalize(name)", "def normalize_column(data: DataFrame, column: str):\n m = mean(data[column])\n s = sd(data[column])\n return data[column].map(lambda x: (x - m) / 
s)", "def _sanitize_field_name(self, field_name):\n field_name = field_name.replace(self._field_prefix, '')\n return field_name.replace('.', '_')", "def _normalize(self, metric_name, submit_method, prefix):\n metric_prefix = \"mongodb.\" if not prefix else \"mongodb.{0}.\".format(prefix)\n metric_suffix = \"ps\" if submit_method == RATE else \"\"\n\n # Replace case-sensitive metric name characters\n for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():\n metric_name = re.compile(pattern).sub(repl, metric_name)\n\n # Normalize, and wrap\n return u\"{metric_prefix}{normalized_metric_name}{metric_suffix}\".format(\n normalized_metric_name=self.normalize(metric_name.lower()),\n metric_prefix=metric_prefix, metric_suffix=metric_suffix\n )", "def clean_numeric_column(name : float) -> float:\n if name > -1 and name < 1:\n name = 0\n return name", "def apply_column_value(raw_column_name, column_value, model, mapping, is_extra_data, cleaner):\n # If the item is the extra_data column, then make sure to save it to the\n # extra_data field of the database\n if raw_column_name in mapping:\n table_name, mapped_column_name, display_name, is_extra_data = mapping.get(raw_column_name)\n\n # special postal case:\n if mapped_column_name in ['postal_code', 'owner_postal_code']:\n if '-' in str(column_value):\n postal = str(column_value).split('-')[0].zfill(5)\n ext = str(column_value).split('-')[1].zfill(4)\n column_value = postal + '-' + ext\n column_value = str(column_value).zfill(5)\n\n cleaned_value = None\n if cleaner:\n # Get the list of Quantity fields from the Column object in SEED. This is non-ideal, since the\n # rest of the mapping code does not use SEED models. Perhaps make this an argument.\n if (model.__class__.__name__, mapped_column_name) in apps.get_model('seed',\n 'Column').QUANTITY_UNIT_COLUMNS:\n # clean against the database type first\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n\n # This is a temporary fix for when the raw_column_name and the mapped_column_name\n # are the same. It causes the units to be cast twice since the cleaner look up finds\n # the same column twice. 
The cleaner needs to be cleaned up quite a bit to handle\n # this error correctly.\n if mapped_column_name != raw_column_name:\n # now clean against the raw name with pint (Quantity Units) because that's the column\n # that holds the units needed to interpret the value correctly\n cleaned_value = cleaner.clean_value(cleaned_value, raw_column_name,\n is_extra_data)\n else:\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n else:\n cleaned_value = default_cleaner(column_value)\n\n if is_extra_data:\n if hasattr(model, 'extra_data'):\n # only save it if the model and the mapping are the same\n if model.__class__.__name__ == table_name:\n if isinstance(cleaned_value, (datetime, date)):\n # TODO: create an encoder for datetime once we are in Django 1.11\n model.extra_data[mapped_column_name] = cleaned_value.isoformat()\n else:\n model.extra_data[mapped_column_name] = cleaned_value\n else:\n # Simply set the field to the cleaned value if it is the correct model\n if model.__class__.__name__ == table_name:\n setattr(model, mapped_column_name, cleaned_value)\n\n return model", "def normalize(path):\n return os.path.normcase(os.path.realpath(path))", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def _normalize_query(self, query):\n return re.sub('\\s+', ' ', query).strip().lower()", "def _normalize(self, entry):\n sql = generalize_sql(entry.get('query'))\n return '{}-{}-{}'.format(self.REPORT_LABEL, sql, entry.get('query_class'))" ]
[ "0.67484534", "0.6414139", "0.64000803", "0.6394291", "0.62145936", "0.62083966", "0.62030125", "0.6104114", "0.6069231", "0.604914", "0.59973955", "0.5969188", "0.59618396", "0.59541255", "0.5941385", "0.5940318", "0.58399045", "0.5810996", "0.5807061", "0.5782344", "0.57602286", "0.57570565", "0.5699301", "0.56776536", "0.5676518", "0.5676518", "0.5676518", "0.56664056", "0.5626696", "0.5621128" ]
0.6757992
1
Alternate constructor to load an existing collection of records into a temporary SQLite database. Loads data (an iterable of lists, tuples, or dicts) into a temporary table
def from_records(cls, data, columns=None):
    temptable = TemporarySqliteTable(data, columns)
    return cls(temptable.connection, temptable.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_records(cls, data, columns=None):\n connection, table = _load_temp_sqlite_table(columns, data)\n return cls(connection, table)", "def load_data(cursor, table, *args, **kwds):\n try:\n records, = args\n columns = None\n except ValueError:\n columns, records = args\n\n default = kwds.pop('default', '')\n if kwds:\n msg = 'load_data() got unexpected keyword argument {0!r}'\n raise TypeError(msg.format(next(iter(kwds.keys()))))\n\n records = iter(records)\n first_record = next(records, None)\n if columns:\n if first_record:\n records = chain([first_record], records)\n else:\n if not first_record:\n return # <- EXIT! (No table created.)\n try: # Try mapping.\n columns = list(first_record.keys())\n records = chain([first_record], records)\n except AttributeError:\n try: # Try namedtuple.\n columns = first_record._fields\n records = chain([first_record], records)\n except AttributeError:\n columns = first_record # Use first row as column names.\n\n if not isinstance(columns, Iterable) or isinstance(columns, str):\n msg = 'expected iterable of strings, got {0!r}'\n raise TypeError(msg.format(columns))\n columns = list(columns) # Make sure columns is a sequence.\n\n if isinstance(first_record, Mapping):\n records = ([rec.get(c, '') for c in columns] for rec in records)\n\n with savepoint(cursor):\n if table_exists(cursor, table):\n alter_table(cursor, table, columns, default=default)\n else:\n create_table(cursor, table, columns, default=default)\n insert_records(cursor, table, columns, records)", "def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()", "def __init__(self,dbname='',**kwds):\n self._skip = kwds.get('skip',[])\n self._limit= kwds.get('limit',[])\n self._keys= kwds.get('keys',[])\n self._db = getDBConnection()[dbname]\n self._collection_name=kwds.get('collection','all')\n self._collection = []\n self._skip_rec=0\n self._props = {}\n if self._limit and self._skip:\n self._nrows = self._limit[0]\n if len(self._limit)>1:\n self._ncols = self._limit[1]\n else:\n self._ncols = 1\n if len(self._skip)==2:\n self._skip_rows=self._skip[0]\n self._skip_cols=self._skip[1]\n else:\n self._skip_rec = self._skip[0]\n self._table=dict()\n self._is_set=False\n self._set_collection()\n self._row_heads=[]\n self._col_heads=[]", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n 
\"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"[email protected]\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def __init__(self, db_location = ':memory:'):\n self.connection = sqlite3.connect(db_location)\n self.cur = self.connection.cursor()\n self.create_table()", "def load(values):\n import sqlite3\n conn = sqlite3.connect('./example.db')\n df = pd.DataFrame(values)\n df.to_sql('observations', conn)", "def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_fixtures(self, dbname, table, data):\n db = self.databases[dbname]['db']\n db.execute('BEGIN')\n for row in data:\n columns = row.keys()\n q = db.Insert(table, cols=columns)\n db.execute(q, row)\n db.execute('COMMIT')", "def setup_sample_data(no_of_records):\n rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'}\n for counter in range(0, no_of_records)]\n return rows_in_database", "def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))", "def SQLNewFactory(cls, data, dbh=None, dbh_key = \"default\"):\n release = False\n if dbh is None:\n release = True\n dbh = dbstuff.getRW(dbh_key)\n try:\n columns = []\n values = []\n for k,v in data.items():\n columns.append('`' + k + '`')\n values.append(v)\n query = \"INSERT INTO \" + cls.SQLTable + \" (\" + \",\".join(columns) + \") VALUES (\" + \",\".join([\"%s\" for v in values]) + \")\"\n c = dbh.cursor()\n if (DEBUG):\n print query\n c.execute( query, tuple(values) )\n id = c.lastrowid\n c.close()\n dbh.commit()\n finally:\n if release:\n dbstuff.release(dbh,dbh_key)\n return cls(id, dbh)", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def __init__(self, db_connection, table):\r\n self.elements = []\r\n self.db = db_connection\r\n self.table = table", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n 
db.session.add(expenditure)\n\n db.session.commit()", "def __init__(self, home_dir, temp_tables, *args, **kwargs):\n self.home_dir = home_dir\n self.temp_tables = temp_tables\n self._results = None\n self.haveExpr = False\n self.module = _MODULE_NAME\n self.type = 'MOCKDB'\n self.configdict = {'user': 'non-user',\n 'passwd': 'non-passwd',\n 'meta_file': 'non-file',\n 'meta_section': 'non-section'}\n # register data converters\n sqlite3.register_adapter(datetime.datetime, adapt_timestamp)\n sqlite3.register_converter('TIMESTAMP', convert_timestamp)\n sqlite3.register_converter('DATE', convert_timestamp)\n\n\n needSetup = False\n\n # see if the database needs to be set up\n if not os.path.exists(os.path.join(self.home_dir, DB_FILE)):\n needSetup = True\n elif not os.path.isfile(os.path.join(self.home_dir, DB_FILE)):\n shutil.rmtree(os.path.join(self.home_dir, DB_FILE))\n needSetup = True\n # initialize the connection\n sqlite3.Connection.__init__(self, database=os.path.join(self.home_dir, DB_FILE),\n detect_types=sqlite3.PARSE_DECLTYPES,\n check_same_thread=False)\n cur = self.cursor()\n cur.execute(\"PRAGMA synchronous = OFF\")\n cur.close()\n self._autocommit = False\n self.setupTempTables()\n if needSetup:\n self.setup()", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def __init__(self, table_id='', columns=(),\n file_name='', table_data=(), verbose=True):\n self.table_id = table_id\n self.columns = list(columns)\n self.file_name = file_name\n self.table_data = list(table_data)\n self.verbose = verbose", "def from_database(cls, expt_class=ImagingExperiment,\n name='unnamed', parallelize=False, **db_kwargs):\n trial_ids = fetch_trials(**db_kwargs)\n return cls.from_trial_ids(trial_ids, expt_class=expt_class,\n name=name, parallelize=parallelize)", "def test_create_from_dicts(self):\n cols = list(zip(*self.dtypes))[0]\n dicts = [dict([(cols[i], d[i]) for i in xrange(len(d))])\n for d in self.idata]\n\n tbl = Table.create(\n ':memory:', \"Bar\", dicts, verbose=True,\n primary_key='id', autoincrement=True)\n\n self.check_index(self.idata, tbl.select())\n for idx, col in enumerate(cols):\n if col == 'id':\n continue\n self.check_data(self.idata[:, [0, idx]], tbl[col])", "def __init__(self, filename='store.sqlite'):\n self.conn = sqlite3.connect(filename or ':memory:')\n self.cur = self.conn.cursor()\n self.create_tables()\n self.changed = False", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)", "def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)" ]
[ "0.6956785", "0.6560394", "0.5986285", "0.597718", "0.5919446", "0.5904428", "0.58205336", "0.57884705", "0.5786599", "0.57413965", "0.5737391", "0.57340336", "0.56946224", "0.5691723", "0.56570345", "0.56243944", "0.56065536", "0.5595917", "0.55688184", "0.5563457", "0.55514586", "0.5522938", "0.54994094", "0.54774976", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403", "0.5471403" ]
0.7079159
0
Parse the auditbeat log file to generate the audit event model and write to the result file (optional)
def parse(self, output=True):
    if not self.type == LogType.audit:
        log.error("LogParser doesn't support nonetype yet.")
        return

    stashes = list()
    with open(self.path_log, 'r') as f:
        for line in f.readlines():
            event: Dict = json.loads(line)
            keys = event.keys()
            # drop irrelevant keys of dict
            for key in DROPS:
                if key in event.keys():
                    event.pop(key)
            # retrieve json info
            timestamp, process, file = None, None, None
            if "@timestamp" in event.keys():
                timestamp = event["@timestamp"]
            if "process" in event.keys():
                process = event["process"]
            if "file" in event.keys():
                file = event["file"]
            try:
                audit: Dict = event["auditd"]
            except KeyError:
                raise KeyError(f"line: {line} does not have audit field, parse failed.")
            # reconstruct audit unit
            paths, session = None, None
            if "paths" in audit.keys():
                paths = audit["paths"]
            if "session" in audit.keys():
                session = audit["session"]
            try:
                msg_type, result, sequence, data = \
                    audit["message_type"], audit["result"], audit["sequence"], audit["data"]
            except KeyError:
                raise KeyError(f"Audit {audit} does not have certain keys, parse failed.")
            auditd = Auditd(paths, msg_type, sequence, result, data, session)
            beat_state = BeatState(timestamp, process, file, auditd)
            # TODO: the current code is to add dict format data
            # self.events.append(beat_state)
            stashes.append(beat_state)
    return stashes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def parse_log_file(filename, job_name):\n\n time_re = \"(\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2})\"\n time_pat = re.compile(time_re)\n pat = re.compile(time_re + \".*RUM\\.Workflow.*(START|FINISH)\\s+(.*)\")\n\n time_fmt = \"%Y/%m/%d %H:%M:%S\"\n\n first_time = None\n \n with open(filename) as f:\n for line in f:\n if first_time is None:\n m = time_pat.match(line)\n if m is None:\n raise Exception(\"Couldn't parse time from \" + line)\n tm = m.group(1)\n print \"TM is \" + str(tm)\n first_time = time.strptime(tm, time_fmt)\n print \"First time is \" + str(first_time)\n\n yield Event(first_time, 'START', 'log', job_name, filename)\n m = pat.match(line)\n if (m is not None):\n (tm, type, step) = m.groups()\n t = time.strptime(tm, time_fmt)\n e = Event(t, type, step, job_name, filename)\n yield e", "def generate_audit(self, output_path):\n \n with open(output_path, 'wb') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow([entry[0] for entry in self.fields])\n for trade, bo_error in self.errors.items():\n values = self.get_values(trade)\n csvwriter.writerow(values)\n \n print('Output written to %s' % output_path)", "def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. 
get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump", "def export_log(self):\r\n if self.log[\"datetime\"] is not None and not self.log[\"datetime\"] == \"\":\r\n logs_dir = ''\r\n user = 'default'\r\n program_data = 'data\\program_data.json5'\r\n with open(program_data) as f:\r\n config = json.load(f)\r\n logs_dir = config.get(\"logs_records_path\", \"\")\r\n user = config.get(\"user\", \"default\")\r\n file_name = user+\" \"+self.log[\"datetime\"].replace(\"/\", \"\")\r\n file_name = file_name.replace(\" \", \"_\")\r\n file_name = file_name.replace(\":\", \"\")\r\n cwd = os.getcwd()\r\n if not logs_dir == \"\" and os.path.exists(logs_dir):\r\n if not user in os.listdir(logs_dir):\r\n os.makedirs(os.path.join(logs_dir, user))\r\n logs_dir = os.path.join(logs_dir, user)\r\n file_name = os.path.join(logs_dir, file_name)\r\n self.save_records(file_name)\r\n elif \"logs\" in os.listdir(cwd):\r\n folder = os.path.join(cwd, \"logs\")\r\n file_name = os.path.join(folder, file_name)\r\n self.save_records(file_name)\r\n self.reset_values()", "def read_game_logs(file_path):\n\n if os.path.isfile(file_path):\n with open(file_path, \"r\") as read_file:\n log = json.load(read_file)\n # event_type = set([e[\"event\"] for e in log ])\n # the event types: command, text_message, set_attribute, join\n # print(\"event types\", event_type)\n\n # sort all messages chronologically\n log.sort(key=lambda x: x[\"date_modified\"])\n\n start = None\n end = None\n real_end = None # WHen The came master says COngrats or you die, because rest of the messages looks like bugs...\n episode_list = []\n length = len(log)\n game_finished = False\n # Episode are being searched between 2 starts commands\n # only the one where the command done has been issued is kept\n for i, l in enumerate(log):\n if \"command\" in 
l.keys():\n if l[\"command\"] == \"start\":\n if start == None:\n start = i\n elif end == None:\n end = i\n if l[\"command\"] == \"done\":\n game_finished = True\n\n if l[\"user\"][\"id\"] == 1 and l[\"event\"] == \"text_message\" and type(l[\"message\"]) is str and (\n l[\"message\"].startswith(\"Congrats\") or l[\"message\"].startswith(\n \"The rescue robot has not reached you\")):\n real_end = i + 1 # +1 because we want to include this message in the log slice...\n if start is not None and end is not None:\n if game_finished:\n episode_list.append(log[start:real_end])\n start = end\n end = None\n real_end = None\n game_finished = False\n\n if i + 1 == length:\n if start is not None and end is None and game_finished:\n episode_list.append(log[start:real_end])\n\n score_list = {}\n for i, e in enumerate(episode_list):\n # the number of answers the avatar utters gives us the number of question asked\n # num_questions = sum(\n # [1 for m in e if m[\"user\"][\"name\"] == \"Avatar\" and m[\"event\"] == \"text_message\"])\n\n # Just sum every messages ending with a question mark issueed by the user...\n num_questions = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].endswith(\"?\")])\n\n # user id 1 is alway the game master, we are looping here on the messages of the \"real\" player\n # when we tell the avatar to change location, we don't get an answer, this is why the substraction gives the number of orders\n # this does not include the order \"done\"\n # num_orders = sum(\n # [1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n # \"event\"] == \"text_message\"]) - num_questions\n\n # Just sum every order of type \"go west\". 
Describe orders are not counted.\n num_orders = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and (\n \"east\" in m[\"message\"].lower() or \"north\" in m[\"message\"].lower() or \"west\" in m[\n \"message\"].lower() or \"south\" in m[\"message\"].lower() or \"back\" in m[\"message\"].lower())])\n\n game_won = sum([1 for m in e if m[\"user\"][\"id\"] == 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].startswith(\"Congrats\")]) > 0\n\n # Work-Around - the final reward giving +1.0 on success and -1.0 on loss happens after the messages\n # Saying \"congratulations\" or \"you die horribly\" just repeating the message when the game starts.\n # We had to exclude that message to segment finished games but this is why we have to add these rewards here manually...\n\n final_reward = -1.0\n if game_won:\n final_reward = 1.0\n score_list[i] = {\"score\": sum([m[\"message\"][\"observation\"][\"reward\"] for m in e if\n \"message\" in m.keys() and type(m[\"message\"]) is dict])+final_reward,\n \"num_questions\": num_questions, \"num_orders\": num_orders, \"game_session\": e,\n \"game_won\": game_won}\n\n return score_list\n\n else:\n raise Exception(f\"{file_path} is not a correct file path.\")", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def __parse(self):\n lines = self.file.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n tokens = line.split()\n 
if tokens[0] == \"#start\":\n trial_name = tokens[1]\n trial = Trial(trial_name)\n self.trials[trial_name] = trial\n elif tokens[0] == \"#end\":\n continue\n else:\n date_str = tokens[0] + \" \" + tokens[1]\n date = datetime.strptime(date_str, \"%m/%d/%y %H:%M:%S\")\n sound_file = line[18:-1].strip()\n event = Event(date, sound_file, 0)\n trial.addevent(event)", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type # print now only for degug\n\n for i in self.ignore:\n if i in event.src_path or os.path.isdir(event.src_path):\n print \"Ignoring...\"\n return\n\n mod_file = event.src_path.split(self.source)[1]\n for r in self.rules:\n mod_file = mod_file.replace(r[0], r[1])\n\n print \"Writing:\", (self.destination + mod_file)\n \n input_file = utils.readFile(event.src_path)\n\n file_type = mod_file.split(\".\")[-1]\n reverted = utils.revert( input_file, \"(*\", \"*)\" ) if file_type == \"thy\" else utils.revert( input_file, \"/*\", \"*/\" )\n \n if len( reverted ) == 0 and len( input_file ) != 0:\n print \"Something might be wrong??\"\n else: utils.writeFile( self.destination + mod_file, reverted )", "def processEventLog(log):\n pass", "def parse_log(path_to_log):\n regex_iteration = re.compile('Iteration (\\d+), loss = ([\\.\\deE+-]+)')\n regex_train_output = re.compile('Train net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)')\n regex_learning_rate = re.compile('lr = ([\\.\\deE+-]+)')\n regex_test_output = re.compile('Test net output #(\\d+): detection_eval = ([\\.\\deE+-]+)')\n\n\n # Pick out lines of interest\n iteration = 0\n loss = -1\n learning_rate = 0.001\n train_dict_list = []\n train_row = None\n test_score=0.0\n\n logfile_year = extract_seconds.get_log_created_year(path_to_log)\n with open(path_to_log) as f:\n start_time = extract_seconds.get_start_time(f, logfile_year)\n last_time = start_time\n\n for line in f:\n iteration_match = regex_iteration.search(line)\n if iteration_match:\n iteration = float(iteration_match.group(1))\n loss = float(iteration_match.group(2))\n try:\n time = extract_seconds.extract_datetime_from_line(line,\n logfile_year)\n except:\n # Skip lines with bad formatting, for example when resuming solver\n continue\n\n # if it's another year\n if time.month < last_time.month:\n logfile_year += 1\n time = extract_seconds.extract_datetime_from_line(line, logfile_year)\n last_time = time\n\n seconds = (time - start_time).total_seconds()\n\n learning_rate_match = regex_learning_rate.search(line)\n\n if learning_rate_match:\n learning_rate = float(learning_rate_match.group(1))\n\n test_score_match = regex_test_output.search(line)\n if test_score_match:\n test_score = float(test_score_match.group(2))\n\n train_dict_list, train_row = parse_line_for_net_output(\n regex_train_output, train_row, train_dict_list,\n line, iteration, seconds, learning_rate,loss,test_score\n )\n\n\n return train_dict_list", "def parse(self, **kwargs):\n output_filename = self.node.get_option('output_filename')\n jobname = self.node.get_option('jobname')\n if jobname is not None:\n output_filename = \"log-\" + jobname + \".yaml\"\n # Check that folder content is as expected\n files_retrieved = self.retrieved.list_object_names()\n files_expected = [output_filename]\n # Note: set(A) <= set(B) checks whether A is a subset of B\n if not set(files_expected) <= set(files_retrieved):\n self.logger.error(\"Found files '{}', expected to find '{}'\".format(\n files_retrieved, files_expected))\n return self.exit_codes.ERROR_MISSING_OUTPUT_FILES\n\n # add 
output file\n self.logger.info(\"Parsing '{}'\".format(output_filename))\n# print(self.retrieved._repository._get_base_folder().get_abs_path(output_filename))\n output = BigDFTLogfile(self.retrieved._repository._get_base_folder().\n get_abs_path(output_filename))\n try:\n output.store()\n except ValidationError:\n self.logger.info(\"Impossible to store LogFile - ignoring '{}'\".\n format(output_filename))\n\n# with self.retrieved.open(output_filename, 'rb') as handle:\n# output_node = SinglefileData(file=handle)\n# output_dict_aiida=orm.Dict(dict=output_dict)\n# output_dict_aiida.store()\n# output_log_aiida=BigDFTLogfile(output)\n self.out('bigdft_logfile', output)\n\n return ExitCode(0)", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def _readin_evtx(file):\n\tcontent = []\n\tunparsed_entries = 0\n\twith evtx.Evtx(file) as log:\n\t\tc = 0\n\t\tsources = []\n\t\tfor record in log.records():\n\t\t\tc += 1\n\t\t\t_print_progress(c)\n\t\t\ttry:\n\t\t\t\tobj = untangle.parse(record.xml())#untangle can produce an OSError on Windows, since Windows uses a different format for timestamps\n\t\t\texcept OSError:\n\t\t\t\tc -= 1\n\t\t\t\tunparsed_entries += 1\n\t\t\t\tcontinue\n\t\t\tcurr_obj = obj.Event.System\n\t\t\tdate = curr_obj.TimeCreated['SystemTime']\n\t\t\tif '.' in date:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\telse:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S\")\n\t\t\tfull_line = record.xml()\n\t\t\tif hasattr(curr_obj,'Provider'):\n\t\t\t\tsource = curr_obj.Provider['Name']\n\t\t\telse:\n\t\t\t\tsource = ''\n\t\t\tif ( (not source in sources) and (not sources == '')):\n\t\t\t\tsources.append(source)\n\t\t\tline_nr = curr_obj.EventRecordID.cdata\n\t\t\tcontent.append(logfile_entry(int(line_nr), file, curr_obj.EventID.cdata, full_line, date, curr_obj.Computer.cdata, source))\n\t\t_delete_print()\n\tif unparsed_entries > 0:\n\t\tprint('Unfortunately, {} entries could not be parsed. 
Please see the documentation'.format(unparsed_entries))\n\t\tprint()\n\treturn logfile(file, len(content), 'evtx', content, sources)", "async def _record_logs(self, report):\n\t\tif report.action == Frame.Report.PARSE:\n\t\t\t# Collects the tests parsing log for further writing to Test_Parser.log\n\t\t\tif report.success:\n\t\t\t\tself._parse_logs[\"success\"] += [report.log]\n\t\t\telse:\n\t\t\t\tself._parse_logs[\"failure\"] += [report.log]\n\t\telif report.action == Frame.Report.EXECUTE:\n\t\t\t# Writes a test log and dump to the results directory\n\t\t\ttest_log = (\"EXECUTE STATUS: SUCCESS\\n\\n\" if report.success else \"EXECUTE STATUS: FAILURE\\n\\n\") + report.log\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, FileSystem.dump_to, \n\t\t\t\t self._result_directory_name + \"/Log/\" + report.test_name + \".log\", test_log)]):\n\t\t\t\tawait task\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, TestLogger._write_test_dump, \n\t\t\t\t self._result_directory_name + \"/Dump/\" + report.test_name + \".pcap\", report.dump)]):\n\t\t\t\tawait task", "def test_fortify_parse_class_audit(fortify_tool_plugin):\n package = Package('test', os.path.dirname(__file__))\n tree = etree.parse(os.path.join(os.path.dirname(__file__),\n 'class_audit.fvdl'))\n root = tree.getroot()\n issues = fortify_tool_plugin.parse_output(root, package)\n assert len(issues) == 1\n assert issues[0].filename\n assert issues[0].line_number == '542'\n assert issues[0].tool == 'fortify'\n assert issues[0].issue_type == 'structural'\n assert issues[0].severity == '3'\n assert issues[0].message", "def process(self, event):\n # the file will be processed there\n print (event.src_path, event.event_type) # print now only for degug", "def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder,\n build_num, build_link):\n\n lines = []\n with open(log_file, 'r') as infile:\n lines = infile.readlines()\n\n passed = {}\n failed = {}\n not_run = {}\n date = ''\n status = ''\n board = ''\n num_provision_errors = 0\n build_ok = True\n afe_line = ''\n\n for line in lines:\n if line.rstrip() == '<title>404 Not Found</title>':\n print('Warning: File for %s (build number %d), %s was not found.' 
%\n (builder, build_num, test))\n build_ok = False\n break\n if '[ PASSED ]' in line:\n test_name = line.split()[0]\n if test_name != 'Suite':\n passed[test_name] = True\n elif '[ FAILED ]' in line:\n test_name = line.split()[0]\n if test_name == 'provision':\n num_provision_errors += 1\n not_run[test_name] = True\n elif test_name != 'Suite':\n failed[test_name] = True\n elif line.startswith('started: '):\n date = line.rstrip()\n date = date[9:]\n date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y')\n int_date = (\n date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday)\n date = time.strftime('%a %b %d %Y', date_obj)\n elif not status and line.startswith('status: '):\n status = line.rstrip()\n words = status.split(':')\n status = words[-1]\n elif line.find('Suite passed with a warning') != -1:\n status = 'WARNING'\n elif line.startswith('@@@STEP_LINK@Link to suite@'):\n afe_line = line.rstrip()\n words = afe_line.split('@')\n for w in words:\n if w.startswith('http'):\n afe_line = w\n afe_line = afe_line.replace('&amp;', '&')\n elif 'INFO: RunCommand:' in line:\n words = line.split()\n for i in range(0, len(words) - 1):\n if words[i] == '--board':\n board = words[i + 1]\n\n test_dict = test_data_dict[test]\n test_list = test_dict['tests']\n\n if build_ok:\n for t in test_list:\n if not t in passed and not t in failed:\n not_run[t] = True\n\n total_pass = len(passed)\n total_fail = len(failed)\n total_notrun = len(not_run)\n\n else:\n total_pass = 0\n total_fail = 0\n total_notrun = 0\n status = 'Not found.'\n if not build_ok:\n return [], date, board, 0, ' '\n\n build_dict = dict()\n build_dict['id'] = build_num\n build_dict['builder'] = builder\n build_dict['date'] = date\n build_dict['build_link'] = build_link\n build_dict['total_pass'] = total_pass\n build_dict['total_fail'] = total_fail\n build_dict['total_not_run'] = total_notrun\n build_dict['afe_job_link'] = afe_line\n build_dict['provision_errors'] = num_provision_errors\n if status.strip() == 'SUCCESS':\n build_dict['color'] = 'green '\n elif status.strip() == 'FAILURE':\n build_dict['color'] = ' red '\n elif status.strip() == 'WARNING':\n build_dict['color'] = 'orange'\n else:\n build_dict['color'] = ' '\n\n # Use YYYYMMDD (integer) as the build record key\n if build_ok:\n if board in test_dict:\n board_dict = test_dict[board]\n else:\n board_dict = dict()\n board_dict[int_date] = build_dict\n\n # Only keep the last 5 records (based on date)\n keys_list = board_dict.keys()\n if len(keys_list) > MAX_SAVE_RECORDS:\n min_key = min(keys_list)\n del board_dict[min_key]\n\n # Make sure changes get back into the main dictionary\n test_dict[board] = board_dict\n test_data_dict[test] = test_dict\n\n if len(failed) > 0:\n RecordFailures(failure_dict, board, test, builder, int_date, log_file,\n build_num, failed)\n\n summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun)\n\n return summary_result, date, board, int_date, build_dict['color']", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # 
remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & (~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def parse_log_file(self, compute_stress=False):\n output_filename = self.node.get_option('output_filename')\n output_txt = self.retrieved.get_object_content(output_filename)\n try:\n output_data = read_log_file(output_txt, compute_stress=compute_stress)\n except Exception:\n traceback.print_exc()\n return None, self.exit_codes.ERROR_LOG_PARSING\n return output_data, None", "def parse_cutadapt_logs(self, f):\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total 
reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100", "def inner():\n for line in file_obj:\n logdata = tilak_haproxylog.parse_line(line)\n if logdata is not None:\n logdata[\"hits\"] = 1\n for value_key in value_keynames:\n if value_key not in logdata:\n logdata[value_key] = 0\n status_code = int(logdata[\"status_code\"])\n if 100 <= status_code <= 199:\n logdata[\"rsp_1xx\"] = 1\n elif 200 <= status_code <= 299:\n logdata[\"rsp_2xx\"] = 1\n elif 300 <= status_code <= 399:\n logdata[\"rsp_3xx\"] = 1\n elif 400 <= status_code <= 499:\n logdata[\"rsp_4xx\"] = 1\n elif 500 <= status_code <= 599:\n logdata[\"rsp_5xx\"] = 1\n else:\n logdata[\"rsp_other\"] = 1\n ret_data = dict(zip(index_keynames, (logdata[index_key] for index_key in index_keynames)))\n ret_data.update(dict(zip(value_keynames, (logdata[value_key] for value_key in value_keynames))))\n yield (logdata[\"ts\"], ret_data)", "def parseLog(self, log_lines):\n abstract", "def parseMonitorLog(log_file, attack_props):\n if not os.path.exists(log_file):\n return\n report = open(log_file, 'r')\n lines = report.readlines()\n #print lines\n report.close()\n \n readingStations = False\n readingAps = False\n for line in lines:\n line = line.strip()\n #print line\n if not readingStations and not readingAps:\n if line.startswith(\"BSSID\"):\n readingAps = True\n continue\n elif line.startswith(\"Station\"):\n readingStations = True\n continue\n elif readingAps:\n if len(line) < 4:\n readingAps =False\n else:\n fields = line.split(',')\n #print fields\n ap_mac = fields[0].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getActiveAP(ap_mac)\n else:\n ap = AccessPoint(ap_mac, attack_props.log_path)\n 
attack_props.addActiveAP(ap)\n ap.update(fields)\n elif readingStations and len(line) > 4:\n fields = line.split(',')\n station_mac = fields[0].strip()\n ap_mac = fields[5].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getAP(ap_mac) \n if ap.stations.has_key(station_mac):\n station = ap.stations[station_mac]\n else:\n station = Station(station_mac)\n ap.stations[station_mac] = station\n station.ap = station\n station.update(fields)", "def run(self):\n if self.log_file: # if path of SSH-log file is valid\n # Rotate & parse the log file\n self.parse_log_file()\n # Analyze the log for deviating algorithm\n self.check_manipulation()" ]
[ "0.61394227", "0.6064906", "0.59986395", "0.5834477", "0.5793847", "0.57524127", "0.5733288", "0.5686557", "0.5646877", "0.55704165", "0.55531", "0.5498819", "0.5413356", "0.53873146", "0.5380587", "0.5363391", "0.5344947", "0.53289014", "0.53170085", "0.5265918", "0.5260259", "0.524851", "0.5238336", "0.52189004", "0.52171785", "0.5207587", "0.5203746", "0.5198268", "0.51955444", "0.5180761" ]
0.63416183
0
Initialise clusters by alternating the bins to which the vectors are assigned.
def alternating_bins_initialisation(self, pixel_data, a=None, b=None): if not a or not b: a = 0 b = len(pixel_data) clusters = defaultdict(list) for i in range(a, b): # selecting sevens as data set clusters[i % self.K].append(pixel_data[i]) return clusters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def atlas_clusters():\n pass", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def initialize_dom(img: np.ndarray):\n\n channels = img.shape[2]\n\n for cluster in range(numclusters):\n for channel in range(channels):\n cmin = np.amin(img[:,:,channel]) # channel's min\n cmax = np.amax(img[:,:,channel]) # channel's max\n current_cluster_centers[cluster, 0, channel] = np.random.uniform(cmin, cmax)\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def 
denseBinsToClusters(candidates, plot=False, debug=False):\n graph = np.identity(len(candidates))\n for i in range(len(candidates)):\n for j in range(len(candidates)):\n graph[i, j] = int(neighbour(candidates[i], candidates[j]))\n # Find connected components in order to merge neighbouring bins\n nbConnectedComponents, components = scipy.sparse.csgraph.connected_components(\n graph, directed=False)\n if debug:\n print(graph)\n print(nbConnectedComponents, components)\n candidates = np.array(candidates)\n clusterAssignment = -1 * np.ones(data.shape[0])\n # For every cluster\n for i in range(nbConnectedComponents):\n # Get dense units of the cluster - 获取集群的密集单元\n cluster_dense_units = candidates[np.where(components == i)[0]]\n if debug:\n for v in cluster_dense_units:\n for z in v:\n print(z)\n clusterDimensions = {}\n for j in range(len(cluster_dense_units)):\n for k in range(len(cluster_dense_units[j])):\n if cluster_dense_units[j][k].dimension not in clusterDimensions:\n clusterDimensions[cluster_dense_units[j][k].dimension] = []\n clusterDimensions[cluster_dense_units[j][k].dimension].extend(cluster_dense_units[j][k].points)\n points = reduce(np.intersect1d, list(clusterDimensions.values()))\n clusterAssignment[points] = i\n if plot:\n pred = -1 * np.ones(data.shape[0])\n pred[points] = i\n plt.figure()\n plt.title(f'In yellow, clusters in {list(clusterDimensions.keys())} dimensions ')\n plt.scatter(data[:, 0], data[:, 1], c=pred)\n for g in grid[0]:\n plt.axvline(x=g, c='red', linestyle='--')\n for g in grid[1]:\n plt.axhline(y=g, c='red', linestyle='--')\n plt.show()\n if debug:\n print(clusterDimensions.keys(), points)\n return clusterAssignment", "def _cluster_into_bins(eval_data, ref_data, num_clusters):\r\n\r\n cluster_data = np.vstack([eval_data, ref_data])\r\n kmeans = sklearn.cluster.MiniBatchKMeans(n_clusters=num_clusters, n_init=10)\r\n labels = kmeans.fit(cluster_data).labels_\r\n\r\n eval_labels = labels[:len(eval_data)]\r\n ref_labels = labels[len(eval_data):]\r\n\r\n eval_bins = np.histogram(eval_labels, bins=num_clusters,\r\n range=[0, num_clusters], density=True)[0]\r\n ref_bins = np.histogram(ref_labels, bins=num_clusters,\r\n range=[0, num_clusters], density=True)[0]\r\n return eval_bins, ref_bins", "def _init_cluster(self):\n self._Init_Cluster()", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def __init__(self, count):\n\n self.clusters_count = count\n self._leaders = [i for i in range(count)]\n self._ranks = [0] * count", "def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}", "def _init_centroid(self, seed: int):\n random.seed(seed)\n self.centroid_info = dict()\n self.cluster_result = dict()\n self.centroid_stable_flag = dict()\n for key_index, chosen_value in enumerate(\n random.sample(self.list_data, self.n_cluster)):\n self.centroid_info.setdefault(\"c\" + str(key_index), float(chosen_value))\n self.cluster_result.setdefault(\"c\" + str(key_index), list())\n self.centroid_stable_flag.setdefault(\"c\" + str(key_index), False)", 
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters", "def __init__(self, bins):\n self.bins = bins", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)", "def make_all_zero(curr_clusters, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n curr_clusters[i][j] = 0", "def _10x10_grid_clusters_spread():\n return [mn(mean=np.array([i * 25, j * 25]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]", "def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]", "def initializeClusters(numClusters: int, numPrototypes: int) -> ndarray:\n result: ndarray = np.empty((numClusters, numPrototypes), dtype=int)\n for i in range(numClusters):\n result[i, :] = [j for j in range(i * numPrototypes, (i + 1) * numPrototypes)]\n return result", "def __grow_cluster(self, init_loc, thresh):\n cluster = np.zeros_like(self.__array, dtype=bool)\n cluster[init_loc[0], init_loc[1]] = True\n pocket = [init_loc]\n adjacent = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n m, n = self.__array.shape\n while pocket:\n pt = pocket.pop(0)\n neighbors_in_cluster = [\n (pt[0] - i, pt[1] - j) for (i, j) in adjacent\n if 0 <= pt[0] - i < m and 0 <= pt[1] - j < n and\n not cluster[pt[0] - i, pt[1] - j] and\n np.absolute(self.__array[pt[0], pt[1]]\n - self.__array[pt[0] - i, pt[1] - j])\n < thresh]\n for nbr in neighbors_in_cluster:\n pocket.append(nbr)\n 
cluster[nbr[0], nbr[1]] = True\n return cluster", "def assign_clusters(self):\n running_perts = {}\n for name in self.tensor_info:\n item = self.tensor_info[name]\n pert_list = item[1]\n pert_names = []\n prob_list = []\n if pert_list is not None:\n for pert in pert_list:\n pert_names.append(pert.__class__.__name__)\n prob_list.append(pert.p)\n pert_names = '_'.join(pert_names)\n if pert_names not in running_perts:\n running_perts[pert_names] = [(name, prob_list)]\n else:\n running_perts[pert_names].append((name, prob_list))\n\n running_perts.pop('')\n\n assert len(running_perts) <= len(self.clusters), \"More different perturbations than clusters available, cannot assign tensors to clusters\"\n\n # ONLY BITWISEPERT FOR THE TIME BEING\n bitwises = running_perts['BitwisePert']\n bitwise_probs = [item[1][0] for item in bitwises]\n centers, _ = kmeans(bitwise_probs, len(self.clusters))\n groups, _ = vq(bitwise_probs, centers)\n\n for tensor, cluster in zip(bitwises, groups):\n name = tensor[0]\n tensor_ref = self.tensor_info[name][0]\n repr = self.tensor_info[name][2]\n self.clusters[cluster].add_tensor(tensor_ref, repr)\n\n for cluster, rate in zip(self.clusters, centers):\n pert_dict = {\n \"name\": \"BitwisePert\",\n \"p\": rate}\n pert = P.construct_pert(pert_dict)\n cluster.set_perturb([pert])" ]
[ "0.73569727", "0.6890101", "0.66579676", "0.65458935", "0.6461489", "0.6289846", "0.62089014", "0.6200618", "0.61897707", "0.60647523", "0.60470694", "0.603594", "0.6009594", "0.6008557", "0.596944", "0.59262115", "0.5919822", "0.58949685", "0.58171034", "0.58091116", "0.57956856", "0.57412386", "0.57399946", "0.57152337", "0.57129997", "0.57125646", "0.57111555", "0.5706938", "0.57018036", "0.5694504" ]
0.700094
1
Get the codebook vectors.
def get_cb_vectors(self): return self.cb_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vectors(self):\n return self.vecs[:]", "def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docvecs[prefix]\n return vectors", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\r\n vectors = np.zeros((corpus_size, vectors_size))\r\n for i in range(0, corpus_size):\r\n prefix = vectors_type + '_' + str(i)\r\n vectors[i] = model.docvecs[prefix]\r\n return vectors", "def get_vectors(model, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = model.docvecs[prefix]\n return vectors", "def boxVectors(self):\n return self.box_vectors", "def getVectors(self):\n vectors = dict()\n i = 0\n N = len(self.db.invertedIndex)\n for w, (idf, docs) in self.db.invertedIndex.items():\n for doc, tf in docs.items():\n try:\n vectors[doc][i] = tf * idf\n except KeyError as k:\n vectors[doc] = {i: tf * idf}\n i += 1\n i = 0;\n return vectors", "def bow_vecs(docs):\n return CECTORIZER.transform(docs).toarray()", "def get_vectors_for_all_docs(docs, vocab):\n docs_vectors = [get_feature_vector(doc, vocab) for doc in docs]\n return np.array(docs_vectors)", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def vectors():\n r = db.execute(\"select word, year, c from counts where conf=? order by word, year\", (conf,))\n vects = defaultdict(dict)\n for w,y,c in r:\n l = vects[w]\n l[y] = float(c) \n\n\n ret = []\n for w in vects:\n d = vects[w]\n\n # if word is super uncommon, skip it\n if (max(d.values()) <= 3):\n continue\n if (max([v / (1.+year2c.get(y,0)) for y, v in d.items()]) < .1): \n continue\n\n # some years may not have the word\n counts = dict2arr(d, xrange(minyear, maxyear+1), 1.0)\n\n \n # naive window averaging smoothing over the trend curve\n smooth = []\n for i in xrange(len(counts)):\n smooth.append(np.mean(counts[max(0,i-2):i+2]))\n if max(smooth) > 2:\n ret.append([w] + smooth)\n return np.array(ret)", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def basis_vectors(self):\n return self._basis_vectors", "def infer_vectors(self, reports, labels):\n logger.info('Inferring vectors from Doc2Vec model')\n tagged_docs = self.tag_dataset(reports, labels)\n vecs = [self.model.infer_vector(tag.words) for tag in tagged_docs]\n vecs = np.array(vecs)\n return vecs", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def calculate_cb_vecs(self, clusters):\n if not clusters or not clusters[0]:\n return 
None\n\n # :param:`n` is the dimension of the vectors\n n = len(clusters[0][0])\n # Initialize the codebook vectors to 0\n cb_vectors = np.zeros([n * self.K]).reshape(self.K, n)\n for i in range(self.K):\n sum = np.zeros([n], dtype=np.uint).reshape(1, n)\n for vector in clusters[i]:\n sum += vector\n # divide the sum of the vectors by the size of the cluster\n cb_vectors[i] = np.divide(sum, len(clusters[i]))\n return cb_vectors", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def generate_voc(self):\n\n observations = [\"walk\", \"shop\", \"clean\", \"tennis\", \"read\"]\n states = [\"sunny\", \"rainy\", \"snowy\"]\n\n # Sort them alphabetically, just to be on the safe side\n observations.sort()\n states.sort()\n\n return (observations, states)", "def codelists():\n return CodelistSet()", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def getVectors(self):\n l = len(self.points)\n return [Vector.createFromTwoPoints(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def getVector(self, p):\n vector = {}\n i = 0\n tr = ParseDumpWiki.normName(p)\n if(self.db.isInPage(tr)):\n for w, (idf, docs) in self.db.invertedIndex.items():\n if (p in docs):\n vector[i] = idf * docs[p]\n i += 1\n else:\n freqDist = self.db.transformDocument(wikipedia.page(p).content)\n indexesWords = list(self.db.invertedIndex.keys())\n commonWords = set(indexesWords).intersection(freqDist.keys())\n for w in commonWords:\n idf, docs = self.db.invertedIndex[w]\n vector[indexesWords.index(w)] = idf * freqDist[w]\n return vector", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def get_bravais_vectors(p_state, idx_image=-1, idx_chain=-1):\n _a = (3*ctypes.c_float)()\n _b = (3*ctypes.c_float)()\n _c = (3*ctypes.c_float)()\n _Get_Bravais_Vectors(ctypes.c_void_p(p_state), _a, _b, _c,\n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [a for a in _a], [b for b in _b], [c for c in _c]", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return statevector.tolist()", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def getVectors(self,graph):\n return [Vector.createFromTwoTuples(graph[i],graph[i+1]) for i in range(len(graph)-1)]", "def get_vector(self): \n #print(self.state)\n '''\n print(\"\"\"\n Price {}\n Last Price {}\n Last Period Transaction {}\n Last Transaction {}\n Las Value {}\n Last day {}\n Last hour {}\n Last minute {}\n --------------\n Balance {}\n Bag {}\n \"\"\".format(\n self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'], \n self.balance, \n self.bag, \n )) \n ''' \n self.state_vector = np.array([\n self.state['price'],\n self.states[-1]['price'],\n 
self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'],\n ])\n\n return self.state_vector", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def get_box_vectors(file):\n box_vectors = [None,None,None]\n with open(file,\"rt\") as fin:\n for line in fin:\n if line[0:6] == \"CRYST1\":\n x_length = float(line[9:14])\n y_length = float(line[18:23])\n z_length = float(line[27:33])\n box_vectors = [x_length,y_length,z_length]\n return(box_vectors)\n return(box_vectors)" ]
[ "0.6734293", "0.6306046", "0.6234584", "0.6101457", "0.6060832", "0.5943614", "0.59248465", "0.58844465", "0.58299667", "0.5818226", "0.5769358", "0.5711672", "0.5681364", "0.5677561", "0.5665824", "0.5648895", "0.5648038", "0.5617257", "0.5584856", "0.5566153", "0.55533874", "0.5549351", "0.55450785", "0.55408937", "0.5519725", "0.5513539", "0.5478773", "0.5468112", "0.54615676", "0.54583496" ]
0.6465732
1
Extracts features from the final codebook vectors using the L2 norm. The way it works is that we pass in the data as an argument and the function produces len(data) feature vectors such that f(x_i)=[a_1 ... a_K] and a_j = || x_i c_j || where c_j is the codebook vector.
def extract_features(self, data): # TODO: Should feature extraction be done on the testing data? In the lecture notes # TODO: it is not done with the training data, but with the test data. # TODO: Maybe we should use the validate data when we do cross-validation. features = np.zeros([len(data)*self.K]).reshape(len(data), self.K) for i in range(len(data)): for j in range(self.K): features[i][j] = np.linalg.norm(data[i] - self.cb_vectors[j]) return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_liwc_features(train_data, test_data):\n print(\"getting liwc features\")\n train_liwc_matrix = []\n test_liwc_matrix = []\n for phrase in train_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n # print(feature_vector)\n train_liwc_matrix.append(feature_vector)\n for phrase in test_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if key in liwc_scores.keys():\n # print(key)\n # print(liwc_scores[key])\n feature_vector.append(liwc_scores[key])\n else:\n feature_vector.append(0)\n test_liwc_matrix.append(feature_vector)\n # print(train_liwc_matrix)\n return sparse.csr_matrix(train_liwc_matrix), sparse.csr_matrix(test_liwc_matrix)", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. 
Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def generateFeatures(self, data):\n pass", "def fvector(data, method ):\n\n fv = 0\n if method['type'] == 'lbp':\n \n\n lbpkern = lbpsimple.generateKernel2()\n \n imlbp = lbpsimple.lbp2oneslice(data, lbpkern)\n\n fv,bins = lbpsimple.features(imlbp)\n\n #pdb.set_trace();\n elif method['type'] == 'hist':\n \n fv, bins = numpy.histogram( data,range(-200,2000,20))\n fv = fv[10:15]\n #fv, bins = numpy.histogram( data)\n pass\n\n else:\n raise Exception('Unknow method for feature vector: %s' %(method))\n\n return fv", "def create_vectorized_features(data_dir, feature_version=2):\n extractor = PEFeatureExtractor(feature_version)\n\n print(\"Vectorizing training set\")\n X_path = os.path.join(data_dir, \"X_train.dat\")\n y_path = os.path.join(data_dir, \"y_train.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)\n\n print(\"Vectorizing test set\")\n X_path = os.path.join(data_dir, \"X_test.dat\")\n y_path = os.path.join(data_dir, \"y_test.dat\")\n raw_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n nrows = sum([1 for fp in raw_feature_paths for line in open(fp)])\n vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)", "def bag_of_words_vectorizer(datafile, k_features):\n data = []\n labels = []\n\n for jsoned_entity in open(\"data.json\", errors=\"ignore\").readlines():\n entity = json.loads(jsoned_entity)\n if entity[\"lang\"] == \"en\":\n data.append(entity[\"text\"])\n labels.append(entity[\"label\"])\n\n vectorizer = TfidfVectorizer(stop_words=get_stop_words(\"english\"))\n data = vectorizer.fit_transform(data)\n data = SelectKBest(chi2, k=k_features).fit_transform(data, labels)\n\n for vector_label_batch in batch(zip(data, labels), config.BATCH_SIZE):\n vectors = []\n labels = []\n for vec_label in vector_label_batch:\n vectors.append(vec_label[0].toarray())\n labels.append(vec_label[1])\n\n X = np.vstack(vectors)\n Y = np_utils.to_categorical(labels, 2)\n yield X, Y", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def 
vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n ###TODO\n \n features = []\n feature_freq = {}\n vocabulary = {}\n \n # 2 case : for vocab\n # case 1: \n if (vocab == None):\n \n for doc in tokens_list: \n #print('doc#=%d tokens=%s'%(i,doc)) \n data = featurize(doc,feature_fns)\n #print('data=',data)\n \n for feature in data: \n if feature[1] > 0 : \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n if feature[0] not in vocabulary.keys() :\n vocabulary.setdefault(feature[0], None) \n \n features.append(data)\n \n # sort vocab according to features (alphabetical order)\n vacab_list = sorted(feature_freq.keys(), key =lambda x: x,reverse=False)\n \n for colIndex,term in enumerate(vacab_list) :\n #print('colIndex = %d, term = %s'%(colIndex,term))\n vocabulary[term] = colIndex\n\n else: # case 2 \n \n # vocab already present\n #print('Vocab already present')\n vocabulary = vocab.copy() \n \n \n for doc in tokens_list: \n data = featurize(doc,feature_fns) \n \n test_data = [] \n for feature in data: \n # only take feature present in vocab \n if feature[0] in vocabulary.keys():\n #print('feature = ',feature) \n if feature[1] > 0 : \n test_data.append(feature) \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n #print('test_data = ',len(test_data)) \n features.append(test_data)\n #test_data.clear()\n #print('features = ',features)\n \n \n # build a csr_matrix \n row = []\n col = []\n data = [] \n \n for docID,feat_list in enumerate(features) :\n for term in feat_list:\n if (feature_freq[term[0]] >= min_freq): # (zero values are not stored)\n \n row.append(docID)\n col.append(vocabulary[term[0]])\n data.append(term[1])\n \n #print('row =',row)\n #print('col =',col)\n #print('data=',data)\n \n X = csr_matrix((data, (row, col)), shape=(len(features), len(vocabulary)), dtype=np.int64)\n \n #print('X ->')\n #print(X.toarray())\n #print(' size of X = ',X.get_shape())\n \n return(X, vocabulary)", "def extract_features(data_dir,mode='train'):\n files = get_files(data_dir)\n t0 = time.time()\n features = list()\n labels = list()\n for f in files:\n freq = get_frequencies(f)\n if mode=='train':\n sents = corpus_reader(f)\n labels.extend(d2l(sents,f,freq))\n elif mode=='decode':\n sents = corpus_reader(f,tag='pos')\n else:\n print('Invalid mode!')\n break\n features.extend(d2f(sents,f,freq)) \n dt = time.time() - t0\n print('Total feature extraction time: %d seconds' % dt)\n return features,labels", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def images_to_feature_vectors(images, bbox_size=None, train=False):\n # If no bounding box size is supplied then compute a suitable\n # bounding box by examining sizes of the supplied images.\n if bbox_size is None:\n bbox_size = get_bounding_box_size(images)\n\n bbox_h, bbox_w = bbox_size\n nfeatures = bbox_h * bbox_w\n fvectors = np.empty((len(images), nfeatures))\n\n for i, image in enumerate(images):\n padded_image = np.ones(bbox_size) * 255\n h, w = image.shape\n h = min(h, bbox_h)\n w = min(w, bbox_w)\n\n \"\"\"Here 
I've centred the characters, as I believe the covariance\n matricies will more easily pick up distinct features of characters when\n they are centrally aligned (instead of an L being in the same position\n as the right hand side of an M, it'd be in the middle, where there'd be\n a clearer distinction as the middle of an M doesn't usually extend a\n full character height, whereas an L will).\n \"\"\"\n h_start = round((bbox_h/2)-(h/2))\n w_start = round((bbox_w/2)-(w/2))\n padded_image[h_start:h_start+h, w_start:w_start+w] = image[0:h, 0:w]\n\n #----------Denoising\n #Simple thresholding\n threshold = lambda image: np.where(image > 127, 255, 0)\n\n #By histographical analysis, I'm fairly certain x is 90 for page 2. \n #Using this denoising improves page 2 significantly, but only that page.\n threshold2 = lambda image: np.where(image > 255-90, 255, image)\n\n #This method \"stretches\" all the values away from 128, which I thought\n # may be a marginally better approach than hard thresholding as it'd\n # preserve some of the \"confidence\" inherently expressed in the greyness\n # of each pixel.\n def stretch(image, factor=5):\n image = np.round((image-128)*factor + 128)\n image = np.where(image > 255, 255, image)\n image = np.where(image < 0, 0, image)\n return image\n\n #I tried median sizes 2, 3, & 4. I found size 3 works best.\n median = lambda image: scipy.ndimage.median_filter(padded_image, size=3)\n\n #I found that if the median kernel is shaped vertically, it performs\n # better. I suspect this is due to the fact that a lot of characters are\n # composed of vertical lines.\n median2 = lambda image: scipy.ndimage.median_filter(image, size=(3,2))\n\n #I decided to try using a diamond shaped vertical footprint to squeeze\n # some extra % out, as the font doesn't tend to have square corners.\n # This brought a minor improvement over a simple kernel of size (3,2).\n padded_image = scipy.ndimage.median_filter(padded_image, \n footprint=np.array([[0,1,0],[1,1,1],[1,1,1],[0,1,0]]))\n\n #Reshaping to a column vector.\n fvectors[i, :] = padded_image.reshape(1, nfeatures)\n\n return fvectors", "def _vectorize_data(self, docs: []):\n print('Vectorizing data...')\n tfidf = TfidfVectorizer()\n encoded_data = tfidf.fit_transform(docs)\n return encoded_data", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def get_all_features(train_data, test_data):\n #train_wc_matrix, test_wc_matrix = get_word_count_features(train_data, test_data)\n train_idf_matrix, test_idf_matrix = get_idf_features(train_data, test_data)\n train_ngram_matrix, test_ngram_matrix = get_ngram_features(train_data, test_data)\n # train_liwc_matrix, test_liwc_matrix = get_liwc_features(train_data, test_data)\n return sparse.hstack([train_idf_matrix, train_ngram_matrix]), \\\n sparse.hstack([test_idf_matrix, test_ngram_matrix])", "def create_feature_vector(features, length):\n START_IDX = 0\n END_IDX = 1\n\n output_vector = np.zeros(length)\n\n # negative strand\n for loc 
in features[-1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 1 \n\n # positive strand\n for loc in features[1]:\n output_vector[loc[START_IDX]:loc[END_IDX]] = 2\n\n return output_vector", "def useTfidfVectorizer(self, data):\n if self.results:\n print()\n print(\"Extracting features from the training dataset using a sparse vectorizer\", end=\" - \")\n t0 = time()\n \n vectorizer = TfidfVectorizer(max_features=10000, stop_words='english',norm='l2',use_idf=True, sublinear_tf=False,encoding='utf-8')\n matrix = vectorizer.fit_transform(data)\n \n if self.results:\n print(\"done in %0.3fs\" % (time() - t0))\n print(\"n_samples: %0.3d, n_features: %d\" % matrix.shape)\n print()\n \n feature_names = vectorizer.get_feature_names()\n return matrix, feature_names", "def get_train_data(self, train_data):\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"<w>\"] = 1 # word start\n c2i[\"</w>\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"<w>\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"</w>\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y", "def generate_feature_vector(self, test_document, n):\n m = len(self.bag_of_features)\n feature_vector = np.zeros(m)\n for feature, col in self.bag_of_features.items():\n if feature in test_document.tfs['all'].keys():\n tf = test_document.tfs['all'][feature]\n df = self.df_term[feature]\n tf_idf = calculate_tf_idf(tf=tf, df=df, doc_num=n)\n feature_vector[col] = tf_idf\n\n np.linalg.norm(feature_vector, axis=0)\n test_document.feature_vector = feature_vector\n return feature_vector", "def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):\n # English stopwords from nltk\n stopwords = set(nltk.corpus.stopwords.words('english'))\n \n # Determine a list of words that will be used as features. 
\n # This list should have the following properties:\n # (1) Contains no stop words\n # (2) Is in at least 1% of the positive texts or 1% of the negative texts\n # (3) Is in at least twice as many postive texts as negative texts, or vice-versa.\n # YOUR CODE HERE\n\n pos_unique_words = []\n neg_unique_words = []\n intermediate_vec = []\n feature_vec = []\n\n for line in train_pos:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n pos_unique_words.append(word)\n\n for line in train_neg:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n neg_unique_words.append(word)\n\n\n pos_word_dict = collections.Counter(pos_unique_words)\n neg_word_dict = collections.Counter(neg_unique_words)\n\n unique_words = list(set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())))\n\n for word in unique_words:\n if(pos_word_dict[word] >= 0.01*len(train_pos) or neg_word_dict[word] >= 0.01*len(train_neg)):\n intermediate_vec.append(word)\n\n for word in intermediate_vec:\n if (int(pos_word_dict[word]) >= 2*int(neg_word_dict[word])or neg_word_dict[word] >= 2*pos_word_dict[word]):\n feature_vec.append(word)\n\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n # Using the above words as features, construct binary vectors for each text in the training and test set.\n # These should be python lists containing 0 and 1 integers.\n # YOUR CODE HERE\n for line in train_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_pos_vec.append(lst)\n\n for line in train_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_neg_vec.append(lst)\n\n for line in test_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_pos_vec.append(lst)\n\n for line in test_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_neg_vec.append(lst)\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def get_idf_features(train_data, test_data):\n tfidf = TfidfVectorizer(tokenizer = tokenize, ngram_range = (1, 2))\n tfidf.fit(train_data)\n return tfidf.transform(train_data), tfidf.transform(test_data)", "def get_word2vec_features(x_train, x_test):\n\n all_words_per_tweet_train = [nltk.word_tokenize(sent) for sent in x_train[\"text\"]]\n all_words_per_tweet_test = [nltk.word_tokenize(sent) for sent in x_test[\"text\"]]\n\n word2vec = Word2Vec(all_words_per_tweet_train, min_count=5)\n word2vec.train(all_words_per_tweet_train, total_examples=word2vec.corpus_count, epochs=15)\n\n max_tweet_len = np.max(\n [np.max([len(t) for t in all_words_per_tweet_train]), np.max([len(t) for t in all_words_per_tweet_test])])\n\n features_train = _get_word2vec_features(x_train, word2vec, all_words_per_tweet_train, max_tweet_len)\n features_test = _get_word2vec_features(x_test, word2vec, all_words_per_tweet_test, max_tweet_len)\n\n return features_train, features_test", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def vectorize_data(self, data, idf=False):\r\n\r\n # collect only the cleaned text of the tweet\r\n text = []\r\n for tweet in data:\r\n if not 
tweet.get_processed_text():\r\n tweet.set_processed_text(self.clean_tweet(tweet))\r\n text.append(tweet.get_processed_text())\r\n\r\n # vectorize tweets\r\n\r\n if idf:\r\n vectorizer = TfidfVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n else:\r\n vectorizer = CountVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n\r\n # vectorizer = TFVectorizing()\r\n vectors = vectorizer.fit_transform(text)\r\n return vectors", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n# counter = defaultdict(int)\n# data, row, col, result = [], [], [], []\n\n# for tokens in tokens_list:\n# feats = featurize(tokens, feature_fns)\n# result.append(feats)\n# for feat in feats:\n# counter[feat[0]] += 1\n\n# if vocab == None:\n# vocab = defaultdict(int)\n# index = 0\n# for val in sorted(counter.items()):\n# if (val[1] >= min_freq):\n# vocab[val[0]] = index\n# index += 1\n\n# for index, tokens in enumerate(tokens_list):\n# for res in sorted(result[index]):\n# if (res[0] in vocab.keys()):\n# data.append(res[1])\n# col.append(vocab[res[0]])\n# row.append(index)\n\n# return csr_matrix((data, (row, col)), dtype=np.int64), vocab\n \n if vocab == None:\n d_vocab = defaultdict(list)\n doc_map = defaultdict(dict)\n for doc_no in range(len(tokens_list)):\n feats = featurize(tokens_list[doc_no], feature_fns)\n feat_dic = dict(feats)\n doc_map[doc_no] = feat_dic\n for feat in feat_dic:\n d_vocab[feat].append(doc_no)\n\n index = 0\n new_vocab = {}\n for key in sorted(d_vocab):\n if len(d_vocab[key]) >= min_freq:\n new_vocab[key] = index\n index += 1\n\n row = []\n column = []\n data = []\n for key in sorted(new_vocab.keys()):\n for doc_no in sorted(d_vocab[key]):\n if key in doc_map[doc_no]:\n row.append(doc_no)\n column.append(new_vocab[key])\n data.append(doc_map[doc_no][key])\n\n return csr_matrix((data, (row, column)), shape=(len(tokens_list), len(new_vocab)),dtype=np.int64), new_vocab\n \n\n elif vocab != None:\n row = []\n column = []\n data = []\n for doc_no in range(len(tokens_list)):\n feat_dic = dict(featurize(tokens_list[doc_no],feature_fns))\n for feat in feat_dic:\n if feat in vocab:\n row.append(doc_no)\n column.append(vocab[feat])\n data.append(feat_dic[feat])\n\n return csr_matrix((data,(row,column)), shape=(len(tokens_list),len(vocab)),dtype=np.int64),vocab", "def get_features(docs, max_length):\n docs = list(docs)\n Xs = numpy.zeros((len(docs), max_length), dtype='int32')\n for i, doc in enumerate(docs):\n j = 0\n for token in doc:\n vector_id = token.vocab.vectors.find(key=token.orth)\n if vector_id >= 0:\n Xs[i, j] = vector_id\n else:\n Xs[i, j] = 0\n j += 1\n if j >= max_length:\n break\n return Xs", "def gen_review_vecs(reviews, model, 
num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list" ]
[ "0.60243773", "0.59016556", "0.58867896", "0.5756956", "0.573142", "0.56395006", "0.56027967", "0.55864096", "0.55687845", "0.5554088", "0.55537987", "0.54962337", "0.54528916", "0.54197", "0.53998107", "0.5392125", "0.53568494", "0.5354972", "0.5350437", "0.5302239", "0.52826774", "0.5275317", "0.5274099", "0.52733266", "0.52679604", "0.5242423", "0.5238932", "0.5227998", "0.5224041", "0.52234316" ]
0.6925255
0
Sets the node_b of this NetflowFilters.
def node_b(self, node_b): self._node_b = node_b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_node(self, b):\n return b == self.__node_b", "def setB(self, b):\n\t\tself.b = int(b)", "def b(self, b):\n\n self._b = b", "def add_bilink(self, nodeport_a, nodeport_b, bilink):", "def set_bias_for_node(node: Node, value: np.ndarray):\n bias = get_bias_for_node(node)\n if bias is None:\n raise Exception('Can\\'t set bias for node {} because node does not have a bias'.format(node.name))\n set_node_value(bias, value)", "def _onSetParameterBIgnoreBounds(self, value):\n self._parameters['b'] = value\n self._logger.info(\"Parameter 'b' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def node_a(self, node_a):\n\n self._node_a = node_a", "def __init__(self, node_b=None, qos_type=None, device_interfaces=None, ports=None, protocol=None, ip_version=None, netflow_devices=None, top=None, app_type=None, nbar_application_names=None, node_a=None, conversation=None, if_names=None, direction=None): # noqa: E501 # noqa: E501\n\n self._node_b = None\n self._qos_type = None\n self._device_interfaces = None\n self._ports = None\n self._protocol = None\n self._ip_version = None\n self._netflow_devices = None\n self._top = None\n self._app_type = None\n self._nbar_application_names = None\n self._node_a = None\n self._conversation = None\n self._if_names = None\n self._direction = None\n self.discriminator = None\n\n if node_b is not None:\n self.node_b = node_b\n if qos_type is not None:\n self.qos_type = qos_type\n if device_interfaces is not None:\n self.device_interfaces = device_interfaces\n if ports is not None:\n self.ports = ports\n if protocol is not None:\n self.protocol = protocol\n if ip_version is not None:\n self.ip_version = ip_version\n if netflow_devices is not None:\n self.netflow_devices = netflow_devices\n if top is not None:\n self.top = top\n if app_type is not None:\n self.app_type = app_type\n if nbar_application_names is not None:\n self.nbar_application_names = nbar_application_names\n if node_a is not None:\n self.node_a = node_a\n if conversation is not None:\n self.conversation = conversation\n if if_names is not None:\n self.if_names = if_names\n if direction is not None:\n self.direction = direction", "def nbf(self, nbf):\n\n self._nbf = nbf", "def setEntityValue(self, b):\n return self._set(entityValue=b)", "def setEntityValue(self, b):\n return self._set(entityValue=b)", "def _onSetParameterB(self, value):\n self._parameters['b'] = min(max(value, self._parameters['lower']), self._parameters['upper']) # Limit at upper and lower\n self._logger.info(\"Parameter ba' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def set_node(self, node):\n self.__node = node", "def __init__(self, a_node, b_node, name=None):\n BinaryMatrixOp.__init__(self, a_node, b_node, name)", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def SetActive(self, b):\r\n\r\n self.active = b", "def set_bunit(self, bunit):\n self.bunit = bunit", "def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount", "def add_biport(self, node, biport):", "def __init__(self, node_a, node_b):\n self.node_a = node_a\n self.node_b = node_b\n self.base_color = 'blue'\n self.tint_color = 'white'\n self.tint = 0\n self.options = []", "def subject_b(self, subject_b):\n\n self._subject_b = subject_b", "def 
player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def update_b(color, new_b):\n\n color.update_b(new_b)", "def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id", "def apply_to(self, b):\n raise NotImplementedError(\"base class called\")", "def update_node(self, node):\n markov_blanket_vals = tuple(\n [self.state.data[var.index] for var in node.markov_blanket])\n gibbs_dist = self.gibbs_distributions[node][markov_blanket_vals]\n self.state.data[node.index] = gibbs_dist.sample(None)", "def setRevertable(self, b):\n\n self.revertable = b", "def bvh_tree_file(self, bvh_tree_file):\n\n self._bvh_tree_file = bvh_tree_file", "def setBorder(self, b):\n self.border = fn.mkPen(b)\n self.update()" ]
[ "0.61645985", "0.6144432", "0.60478383", "0.5791615", "0.576705", "0.5688763", "0.5581127", "0.5452997", "0.5451288", "0.53915596", "0.53915596", "0.5351376", "0.5198766", "0.5175632", "0.5073717", "0.5065744", "0.50484663", "0.50223225", "0.50185555", "0.50114703", "0.5000813", "0.4966118", "0.49446964", "0.49384293", "0.4902644", "0.48818833", "0.48490784", "0.48289916", "0.4784288", "0.47530612" ]
0.8324408
0
Sets the qos_type of this NetflowFilters.
def qos_type(self, qos_type): self._qos_type = qos_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qos(self, qos: int):\n if qos is not None and qos > 2: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value less than or equal to `2`\") # noqa: E501\n if qos is not None and qos < 0: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._qos = qos", "def set_qos(self, qos, set_specs_args):\n self._impl.set_qos(qos.id, set_specs_args)\n return self._unify_qos(qos)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def add_qos(self, qos):\n \n qos_id = qos[\"ovsdb:qos-entries\"][0][\"qos-id\"]\n self.qos_dict[qos_id] = qos", "def change_qos(self, arg, qos):\n\n if isinstance(arg, (list, tuple)):\n for job_id in arg:\n self.change_qos(job_id, qos)\n\n elif isinstance(arg, int):\n cmd = 'update job {} QOS={}'.format(arg, qos)\n self.scontrol(cmd)\n\n elif str(arg).lower() == 'all':\n self._queue = None\n for job_id, attrs in self.queue.items():\n status = attrs[self.QCOL_STATUS].lower()\n if status == 'pd':\n self.change_qos(job_id, qos)\n\n else:\n e = ('Could not change qos of: {} with type {}'\n .format(arg, type(arg)))\n logger.error(e)\n raise ExecutionError(e)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def set_qos(self, qos_id, set_specs_args):\n aname = \"cinder_v%s.set_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.set_keys(qos_id,\n set_specs_args)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_elements__qos, is_container='container', 
yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_elements__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def set_qos_stat_type(self, iface, ptype):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def qos(self) -> int:\n return self._qos", "def associate_qos(self, qos_id, vol_type_id):\n url = \"qos-specs/%s/associate\" % qos_id\n url += \"?vol_type_id=%s\" % vol_type_id\n resp, body = self.get(url)\n self.validate_response(schema.associate_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def set_qos(self, on_ok):\n self._channel.basic_qos(\n prefetch_count=self._prefetch_count, callback=on_ok)", "def disassociate_qos(self, qos_id, vol_type_id):\n url = \"qos-specs/%s/disassociate\" % qos_id\n url += \"?vol_type_id=%s\" % vol_type_id\n resp, body = self.get(url)\n self.validate_response(schema.disassociate_qos, resp, body)\n return rest_client.ResponseBody(resp, body)", "def resource_type(self):\n return 'qos'", "def set_qos_key(self, qos_id, **kwargs):\n put_body = json.dumps({\"qos_specs\": kwargs})\n resp, body = self.put('qos-specs/%s' % qos_id, put_body)\n body = json.loads(body)\n self.validate_response(schema.set_qos_key, resp, body)\n return rest_client.ResponseBody(resp, body)", "def qos(self):\n if self == SubscribeResult.qos0:\n rv = 0\n elif self == SubscribeResult.qos1:\n rv = 1\n elif self == SubscribeResult.qos2:\n rv = 2\n else:\n raise TypeError()\n\n return rv", "def SetType(self, ct_type):\r\n\r\n self._type = ct_type", "def qos_associate_type(self, qos_specs, vol_type_id):\n aname = \"cinder_v%s.qos_associate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.associate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)", "def qos_associate_type(self, qos_specs, vol_type_id):\n self._impl.qos_associate_type(qos_specs, vol_type_id)\n return self._unify_qos(qos_specs)", "def test_400_enable_qos(self):\n if self._get_openstack_release() >= self.trusty_mitaka:\n unit = self.n_ovs_sentry\n set_default = {'enable-qos': 'False'}\n set_alternate = {'enable-qos': 'True'}\n self.d.configure('neutron-api', set_alternate)\n self._wait_and_check(sleep=60)\n qos_plugin = 'qos'\n config = u._get_config(\n self.neutron_api_sentry, '/etc/neutron/neutron.conf')\n service_plugins = config.get(\n 'DEFAULT',\n 'service_plugins').split(',')\n if qos_plugin not in service_plugins:\n message = \"{} not in service_plugins\".format(qos_plugin)\n amulet.raise_status(amulet.FAIL, msg=message)\n\n config = u._get_config(\n unit,\n '/etc/neutron/plugins/ml2/openvswitch_agent.ini')\n extensions = config.get('agent', 'extensions').split(',')\n if qos_plugin not in extensions:\n message = \"qos not in extensions\"\n amulet.raise_status(amulet.FAIL, msg=message)\n\n 
u.log.debug('Setting QoS back to {}'.format(\n set_default['enable-qos']))\n self.d.configure('neutron-api', set_default)\n self._wait_and_check()\n u.log.debug('OK')", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def get_qos(self, qos_id):\n return self._unify_qos(self._impl.get_qos(qos_id))", "def get_port_qos_rxrate(self, iface, qos):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def qos_disassociate_type(self, qos_specs, vol_type_id):\n aname = \"cinder_v%s.qos_disassociate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.disassociate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)", "def get_qos_rule_type_details(self, rule_type, filters=None):\n if not self._has_neutron_extension('qos'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'QoS extension is not available on target cloud'\n )\n\n if not self._has_neutron_extension('qos-rule-type-details'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'qos-rule-type-details extension is not available '\n 'on target cloud'\n )\n\n return self.network.get_qos_rule_type(rule_type)", "def get_qos(self, qos_id):\n aname = \"cinder_v%s.get_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.get(qos_id)", "def set_type(self, type):\n self._type = type", "def test_qos_specs(self):\n qos = {'maxIOPS': 1000, 'maxBWS': 2048}\n snapshot = fake_snapshot.fake_snapshot_obj(\n self.ctx, **{'volume': self.volume,\n 'provider_id': self.snapshot_id,\n 'volume_size': 8})\n extraspecs = {}\n self.driver._get_volumetype_qos = mock.MagicMock()\n self.driver._get_volumetype_qos.return_value = qos\n self.driver._get_volumetype_extraspecs = mock.MagicMock()\n self.driver._get_volumetype_extraspecs.return_value = extraspecs\n\n props = self.driver.initialize_connection_snapshot(\n snapshot,\n self.connector)\n\n self.assertEqual(1000, int(props['data']['iopsLimit']))\n self.assertEqual(2048, int(props['data']['bandwidthLimit']))", "def setFilter(self, type: int, filter: int) -> None:\n ..." ]
[ "0.69267035", "0.665194", "0.6260876", "0.6227411", "0.61854005", "0.6163835", "0.61467403", "0.6086317", "0.6053018", "0.60085255", "0.5876559", "0.5848173", "0.56665426", "0.56485814", "0.5635002", "0.559807", "0.53704786", "0.52488047", "0.5220447", "0.5210421", "0.5194985", "0.5194985", "0.5189316", "0.5151207", "0.5147622", "0.5135847", "0.50896573", "0.50822896", "0.5080055", "0.50793546" ]
0.8767736
0
Sets the device_interfaces of this NetflowFilters.
def device_interfaces(self, device_interfaces): self._device_interfaces = device_interfaces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices", "def ifaces(self, ifaces):\n \n self._ifaces = ifaces", "def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos_interfaces__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos_elements__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos_elements__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interfaces(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interfaces_openconfig_qos__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise 
ValueError({\n 'error-string': \"\"\"interfaces must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interfaces_openconfig_qos__qos_interfaces, is_container='container', yang_name=\"interfaces\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interfaces = t\n if hasattr(self, '_set'):\n self._set()", "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)", "def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"", "def _set_interface(self, cfg, itf):\n self.interface = None\n for i in range(cfg.bNumInterfaces):\n x = cfg[(i,0)]\n if x.bInterfaceNumber == itf:\n self.interface = x\n endpoints = sorted([ep.bEndpointAddress for ep in self.interface])\n self.ep_out, self.ep_in = endpoints[:2]", "def plug_vifs(self, instance, network_info):\n LOG.debug('plug_vifs called for instance', instance=instance)\n try:\n for viface in network_info:\n self.vif_driver.plug(instance, viface)\n self.start_firewall(instance, network_info)\n except Exception as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Failed to configure container network'\n ' for %(instance)s: %(ex)s'),\n {'instance': instance.name, 'ex': ex},\n instance=instance)", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, 
yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()", "def setAdaptationInterfaceProperties(self, logicalinterface):\n logicalinterface.setDevice(self.getDevice())\n logicalinterface.setBlade(self.getBlade())\n logicalinterface.setPort(self.getPort())", "def fusion_api_configure_appliance_interfaces(self, body=None, api=None, headers=None):\n return self.interfaces.configure(body, api, headers)", "def get_port_interfaces(self, oid):\n path = '/servers/%s/os-interface' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List port interfaces for server %s: %s' % \n (oid, truncate(res)))\n nets = res[0]['interfaceAttachments']\n for item in nets:\n item[u'name'] = None\n return nets", "def interfaces(self):\n if self._interfaces is None:\n self._interfaces = list(x[\"interface\"] for x in self._interfaces_detailed_list())\n\n return self._interfaces", "def 
setNewNativeInterfaceProperties(self, interface):\n interface.setDevice(self)\n # interface.removable = False\n if interface not in self.interfaces:\n self.interfaces.append(interface)\n if interface not in self.logicalinterfaces:\n self.logicalinterfaces.append(interface)", "def set_interface(self, interface):\n if not interface_exists(interface):\n raise ValueError(f\"Interface {interface} is invalid.\")\n self.interface = interface", "def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError", "def devicenodes(self, devicenodes):\n\n self._devicenodes = devicenodes", "def get(self, context, device_id, filters):\n interfaces_obj = dbapi.net_interfaces_get_by_device(\n context, device_id, filters)\n return jsonutils.to_primitive(interfaces_obj), 200, None", "def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:\n return pulumi.get(self, \"network_interfaces\")", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def interfaces(self, site_id, element_id, interface_id, data, tenant_id=None, api_version=\"v4.15\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/elements/{}/interfaces/{}\".format(api_version,\n tenant_id,\n site_id,\n element_id,\n interface_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def add_interface(self, inf):\n self.interfaces[inf] = {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}" ]
[ "0.68550044", "0.63056403", "0.6121295", "0.6114787", "0.60211456", "0.59780586", "0.5934186", "0.58451796", "0.58451796", "0.57590055", "0.5745199", "0.56535774", "0.54265004", "0.5393894", "0.5360232", "0.53313124", "0.53304845", "0.53277284", "0.5235097", "0.51627266", "0.5103836", "0.5072573", "0.50651103", "0.5025339", "0.50048065", "0.50018364", "0.49787417", "0.49764925", "0.49457002", "0.49392918" ]
0.81078243
0
Sets the ports of this NetflowFilters.
def ports(self, ports): self._ports = ports
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_ports(self, ports, **kwargs):\n pass", "def modify_rstp_ports(self, ports, **kwargs):\n pass", "def https_ports(self, https_ports):\n\n self._https_ports = https_ports", "def http_ports(self, http_ports):\n\n self._http_ports = http_ports", "def make_external_ports(self, ports):\n\n self._set_unconnected_ports()\n for ip_name, _ports in ports.items():\n for _port in _ports:\n self._set_port(self._ips[ip_name], _port)", "def setport(self, port):\n self.__port = port", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerPortArgs']]]]:\n return pulumi.get(self, \"ports\")", "def ports(self):\n return port.PortCollection(\n self._conn,\n utils.get_sub_resource_path_by(self, \"Ports\"),\n redfish_version=self.redfish_version,\n )", "def port(self, port):\n if port is not None and port > 65535:\n raise ValueError(\"Invalid value for `port`, must be a value less than or equal to `65535`\")\n if port is not None and port < 1:\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `1`\")\n\n self._port = port", "def get_ports(self):\n return self._ports", "def connect_walker_ports(self, port1: Port, port2: Port) -> None:\n self.port_end.req_ports = port1\n self.port_end.req_ports = port2", "def set_ports_pool(self, being: int, end: int):\n self.ports_pool = (being, end)\n return self", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]", "def set_port(self, party_port) -> None:\n\n self._port = party_port", "def setPort(self, port):\n libxml2mod.xmlURISetPort(self._o, port)", "def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices", "def set_login_port(self, port: int):\n assert 0 < port < 65535\n self.login_udp_port = port\n return self", "def __init__(self, env, name, num_ports):\n self.env = env\n self.ports = [Port(self.env, \"{}-port{}\".format(name, i))\n for i in range(num_ports)]\n self.name = name", "def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])", "def port(self, port: int):\n if port is not None and port < 0: # noqa: E501\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._port = port", "def get_ports(cls):\n return cls._open_ports.copy()", "def port_in(self, port_in):\n\n self._port_in = port_in", "def port_in(self, port_in):\n\n self._port_in = port_in" ]
[ "0.75998574", "0.6880327", "0.6804348", "0.6704648", "0.65429854", "0.6481354", "0.6428148", "0.61421347", "0.60838145", "0.60838145", "0.60838145", "0.6080848", "0.6070129", "0.60178405", "0.59989095", "0.5934444", "0.59169525", "0.5844672", "0.58260345", "0.5825795", "0.58092695", "0.5793928", "0.5792854", "0.5792121", "0.57907724", "0.57517576", "0.5748377", "0.5669408", "0.5631242", "0.5631242" ]
0.8199667
0
Sets the ip_version of this NetflowFilters.
def ip_version(self, ip_version): self._ip_version = ip_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vip(self, vip):\n\n self._vip = vip", "def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]", "def ip(self, ip):\n\n self._ip = ip", "def ip(self, ip):\n\n self._ip = ip", "def version(self, version):\n if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501\n raise ValueError(\"Invalid value for `version`, must not be `None`\") # noqa: E501\n\n self._version = version", "def flow_encoding_version(self, flow_encoding_version):\n\n self._flow_encoding_version = flow_encoding_version", "def ip(self, ip):\n self._ip = ip\n return self", "def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip", "def version(self, version):\n \n self._version = version", "def ip(self, ip: str):\n\n self._ip = ip", "def node_version(self, node_version):\n\n self._node_version = node_version", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def ip_address(self, ip_address):\n\n self._ip_address = ip_address", "def version(self, version: int):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version" ]
[ "0.63388056", "0.58770186", "0.5694638", "0.5656582", "0.5656582", "0.56357116", "0.55922115", "0.55871147", "0.5562405", "0.55404204", "0.5522308", "0.55216855", "0.5502596", "0.5502596", "0.5502596", "0.5491824", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54864305" ]
0.8065355
0
Sets the netflow_devices of this NetflowFilters.
def netflow_devices(self, netflow_devices): self._netflow_devices = netflow_devices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]", "def device_interfaces(self, device_interfaces):\n\n self._device_interfaces = device_interfaces", "def devicenodes(self, devicenodes):\n\n self._devicenodes = devicenodes", "def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError", "def set_device(num):\n safe_call(backend.get().af_set_device(num))", "def setFilters(self, filters):\n self.__filters = filters", "def set_device(self, device):\n self.device = device", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def set_toggle_devices_enabled(self, track, xclip, ident, value = None):\n for device in track.devices:\n if(hasattr(device, 'parameters')):\n self._parent._device_actions.set_device_on_off(device, track, xclip, ident);", "def configure_devices(self, ports):\n\n new_devices = []\n \n # for each port create a new Device and start the underlying thread\n for p in ports:\n new_device = Device(p)\n self.configured_devices[new_device.id] = new_device\n new_devices.append(new_device)\n new_device.start()\n\n return new_devices", "def flows(self, flows):\n\n self._flows = flows", "def magma_setdevice(dev):\n\n _libmagma.magma_setdevice(dev)", "def set_device_ids(self, device_ids):\n if not all(isinstance(device_id, str) for device_id in device_ids):\n raise ApiError(\"One or more invalid device IDs\")\n self._update_criteria(\"device.id\", device_ids)\n return self", "def _import_devices(self) -> None:\n self._devices.clear()\n\n # Exctract all devices\n for device in self._udev.list_devices():\n # Skip devices without mapping\n if not device.device_node or self.helper.hide_virtual_device(device):\n continue\n self._devices[device.sys_name] = Device.import_udev(device)", "def setup_devices(self, devices):\n \n self.devices = devices\n \n barrier = ReusableBarrier(len(devices))\n lock = Lock()\n aux_dict = {}\n\n for device in devices:\n device.barrier = barrier\n device.global_lock = lock\n for location in device.sensor_data: \n if location not in aux_dict:\n aux_dict[location] = Semaphore() \n \n for device in devices:\n device.device_semaphores = aux_dict\n\n self.setup_master_thread()", "def set_filters(self, filters):\n obj = []\n for fltr in filters:\n obj.append(fltr.jobject)\n javabridge.call(self.jobject, \"setFilters\", \"([Lweka/filters/Filter;)V\", obj)", "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = 
devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? \".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? \".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):\n self.num_inference_steps = num_inference_steps\n timesteps = (\n np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)\n .round()[::-1][:-1]\n .copy()\n .astype(np.int64)\n )\n self.timesteps = torch.from_numpy(timesteps).to(device)\n self.model_outputs = [\n None,\n ] * self.config.solver_order\n self.lower_order_nums = 0", "def refresh_port_filters(self, own_devices, other_devices):\n # These data structures are cleared here in order to avoid\n # losing updates occurring during firewall refresh.\n devices_to_refilter = self.devices_to_refilter\n global_refresh_firewall = self.global_refresh_firewall\n self.devices_to_refilter = set()\n self.global_refresh_firewall = False\n LOG.info(_LI(\"Going to refresh for devices: %s.\"),\n len(devices_to_refilter))\n if global_refresh_firewall:\n LOG.info(_LI(\"Refreshing firewall for all filtered devices.\"))\n 
self.firewall.clean_port_filters(other_devices)\n self.refresh_firewall()\n else:\n own_devices = (own_devices & devices_to_refilter)\n other_devices = (other_devices & devices_to_refilter)\n self.firewall.clean_port_filters(other_devices)\n if own_devices:\n LOG.info(_LI(\"Refreshing firewall for %d own devices.\"),\n len(own_devices))\n self.refresh_firewall(own_devices)\n if other_devices:\n LOG.info(_LI(\"Refreshing firewall for %d other devices.\"),\n len(other_devices))\n self.prepare_firewall(other_devices)\n LOG.info(_LI(\"Finished refresh for devices: %s.\"),\n len(devices_to_refilter))", "def ports(self, ports):\n\n self._ports = ports", "def set_devices(sys_device_ids):\n # Set the CUDA_VISIBLE_DEVICES environment variable\n import os\n visible_devices = ''\n for i in sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n # Return wrappers.\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n device_id = 0 if len(sys_device_ids) > 0 else -1\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def set_devices_for_ml(sys_device_ids):\n import os\n\n all_ids = []\n for ids in sys_device_ids:\n all_ids += ids\n unique_sys_device_ids = list(set(all_ids))\n unique_sys_device_ids.sort()\n if -1 in unique_sys_device_ids:\n unique_sys_device_ids.remove(-1)\n\n # Set the CUDA_VISIBLE_DEVICES environment variable\n\n visible_devices = ''\n for i in unique_sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n\n # Return wrappers\n\n relative_device_ids = []\n TVTs, TMOs = [], []\n for ids in sys_device_ids:\n relative_ids = []\n for id in ids:\n if id != -1:\n id = find_index(unique_sys_device_ids, id)\n relative_ids.append(id)\n relative_device_ids.append(relative_ids)\n\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n TVTs.append(TransferVarTensor(relative_ids[0]))\n TMOs.append(TransferModulesOptims(relative_ids[0]))\n return TVTs, TMOs, relative_device_ids", "def set_device_type(device: str = \"cuda\"):\n DefaultDeviceType._default_device_type = device", "def flowers(self, flowers):\n\n self._flowers = flowers", "def device_count(self, device_count):\n\n self._device_count = device_count", "def initialize_devices(self):\n for k in self.devices:\n dev = self.devices[k]\n print('Starting %s' % dev.properties['name'])\n dev.initialize_driver()\n # print('Error initializing %s' % dev.properties['name'])\n if 'defaults' in dev.properties:\n defaults_file = dev.properties['defaults']\n defaults = from_yaml_to_dict(defaults_file)[dev.properties['name']]\n dev.apply_values(defaults)\n if dev.properties['type'] == 'daq':\n self.daqs[dev.properties['name']] = {'input': [],\n 'output': [],\n 'monitor': [], } # Creates an entry for every different DAQ.", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in sorted(self.ports)]" ]
[ "0.6428995", "0.6428995", "0.5585992", "0.54547", "0.5365216", "0.5246433", "0.51710474", "0.50898916", "0.50661755", "0.5038719", "0.5004575", "0.49756554", "0.4974928", "0.49667272", "0.4923213", "0.48840016", "0.48089606", "0.48036066", "0.47994307", "0.4792203", "0.47901726", "0.4782577", "0.4773169", "0.4769025", "0.47646013", "0.47079116", "0.46879467", "0.46842295", "0.46741813", "0.4669408" ]
0.86378056
0
Sets the top of this NetflowFilters.
def top(self, top): self._top = top
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top", "def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def set_top(self,top_name):\n self.top_name = top_name", "def top(self, top):\n self.ptr.top(top)", "def top(self, top):\n # type: (float) -> None\n\n if top is not None:\n if not isinstance(top, (float, int)):\n raise TypeError(\"Invalid type for `top`, type has to be `float`\")\n\n self._top = top", "def top_type(self, top_type):\n\n self._top_type = top_type", "def _set_top(self, user_n, item_n):\n self.user_n = user_n\n self.item_n = item_n", "def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)", "def top_bar(self, top_bar):\n\n self._top_bar = top_bar", "def setTopP(self, value):\n return self._set(topP=value)", "def setTopP(self, value):\n return self._set(topP=value)", "def setTopP(self, value):\n return self._set(topP=value)", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def page_top(self):\n self._pos = 0\n self._display()", "def top_attire_color(self, top_attire_color):\n\n self._top_attire_color = top_attire_color", "def do_top(self, arg):\n if self.curindex == 0:\n self.error('Oldest frame')\n return\n self._select_frame(0)", "def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")", "def draw_top(self):\n return group()", "def page_top(self):\n self._npos = 0\n self.display()", "def margin_top(self, value):\n self._margin_top = value", "def top(self, value):\n\n pass", "def top(self) -> None:\n # We remove ourselves from the list then insert ourselves to the end of the list\n current_index = ALL_WINDOWS.index(self)\n ALL_WINDOWS.pop(current_index)\n ALL_WINDOWS.append(self)", "def top(self):\n return super().peek()", "def _set_top_preps(self) -> None :\n prep_dict = self._system.getPReps(1, 20)\n prep_address_list = prep_dict['preps']\n for each_prep in prep_address_list:\n self._top_preps.put(each_prep['address'])", "def _reset_top_preps(self) -> None:\n if self._system.getIISSInfo()[\"nextPRepTerm\"] > self._block_height_week.get() + (7 * 43200):\n self._block_height_week.set(self._system.getIISSInfo()[\"nextPRepTerm\"])\n for i in range(len(self._top_preps)):\n self._top_preps.pop()\n self._set_top_preps()", "def set_top_container (self, top_container_id):\n instance = self.get_instance()\n instance['sub_container']['top_container']['ref'] = '/repositories/2/top_containers/%s' % top_container_id", "def top(self):", "def top(self) -> int:\n top = self.stack.pop()\n self.stack.append(top)\n for i in range(len(self.stack) - 1):\n self.stack.append(self.stack.pop())\n return top" ]
[ "0.67293346", "0.66441184", "0.66229576", "0.6613329", "0.6542814", "0.65164036", "0.6361207", "0.6278201", "0.62754864", "0.620966", "0.60119075", "0.60119075", "0.60119075", "0.59842026", "0.59842026", "0.5849825", "0.58208567", "0.5782378", "0.5741379", "0.5739408", "0.57287025", "0.566843", "0.5667478", "0.56568104", "0.563995", "0.55853176", "0.5583333", "0.55805856", "0.5565484", "0.5547596" ]
0.73888093
0
Sets the app_type of this NetflowFilters.
def app_type(self, app_type): self._app_type = app_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _app_type(self):\n return self._event['app_type']", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def set_type(self, type):\n self._type = type", "def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())", "def set_application(self, app):\n \n self.app = app", "def item_group_type(self, item_group_type):\n\n self._item_group_type = item_group_type", "def image_type(self, image_type: ImageType):\n\n self._image_type = image_type", "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def set_input_type(self, input_type):\n if input_type is not None: self._input_type.value = input_type\n return self", "def set_execution_type(self, type):\n self.execution_type = type", "def set_type(self, rtype=ALL_USERS):\r\n self.type = rtype", "def type(self, type: str):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.6110402", "0.6007707", "0.6007707", "0.5881547", "0.5826859", "0.5705285", "0.5672125", "0.5645346", "0.5516108", "0.5502036", "0.5485557", "0.54566836", "0.5454073", "0.5433174", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046" ]
0.8083322
0
Sets the nbar_application_names of this NetflowFilters.
def nbar_application_names(self, nbar_application_names): self._nbar_application_names = nbar_application_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def set_name(self, application_name):\r\n self._name = application_name", "def app_names(self):\n return self.get_app_names()", "def app_name(self, value):\n self._app_name = value", "def config_bucket_names(self, config_bucket_names: ConfigNodePropertyArray):\n\n self._config_bucket_names = config_bucket_names", "def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')", "def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))", "def category_names(self, category_names):\n\n self._category_names = category_names", "def RAppNames(self):\n\t\tnames=[]\n\t\tfor item in range(self.rApps.Count):\n\t\t\tnames.append(self.rApps.Item(item).Name)\n\t\treturn names", "def set_pinnames(self, names):\n self.pnames = names", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1", "def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def nvmf_namespace_num(self, nvmf_namespace_num):\n\n self._nvmf_namespace_num = nvmf_namespace_num", "def set_fnames(self, fnames):\n self.fnames = fnames[:]", "def set_application(self, app):\n \n self.app = app", "def setName(self, *args):\n return _libsbml.FluxBound_setName(self, *args)", "def set_title_bar_visible(self, visible):\n self.widget.SetTitleBarVisible(visible)", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def apps(self, apps):\n\n self._apps = apps", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))", "def setProgramName(self, *args):\n return _libsbml.SBMLWriter_setProgramName(self, *args)", "def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n self.update_callback()\n self.backend_callback()\n self.show_page()", "def set_all_inactive(self):\n for name in self.get_names():\n self.set_inactive(name)", "def clean_name(self) -> None:\n\n for regex, group in 
self.parse_generic_regex:\n m = regex.match(self.app_name)\n\n if m:\n self.app_name = m.group(group).strip()\n return", "def setFilters(self, filters):\n self.__filters = filters", "def applications(self, applications: List[ApplicationRequestResponse]):\n\n self._applications = applications", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def set_atom_labels(self, labels):\n self.set_attribute(\"atom_labels\", labels)" ]
[ "0.5579578", "0.5503502", "0.51913893", "0.5184525", "0.51769996", "0.5006621", "0.4993549", "0.48435128", "0.48244205", "0.4778834", "0.4738988", "0.47321886", "0.47280967", "0.47159183", "0.4684673", "0.46767935", "0.46593088", "0.46463352", "0.4644338", "0.46052152", "0.46007124", "0.4590774", "0.45867503", "0.45630264", "0.45449847", "0.4543776", "0.45381227", "0.45350233", "0.45310223", "0.44987962" ]
0.86759675
0
Sets the node_a of this NetflowFilters.
def node_a(self, node_a): self._node_a = node_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_node(self, node):\n self.__node = node", "def from_node(self, a):\n return a == self.__node_a", "def nodes(self, nodes_array):\n self.nodes_set = nodes_array", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Zeroslike(%s)\" % node_A.name\n return new_node", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Zeroslike(%s)\" % node_A.name\r\n return new_node", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Oneslike[%s]\" % node_A.name\r\n return new_node", "def node_b(self, node_b):\n\n self._node_b = node_b", "def __call__(self, node_A):\n new_node = Op.__call__(self)\n new_node.inputs = [node_A]\n new_node.name = \"Oneslike(%s)\" % node_A.name\n return new_node", "def a(self, a):\n\n self._a = a", "def nodes(self, nodes):\n\n self._nodes = nodes", "def setA(self, a):\n\t\tself.a = int(a)", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def __init__(self, node_a, node_b):\n self.node_a = node_a\n self.node_b = node_b\n self.base_color = 'blue'\n self.tint_color = 'white'\n self.tint = 0\n self.options = []", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def node_data(self, node_data):\n\n self._node_data = node_data", "def set_nodes(self, nodes):\n self._drv_nodes = nodes", "def set_node_attribute(\n node: MatterNode,\n endpoint: int,\n cluster_id: int,\n attribute_id: int,\n value: Any,\n) -> None:\n attribute_path = f\"{endpoint}/{cluster_id}/{attribute_id}\"\n node.endpoints[endpoint].set_attribute_value(attribute_path, value)", "def node_id(self, node_id):\n\n self._node_id = node_id", "def set_node(self, uri, info):\n\t\tself.node_uri = uri\n\t\tself.node_info = info", "def node_info(self, node_info):\n\n self._node_info = node_info", "def set(self, node, value):\n self.val[node] = value", "def set_node_id(self, node_id):\n self._node_id = node_id", "def nodes(self, nodes):\n self.nodes_ = nodes\n self.last_sequence_ind = int(self.nodes_.shape[0] - 1)\n logging.debug(\n \"Segment - Nodes {n_shape} set.\".format(n_shape=self.nodes_.shape)\n )", "async def set_nodes(self, node_callq: Dict):\n for svc in self._services:\n await svc.set_nodes(node_callq)", "def set_apex_node(self):\n if self.opt == 'CT':\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(3604)\n self.endo_apex_node = self.mesh_poly.GetPoints().GetPoint(3579)\n else:\n self.endo_apex_node = None # we do not know this\n self.epi_apex_node = self.mesh_poly.GetPoints().GetPoint(0)", "def node_count(self, node_count):\n\n self._node_count = node_count", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def _update_nodes_ids(self, change=None):\n self._nodes_filter.val_range = self.nodes_range\n self.nodes_ids = self._nodes_filter.val_ids\n self._update_edges_filtered(change)", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Sqrt(%s)\" % (node_A.name)\r\n return new_node" ]
[ "0.58251303", "0.5789575", "0.5745441", "0.5615295", "0.557767", "0.5573292", "0.5538027", "0.55000454", "0.5496195", "0.54542196", "0.5382919", "0.5307776", "0.51498705", "0.5089265", "0.507674", "0.5060496", "0.5041549", "0.50394356", "0.4960041", "0.49577066", "0.4915388", "0.49085498", "0.48717454", "0.48525095", "0.48383656", "0.4815578", "0.48139805", "0.4796978", "0.47719133", "0.47489136" ]
0.79782516
0
Sets the conversation of this NetflowFilters.
def conversation(self, conversation): self._conversation = conversation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_conversation(self, conversation):\r\n self.conversation = conversation", "def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string", "def update(self, conversation):\n self.content_type = \"application/json\"\n self.method = \"PATCH\"\n entity = Conversation(json.loads(self.send(conversation).content))\n self._initialize_collection_properties(entity)\n return entity", "def conversation_participant_name(self, conversation_participant_name):\n\n self._conversation_participant_name = conversation_participant_name", "def set_convert(self, connection_conv):\n self.convert = connection_conv", "def conversation_participant_uuid(self, conversation_participant_uuid):\n\n self._conversation_participant_uuid = conversation_participant_uuid", "def conversation(self, thread):\r\n assert isinstance(thread, int) and 0 <= thread < len(self._threads), \"Thread {} don't exists at channel {}!\".\\\r\n format(thread, self.name)\r\n return self._threads[thread][\"conversation\"]", "def conversations(self):\n if self._conversations is None:\n self._conversations = Conversations(self)\n return self._conversations", "def sent(self, sent):\n\n self._sent = sent", "def handle_chat_received(self, peer: Peer):\n if not self._conversation_view:\n return\n\n if peer is not self._conversation_view.peer():\n # The active conversation is different than the one receiving the message\n index = self.__convs_list.model().index_of(peer)\n if index is not None:\n model_index = self.__convs_list.model().index(index, 0, QModelIndex())\n self.__convs_list.model().setData(model_index, QBrush(Qt.red), Qt.ForegroundRole)", "def start_conversation(self, event):\n if self._border.get_background_color(False) == globals.GROUP_ODD_COLOR:\n background_color = globals.CONVERSATION_EVEN_COLOR\n else:\n background_color = globals.CONVERSATION_ODD_COLOR\n \n self._conv.append(Conversation(self, bg=background_color))", "def initialise_conversation_model(self):\n self.conversation = model.conversation.ConversationSystem()\n #\n # Set all as alive\n for name in 'abcde':\n self.conversation.addKnowledge(['{0}-alive'.format(name)])\n #\n # And set the requires\n self.conversation.convertPresentToRequires('{0}-alive')", "def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)", "def get_conversations(self):\n\t\treturn self.conversations", "def sentmodel(sent_data):\n\n # with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"sent\"):\n sent_data = tf.expand_dims(sent_data, -1)\n filter_sizes = [2, 3, 5]\n filter_bitsent = mul_filtercnn(filter_sizes, sent_data, 'sent')\n \n fc_sent = tf.identity(tf.layers.conv1d(\\\n inputs=filter_bitsent,\\\n filters=1,\\\n kernel_size=1,\\\n padding=\"same\",\\\n activation=tf.nn.sigmoid),name=\"fc_sent\")\n return fc_sent", "def channel(self, channel):\n allowed_values = [\"sms\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and channel not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `channel` ({0}), must be one of {1}\" # noqa: E501\n .format(channel, allowed_values)\n )\n\n self._channel = channel", "def setRxConvolved(self, rx_convolved):\n \n self.rx_convolved = rx_convolved", "def filter(self, message):\n conversations = Conversations()\n conversation = conversations.get_conversation(message.from_user.id)\n if conversation is None:\n return False\n\n return conversation.type == 
self.conversation_type", "def set_sentences(self, sentences):\n self._sentences = sentences", "def list(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).list(request, *args, **kwargs)", "def get_gift_conversation(self):\r\n return self.gift_conversation", "def __init__(self, request_url, client, options):\n super(ConversationRequest, self).__init__(request_url, client, options)", "def set_channel(cls, channel):\n cls.channel = channel", "def _set_channel_(self, channel):\n self._channel = channel", "async def set_filter(self, filter_name: str, **kwargs: Any) -> None:\n\n # valid filter?\n if filter_name not in self._telescope.filters:\n raise ValueError(\"Invalid filter name.\")\n\n # log and send event\n if filter_name != self._telescope.filter_name:\n # set it\n logging.info(\"Setting filter to %s\", filter_name)\n await self._change_motion_status(MotionStatus.SLEWING, interface=\"IFilters\")\n await asyncio.sleep(3)\n await self._change_motion_status(MotionStatus.POSITIONED, interface=\"IFilters\")\n self._telescope.filter_name = filter_name\n\n # send event\n await self.comm.send_event(FilterChangedEvent(filter_name))\n logging.info(\"New filter set.\")", "def conversation_participant_arn(self, conversation_participant_arn):\n\n self._conversation_participant_arn = conversation_participant_arn", "def show_conversation(self, conversation_item):\n #\n # Handle any special knowledge which might arise\n self.handle_special_knowledge()\n #\n if conversation_item.conversation_text.strip():\n new_item = textentry.TextEntry(\n 'person_{0}'.format(conversation_item.person),\n self.markup_text(conversation_item.conversation_text),\n width=S['text-entry-width'],\n fontname='computerfont',\n color=S['vdu-colour'],\n )\n self.tabbed.add_dialog_item(new_item)\n #\n try:\n self.awaiting_conversations.remove(conversation_item)\n except KeyError:\n pass", "def _set_send_community(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=send_community.send_community, is_container='container', presence=False, yang_name=\"send-community\", rest_name=\"send-community\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Send community attribute to this neighbor', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"send_community must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=send_community.send_community, is_container='container', presence=False, yang_name=\"send-community\", rest_name=\"send-community\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Send community attribute to this neighbor', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__send_community = t\n if hasattr(self, '_set'):\n self._set()", "def set_voice_actor(sim_info: SimInfo, voice_actor: Union[int, CommonVoiceActorType]) -> None:\n sim_info.voice_actor = int(voice_actor)", "async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in 
('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)" ]
[ "0.7668855", "0.6350055", "0.52989596", "0.5078657", "0.50487155", "0.50432694", "0.49181578", "0.49066126", "0.4869592", "0.48333606", "0.4763982", "0.47448006", "0.47074327", "0.46813306", "0.46577984", "0.45800743", "0.4569655", "0.45474747", "0.45351785", "0.44976678", "0.44866642", "0.44683528", "0.44654623", "0.44592154", "0.4428278", "0.44042638", "0.44039187", "0.43915802", "0.43814868", "0.4380441" ]
0.72746265
1
Sets the if_names of this NetflowFilters.
def if_names(self, if_names): self._if_names = if_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ifaces_init(*ifnames):\n for ifname in ifnames:\n _set_eth_admin_state(ifname, schema.InterfaceState.ABSENT)", "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def setFilters(self, filters):\n self.__filters = filters", "def set_filters(self, filters):\n obj = []\n for fltr in filters:\n obj.append(fltr.jobject)\n javabridge.call(self.jobject, \"setFilters\", \"([Lweka/filters/Filter;)V\", obj)", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def filters(self, filters):\n\n self._filters = filters", "def _set_ifname(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"ifname\", rest_name=\"ifname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ifname must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"ifname\", rest_name=\"ifname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-opstest', defining_module='brocade-opstest', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__ifname = t\n if hasattr(self, '_set'):\n self._set()", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def names(self, names):\n\n self._names = names", "def ifaces(self, ifaces):\n \n self._ifaces = ifaces", "def update_filters(self, filters: str) -> None:\r\n\r\n log.debug(f'Updating filters to {filters}')\r\n\r\n parts = filters.split('&')\r\n\r\n for part in parts:\r\n value, key = part.split('=')\r\n\r\n if value == 'iv':\r\n self.__payload['prevMinIV'] = self.__payload['minIV']\r\n self.__payload['minIV'] = key.strip()\r\n elif value == 'exiv':\r\n self.__payload['exMinIV'] = key.strip()\r\n else:\r\n log.debug(f'Dont know filter: \"{part}\", ignoring...')\r\n\r\n self.__filters_string = filters", "def tag_names(self, tag_names):\n\n self._tag_names = tag_names", "def SetNames(self, names):\n # parse the names (a semicolon seperated list of names)\n if isinstance(names, str):\n names = names.split(';')\n if self.__names != names:\n self.__names = names\n self.Modified()", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def set_pinnames(self, names):\n self.pnames = 
names", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def set_fnames(self, fnames):\n self.fnames = fnames[:]", "def addNames(self, names):\n for name in names:\n self.tags.setdefault(name, ModelTag(name))", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def input_names(self):\n raise NotImplementedError(\n 'Derived ExternalGreyBoxModel classes need to implement the method: input_names'\n )", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def setFilters(self, regex=None):\n if regex is not None:\n try:\n self.__regex = re.compile(regex)\n except Exception as e:\n return\n\n self.__all_filters = (self.__regex,)\n\n self.__customFilterEnabled = any(self.__all_filters)\n self.invalidateFilter()", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "async def set_filter(self, filter_name: str, **kwargs: Any) -> None:\n\n # valid filter?\n if filter_name not in self._telescope.filters:\n raise ValueError(\"Invalid filter name.\")\n\n # log and send event\n if filter_name != self._telescope.filter_name:\n # set it\n logging.info(\"Setting filter to %s\", filter_name)\n await self._change_motion_status(MotionStatus.SLEWING, interface=\"IFilters\")\n await asyncio.sleep(3)\n await self._change_motion_status(MotionStatus.POSITIONED, interface=\"IFilters\")\n self._telescope.filter_name = filter_name\n\n # send event\n await self.comm.send_event(FilterChangedEvent(filter_name))\n logging.info(\"New filter set.\")", "def _set_interface_name(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, 
restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name=\"interface-name\", rest_name=\"interface-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='union', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface_name must be of a type compatible with union\"\"\",\n 'defined-type': \"brocade-fcoe-ext:union\",\n 'generated-type': \"\"\"YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..512']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}),], is_leaf=True, yang_name=\"interface-name\", rest_name=\"interface-name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'The Interface value.'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='union', is_config=True)\"\"\",\n })\n\n self.__interface_name = t\n if hasattr(self, '_set'):\n self._set()", "def setChannelNames(self, n1, n2):\n\t\tfor i, val in enumerate(self.headervals):\n\t\t\ts = val[0]\n\t\t\ts = s.replace(\"%ch1%\", n1)\n\t\t\ts = s.replace(\"%ch2%\", n2)\n\t\t\tself.headervals[i][0] = s\n\t\t\tself.SetStringItem(i, 0, s)", "def setIndexNames(self):\n self.xi = self.i1\n self.yi = self.i2" ]
[ "0.5829484", "0.5813386", "0.5766594", "0.5461771", "0.5420651", "0.53666186", "0.5335555", "0.5328612", "0.5256119", "0.5160971", "0.5098839", "0.50947213", "0.5075196", "0.5055912", "0.5040678", "0.4927203", "0.49013257", "0.48951134", "0.48855725", "0.48845008", "0.48448634", "0.48366588", "0.4816613", "0.47895256", "0.47626862", "0.47608158", "0.47430158", "0.474297", "0.47307447", "0.4720284" ]
0.8094472
0
Sets the direction of this NetflowFilters.
def direction(self, direction): self._direction = direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def set_direction(self, new_dir):\n self.__direction = new_dir", "def setDirection(self,stepDir = 2):\n pass", "def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdirection(self, *args, **kwargs)", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE", "def direction(self, direction):\n allowed_values = [\"supports\", \"does_not_support\"] # noqa: E501\n if direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\n .format(direction, allowed_values)\n )\n\n self._direction = direction", "def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction", "def set_port_direction(self, port, direction):\n\n if port == 1:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, direction)\n self.__port_b_direction = direction\n else:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, direction)\n self.__port_a_direction = direction\n return", "def set_direction(self, direction):\n\n def same_axis(direction1, direction2):\n y_axis = [Direction.Y_POSITIVE, Direction.Y_NEGATIVE]\n x_axis = [Direction.X_POSITIVE, Direction.X_NEGATIVE]\n return ((direction1 in x_axis and direction2 in x_axis)\n or (direction1 in y_axis and direction2 in y_axis))\n\n if direction is None:\n return\n elif not same_axis(self.direction, direction):\n self.direction = direction", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def setDirection (self, ra, dec):\n self._response.setDirection(ra, dec)", "def setRobotDirection(self, direction):\n self.direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction", "def SetLayoutDirection(*args, **kwargs):\n return _gdi_.DC_SetLayoutDirection(*args, **kwargs)", "def direction(self, direction):\n _api.check_in_list(['horizontal', 'vertical'], direction=direction)\n if hasattr(self, '_direction') and direction != self._direction:\n # remove previous artists\n self._selection_artist.remove()\n if self._interactive:\n self._edge_handles.remove()\n self._direction = direction\n self.new_axes(self.ax)\n if self._interactive:\n self._setup_edge_handles(self._handle_props)\n else:\n self._direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def set_direction(self, right_or_left):\r\n if right_or_left == \"r\":\r\n self.__direction = self.__direction - 7\r\n elif right_or_left == \"l\":\r\n self.__direction = self.__direction + 7", "def Direction(self, direction):\r\n \r\n self.dock_direction = direction\r\n return self", "def 
set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time", "def dock_direction_set(self, value):\r\n \r\n self._dock_direction = value", "def direction(self):\n return self._direction.copy()", "def steer(self, direction):\n\n if -1 <= direction <= 1:\n target_position = self.steering_limit * direction\n self.brick_pi.set_motor_position(\n self.motor_steer, -target_position)", "def shiftDir(self, direction, n):\n assert Direction.isDir(direction), \"incorrect type of arg direction: should be a Direction, is {}\".format(type(direction))\n assert isinstance(n, AxisDistance), 'incorrect type of arg n: should be type AxisDistance, is type {}'.format(type(n))\n direction = Direction(direction)\n self.x += direction.dx * n\n self.y += direction.dy * n\n return self", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def setBitDirection(self, bit_mask):\n DPxSetDinDataDir(bit_mask)", "def set_dir(self, dir, resistor=None):\n self.IN = mraa.DIR_IN\n self.OUT = mraa.DIR_OUT\n self.PULL_UP = mraa.DIR_OUT_HIGH\n self.PULL_DOWN = mraa.DIR_OUT_LOW\n if dir not in (mraa.DIR_OUT, mraa.DIR_IN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect pin direction dir={}. Use 'gpio.IN' or 'gpio.OUT'\".format(dir))\n elif resistor not in (None, self.PULL_UP, self.PULL_DOWN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect resistor={}. Use 'UP' or 'Down'\".format(resistor))\n elif dir is self.IN:\n self.dir = dir\n self.gpio_pin.dir(self.IN)\n if resistor is not None:\n raise Warning('default', 'Pin dir is {} but should be \\'None\\' when using resistor'.format(dir))\n elif resistor is not None:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n if resistor is self.PULL_UP:\n self.gpio_pin.dir(mraa.DIR_OUT_HIGH)\n else:\n self.gpio_pin.dir(mraa.DIR_OUT_LOW)\n else:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n self.gpio_pin.dir(mraa.DIR_OUT)", "def direction(self):\n return self.cfg.direction", "def direction(self) -> int:\n return self._direction" ]
[ "0.7139598", "0.7040138", "0.7006834", "0.69987583", "0.6970682", "0.68825686", "0.6802282", "0.6730239", "0.63998103", "0.6399511", "0.6314273", "0.6307516", "0.63062197", "0.63062197", "0.62925655", "0.62565696", "0.6163797", "0.6163797", "0.60774386", "0.60496444", "0.60301095", "0.6010736", "0.58777267", "0.58369535", "0.58177066", "0.5744206", "0.5712969", "0.57048905", "0.56974995", "0.569339" ]
0.7080141
1
To add parents to database
def add_parent(session, df): try: for _, row in df.iterrows(): parent = Parent() parent.name = row['parent_name'] parent.family = row['family'] session.add(parent) except Exception as ex: session.rollback() raise ex else: session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def add_parent(sender, instance, **kwargs):\n if not kwargs['created']:\n return\n for att in ['term', 'term_secondary', 'context']:\n if getattr(instance, att) is None:\n continue\n parent = getattr(instance, att).item_id\n child = instance.item_id\n ItemRelation.objects.get_or_create(\n parent_id=parent,\n child_id=child,\n visible=True,\n active=instance.active,\n )", "def set_parents(self):\n route53 = self.pcf_field.get_particles(flavor=\"route53_record\")\n route53_record_pcf_name = route53.get(\"pcf_name\", self.name)\n ec2_particles = self.pcf_field.get_particles(flavor=\"ec2_instance\")\n\n self.pcf_field.particles[\"route53_record\"][route53_record_pcf_name].parents.update(list(ec2_particles.values()))\n self.pcf_field.link_particles(self.pcf_field.particles)", "def set_parents_table(self) -> None:\n self.parents[\"A\"] = \"start\"\n self.parents[\"B\"] = \"start\"\n self.parents[\"fin\"] = None", "def update_parents(self):\n for a_parent in self.parents:\n for child in self.children:\n for a_dest in self.children[child]:\n if (a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]) not in a_parent.children[child]:\n a_parent.children[child].append((a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]))\n a_parent.update_parents()", "def parents(self, path):\n pass", "def add_parent(self, node):\n self.parents.append(node)\n self.parent_depencencies_left += 1", "def parents(rectype, source, include):\n click.echo('Migrating {}s...'.format(rectype))\n with commit():\n import_parents_from_file(source, rectype=rectype, include=include)", "def insert(self, parent, name):\n pid = self.db.insert_returning_id('simple', dict(parent=parent, name=name))\n return pid", "def insert(self, parent, name):\n if parent is None:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # '',\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n pid = self.db.insert_returning_id('pathenum', dict(path='', name=name))\n else:\n #pid = self.db.execute('''\n # INSERT INTO pathenum (path, name) VALUES (\n # (SELECT path || id || '.' FROM pathenum WHERE id = :parent),\n # :name\n # ) RETURNING id\n # ''', dict(parent=parent, name=name)\n #).list()[0][0]\n if self.db.is_dialect('mysql'):\n # FIXME: bardzo brzydki kod\n path = self.db.execute('''SELECT concat(path, id, '.') AS path FROM pathenum WHERE id = %s''' % parent).fetch_single('path')\n #print path\n pid = self.db.insert_returning_id('pathenum', dict(name=name, path=path))\n #pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n # path=\"(SELECT concat(path, id, '.') FROM pathenum WHERE id = %s)\" % parent)\n #)\n elif self.db.is_dialect('sqlserver'):\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path + CONVERT(varchar(10), id) + '.' FROM pathenum WHERE id = %s)\" % parent)\n )\n else:\n pid = self.db.insert_returning_id('pathenum', dict(name=name), dict(\n path=\"(SELECT path || id || '.' 
FROM pathenum WHERE id = %s)\" % parent)\n )\n return pid", "def add_parent(self, child, parent):\r\n setp = self._parents.setdefault(child, set())\r\n setp.add(parent)", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def insert(self, parent, name):\n if parent is None:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n text2ltree('' || currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n else:\n pid = self.db.execute('''\n INSERT INTO ltreetab (path, name) VALUES (\n (SELECT path FROM ltreetab WHERE id = :parent) ||\n ('' ||currval('ltreetab_id_seq')),\n :name\n ) RETURNING id\n ''',\n dict(parent=parent, name=name)\n ).fetch_single()\n \n return pid", "def register_parent(self, **fields):\n if 'parent_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('parents')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n if 'UID' not in needed_fields.keys():\n needed_fields['UID'] = needed_fields['parent_key']\n check = Parents.get_or_none(parent_key=needed_fields['parent_key'])\n if check is not None:\n return check\n new_parent = Parents.get_or_create(**needed_fields)\n return new_parent", "def set_parent(self, parent):\n if self not in parent.children:\n parent.children.append(self)\n self.parent = parent", "def add_parent(\n self, parent: \"Vertex\", loop: bool = False, first: bool = False\n ) -> None:\n if (loop or self != parent) and (parent not in self.parents):\n if first:\n self.parents.insert(0, parent)\n else:\n self.parents.append(parent)", "def add_parents(self, nodes):\n # Check that nodes is a list/tuple of BaseNode objects\n if (isinstance(nodes, (list, tuple)) and\n all([isinstance(node, BaseNode) for node in nodes])):\n for node in nodes:\n self.add_parent(node)\n else:\n raise TypeError('add_parents() is expecting an iterable of '\n 'Job and/or Dagman objects')\n\n return self", "def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())", "def insert(self, pid, pname, pparent, pobj, ptype):\r\n self.pids.append(pid)\r\n self.pnames.append(pname)\r\n self.pparents.append(pparent)\r\n self.ptypes.append(ptype)\r\n self.pobjs.append(pobj)", "def initialize_parents(self, filename=None):\n\t\tpass", "def include_parents():\n suffix = uuid4().hex\n\n click.secho('*** Creating Genres for Movie...', fg='green')\n _horror = _make_document('genre', name='Horror - %s' % suffix)\n click.secho(json.dumps(_horror, indent=2, sort_keys=True), fg='yellow')\n\n _monster = _make_document('genre', name='Monster - %s' % suffix, parent=_horror['_id'])\n click.secho(json.dumps(_monster, indent=2, sort_keys=True), fg='yellow')\n\n _vampire = _make_document('genre', name='Vampire - %s' % suffix, 
parent=_monster['_id'])\n click.secho(json.dumps(_vampire, indent=2, sort_keys=True), fg='yellow')\n\n _werewolf = _make_document('genre', name='Werewolf - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_werewolf, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating Movie with genres `Werewolf` and `Vampire`, parent genres should be auto-filled...', fg='green')\n twilight = _make_document('movie', title='Twilight', genres=[_vampire['_id'], _werewolf['_id']])\n click.secho(json.dumps(twilight, indent=2, sort_keys=True), fg='yellow')", "def set_parent_table(self, table):\n self.__parent_table = table", "def parent_id(self, new_id: str) -> None:\n self._db_data.parent_id = new_id", "def set_parent(self, index):\n self.add_parent(self[index])", "def parents(self, p):\n raise NotImplementedError('must be implemented by subclass')", "def add_parent(self, parent, *args, **kwargs):\n return parent.add_child(self, **kwargs)", "def add_parent_groups(ctx):\n asyncio.run(add_parent_groups_impl(ctx.obj[\"config\"]))", "def show_available_parents(self):\n self.categoryParent.clear()\n\n parents = self.orm.fetch_parents()\n self.categoryParent.addItems([p.name for p in parents])\n\n self.categoryParent.addItem('')\n self.categoryParent.setCurrentText('')", "def add_children(self,node):\n\n node.parent_id = self.id\n node.level = self.level + 1\n node.path = node._create_path()\n node.save()" ]
[ "0.6654415", "0.65198356", "0.6462206", "0.64579284", "0.6353594", "0.62729967", "0.6186695", "0.61660165", "0.6154463", "0.61290205", "0.61266243", "0.6095059", "0.6068027", "0.6065559", "0.6063551", "0.6041484", "0.6020467", "0.60039234", "0.60022867", "0.59916437", "0.5974864", "0.59564966", "0.59110826", "0.5887255", "0.5886367", "0.588044", "0.5866037", "0.5846542", "0.58446354", "0.5843909" ]
0.6953736
0
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs): path = '/v1/global/root_outcome_group' url = request_ctx.base_api_url + path.format() response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(account_id=account_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(course_id=course_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect_associated_mood():\n\n # grab the mood_id from the form\n user_mood_id = request.form.get(\"mood\")\n\n # set the mood_id to id grabbed from the form\n mood_id = user_mood_id\n\n return redirect(\"/moods/{}/entries\".format(mood_id))", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def redirect(self, location):\n self.redirect_see_other(location)", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. 
This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def view__model_admin_root(context, request):\n return HTTPFound(request.resource_url(context.__parent__))", "def catch_all(path):\n return redirect('/', code=302)", "def redirect_to(self, route_name, *args, **kwargs):\n self.redirect(self.uri_for(route_name, *args, **kwargs))", "def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def eastgardens(event, context):\n\n request = event['Records'][0]['cf']['request']\n path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' 
+ query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def index_file():\n return redirect(\"/\")", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index():\n return redirect(url_for('second_page'))", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))" ]
[ "0.7103075", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.45062536", "0.45030215", "0.45024154", "0.45024154", "0.44957826", "0.44948602", "0.44899046", "0.44889957", "0.44869763", "0.448599" ]
0.76904535
0
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs): path = '/v1/accounts/{account_id}/root_outcome_group' url = request_ctx.base_api_url + path.format(account_id=account_id) response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(course_id=course_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect_associated_mood():\n\n # grab the mood_id from the form\n user_mood_id = request.form.get(\"mood\")\n\n # set the mood_id to id grabbed from the form\n mood_id = user_mood_id\n\n return redirect(\"/moods/{}/entries\".format(mood_id))", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def redirect(self, location):\n self.redirect_see_other(location)", "def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def view__model_admin_root(context, request):\n return HTTPFound(request.resource_url(context.__parent__))", "def catch_all(path):\n return redirect('/', code=302)", "def redirect_to(self, route_name, *args, **kwargs):\n self.redirect(self.uri_for(route_name, *args, **kwargs))", "def redirect_source():\n return redirect(url_for(\"base_blueprint.source\"), code=301)", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def eastgardens(event, context):\n\n request = event['Records'][0]['cf']['request']\n 
path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' + query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def index_file():\n return redirect(\"/\")", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def index():\n return redirect(url_for('second_page'))", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))" ]
[ "0.76904535", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.45062536", "0.45030215", "0.45024154", "0.45024154", "0.44957826", "0.44948602", "0.44899046", "0.44889957", "0.44869763", "0.448599" ]
0.7103075
1
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}' payload = { 'title' : title, 'description' : description, 'vendor_guid' : vendor_guid, 'parent_outcome_group_id' : parent_outcome_group_id, } url = request_ctx.base_api_url + path.format(id=id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def modify_resource_group(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_resource_group_with_options(request, runtime)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def 
ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def test_update_group(self):\n pass", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )", "def reset_group(node, suffix=\"_grp\"):\n # create transform group\n name = \"{}_{}\".format(node.rsplit(\"_\", 1)[0], suffix)\n reset_grp = cmds.createNode(\"transform\", name=name)\n cmds.parent(reset_grp, node)\n cmds.makeIdentity(reset_grp, translate=True, rotate=True, scale=True)\n\n # reparent under parent if any, else world\n parent = (cmds.listRelatives(node, parent=True) or [None])[0]\n if parent:\n cmds.parent(reset_grp, parent)\n else:\n cmds.parent(reset_grp, world=True)\n cmds.parent(node, reset_grp)\n\n # for joints, reset rotates and jointOrients\n if cmds.nodeType(node) == \"joint\":\n cmds.makeIdentity(node, jointOrient=True, rotate=True, apply=True)\n\n cmds.select(clear=True)\n\n return reset_grp", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n 
pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def _group_modify_id(group, id_modifier):\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in 
duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def group(ctx, project, group): # pylint:disable=redefined-outer-name\n ctx.obj = ctx.obj or {}\n ctx.obj['project'] = project\n ctx.obj['group'] = group" ]
[ "0.61605686", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454543", "0.5235111", "0.5230351", "0.52265704", "0.5214914", "0.5211578", "0.5167291", "0.51348954", "0.51078886", "0.50968206" ]
0.67553115
0
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs): path = '/v1/accounts/{account_id}/outcome_groups/{id}' payload = { 'title' : title, 'description' : description, 'vendor_guid' : vendor_guid, 'parent_outcome_group_id' : parent_outcome_group_id, } url = request_ctx.base_api_url + path.format(account_id=account_id, id=id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def modify_resource_group(\n self,\n request: dds_20151201_models.ModifyResourceGroupRequest,\n ) -> dds_20151201_models.ModifyResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_resource_group_with_options(request, runtime)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n 
_AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def test_update_group(self):\n pass", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )", "def reset_group(node, suffix=\"_grp\"):\n # create transform group\n name = \"{}_{}\".format(node.rsplit(\"_\", 1)[0], suffix)\n reset_grp = cmds.createNode(\"transform\", name=name)\n cmds.parent(reset_grp, node)\n cmds.makeIdentity(reset_grp, translate=True, rotate=True, scale=True)\n\n # reparent under parent if any, else world\n parent = (cmds.listRelatives(node, parent=True) or [None])[0]\n if parent:\n cmds.parent(reset_grp, parent)\n else:\n cmds.parent(reset_grp, world=True)\n cmds.parent(node, reset_grp)\n\n # for joints, reset rotates and jointOrients\n if cmds.nodeType(node) == \"joint\":\n cmds.makeIdentity(node, jointOrient=True, rotate=True, apply=True)\n\n cmds.select(clear=True)\n\n return reset_grp", "def slotGroupEdit(self):\n dialog = GroupDialog(self)\n if dialog.exec_loop() == QDialog.Accepted:\n if dialog.group_id != None:\n # set group\n self.sampleGroup.globalGroupId = dialog.group_id\n self.groupLabel.setText(dialog.group_id)\n else:\n # ungroup\n self.sampleGroup.globalGroupId = None\n self.groupLabel.setText('Not\\nGrouped')\n self.emit(PYSIGNAL('groupChanged'), (self,))", "def axial_correction_group(obj,\n to_parents_origin=False,\n name_prefix=\"\",\n name_postfix=\"_ACGroup#\"):\n obj = get_valid_dag_node(obj)\n\n if name_postfix == \"\":\n name_postfix = \"_ACGroup#\"\n\n ac_group = pm.group(\n em=True,\n n=(name_prefix + obj.name() + name_postfix)\n )\n\n ac_group = pm.parent(ac_group, obj)[0]\n\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 
1, 1])\n\n parent = pm.listRelatives(obj, p=True)\n if len(parent) != 0:\n pm.parent(ac_group, parent[0], a=True)\n else:\n pm.parent(ac_group, w=True)\n\n if to_parents_origin:\n pm.setAttr(ac_group + \".t\", [0, 0, 0])\n pm.setAttr(ac_group + \".r\", [0, 0, 0])\n pm.setAttr(ac_group + \".s\", [1, 1, 1])\n\n pm.parent(obj, ac_group, a=True)\n\n # for joints also set the joint orient to zero\n if isinstance(obj, pm.nodetypes.Joint):\n # set the joint rotation and joint orient to zero\n obj.setAttr('r', (0, 0, 0))\n obj.setAttr('jo', (0, 0, 0))\n\n return ac_group", "def _group_modify_id(group, id_modifier):\n\n group = group._replace(id=id_modifier(group.id))\n group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))\n\n return group", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update 
the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def group(ctx, project, group): # pylint:disable=redefined-outer-name\n ctx.obj = ctx.obj or {}\n ctx.obj['project'] = project\n ctx.obj['group'] = group" ]
[ "0.67553115", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454543", "0.5235111", "0.5230351", "0.52265704", "0.5214914", "0.5211578", "0.5167291", "0.51348954", "0.51078886", "0.50968206" ]
0.61605686
1
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_global(request_ctx, id, **request_kwargs): path = '/v1/global/outcome_groups/{id}' url = request_ctx.base_api_url + path.format(id=id) response = client.delete(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def test_groups_group_ref_delete(self):\n pass", "def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def delete_target_groups(ctx):\n 
self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_groups(self):\n pass", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def do_del_group(dbsync, group):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n 
assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch" ]
[ "0.66492325", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", "0.588854", "0.58635855", "0.5855789", "0.58181447", "0.5795915", "0.57424855", "0.5739038", "0.57293683", "0.57169527" ]
0.662239
1
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs): path = '/v1/accounts/{account_id}/outcome_groups/{id}' url = request_ctx.base_api_url + path.format(account_id=account_id, id=id) response = client.delete(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def test_groups_group_ref_delete(self):\n pass", "def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load 
balancer {}:'.format(self.get_balancer_name()))", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_groups(self):\n pass", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def do_del_group(dbsync, group):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n 
assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch" ]
[ "0.662239", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", "0.588854", "0.58635855", "0.5855789", "0.58181447", "0.5795915", "0.57424855", "0.5739038", "0.57293683", "0.57169527" ]
0.66492325
0
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}/outcomes' payload = { 'outcome_id' : outcome_id, 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(id=id) response = client.post(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 
'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in 
self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, 
outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n 
AssignmentDurationInSeconds=final_duration,\r\n )\r\n additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))" ]
[ "0.717459", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296" ]
0.7362988
0
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}' payload = { 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : 
ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def ez_set_outcome(auth_token, dataset_id, outcome, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_set_outcome\"\n payload = {\n \"dataset_id\": dataset_id,\n \"outcome\" : outcome,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def link(url, title, icon=None, badge=None, **context):\n\n return {\n \"url\": url,\n \"title\": title,\n \"context\": context,\n \"badge\": badge,\n \"class\": \"link\",\n \"icon\": icon\n }", "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'parent_outcome_group_id' : parent_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add(self, workflow_ID=None, parentobj_ID=None, **kwargs):\n\n uri = kwargs.get('uri')\n uid = kwargs.get('uid')\n desc = kwargs.get('desc')\n name = kwargs.get('name')\n source = kwargs.get('source')\n\n if (self.debug):\n print('MPO.ADD', workflow_ID, parentobj_ID, name, desc,uri,uid,source,kwargs, file=sys.stderr)\n\n if uid:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uid\":uid}\n elif uri:\n payload={\"name\":name,\"description\":desc,\"source_uid\":source,\"uri\":uri}\n else:\n return {\"name\":name,\"description\":desc,\"source_uid\":source,\"message\":\"Must provide either uri or uid.\", 'uid':-1, \"status\":-1}\n\n return self.post(self.DATAOBJECT_RT,workflow_ID,[parentobj_ID],data=payload,**kwargs)", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n 
self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # assert that \"comfort_level\" is specified.\n # this is validated outside of the `Link` fields handled by\n # DRF serializer validation.\n comfort_level = int(request.data.get('comfort_level', None))\n if comfort_level not in [i[0] for i in COMFORT_LEVELS]:\n return Response({'error': True,\n 'msg': 'Please specify a reader comfort level'\n })\n\n url = urltools.normalize(url)\n\n try:\n # fetch existing link\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n # create a new link\n link_serializer = LinkSerializer(data=request.data)\n link_serializer.is_valid(raise_exception=True)\n link = link_serializer.save()\n\n # add link to bundle\n if not BundleLink.objects.filter(bundle=bundle, link=link).exists():\n # call alchemy util to fetch concepts for URL\n concepts = bundles.alchemy_utils.get_concepts(url)\n this_bundle = BundleLink.objects.create(bundle=bundle,\n link=link,\n comfort_level=comfort_level,\n curator_id=1)\n for concept in concepts:\n this_bundle.tags.add(concept)\n\n return Response('', status=201)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def _publish_reward_topic(self, reward, episode_number=1):\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)", "def relate(self, related):\n\n self._module._connection.relate(self, related)", "def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = 
client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def outcomes(self, outcomes):\n\n self._outcomes = outcomes", "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):\n\n if per_page is None:\n per_page = request_ctx.per_page\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'per_page' : per_page,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def addLink(self, obj1, obj2):\n\n link = vsdModels.ObjectLink(object1=obj1, object2=obj2)\n link.validate()\n return self.postRequest('object-links', data=link.to_struct())", "def _reward(self, action):\n raise NotImplementedError", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def link(self, callback, SpawnedLink=SpawnedLink):\n # XXX: Is the redefinition of SpawnedLink supposed to just be an\n # optimization, or do people use it? It's not documented\n # pylint:disable=redefined-outer-name\n self.rawlink(SpawnedLink(callback))", "def _on_outcome(self, outcome, condition):\n self._outcome = outcome\n self._condition = condition", "def create_hit(self, hit_type=None, question=None,\r\n lifetime=datetime.timedelta(days=7),\r\n max_assignments=1, \r\n title=None, description=None, keywords=None,\r\n reward=None, duration=datetime.timedelta(days=7),\r\n approval_delay=None, annotation=None,\r\n questions=None, qualifications=None,\r\n response_groups=None):\r\n \r\n # handle single or multiple questions\r\n neither = question is None and questions is None\r\n both = question is not None and questions is not None\r\n if neither or both:\r\n raise ValueError(\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\")\r\n\r\n if question:\r\n questions = [question]\r\n question_param = QuestionForm(questions)\r\n if isinstance(question, QuestionForm):\r\n question_param = question\r\n elif isinstance(question, ExternalQuestion):\r\n question_param = question\r\n \r\n # Handle basic required arguments and set up params dict\r\n params = {'Question': question_param.get_as_xml(),\r\n 'LifetimeInSeconds' :\r\n self.duration_as_seconds(lifetime),\r\n 'MaxAssignments' : max_assignments,\r\n }\r\n\r\n # if hit type specified then add it\r\n # else add the additional required parameters\r\n if hit_type:\r\n params['HITTypeId'] = hit_type\r\n else:\r\n # Handle keywords\r\n final_keywords = MTurkConnection.get_keywords_as_string(keywords)\r\n \r\n # Handle price argument\r\n final_price = MTurkConnection.get_price_as_price(reward)\r\n \r\n final_duration = self.duration_as_seconds(duration)\r\n\r\n additional_params = dict(\r\n Title=title,\r\n Description=description,\r\n Keywords=final_keywords,\r\n 
AssignmentDurationInSeconds=final_duration,\r\n )\r\n additional_params.update(final_price.get_as_params('Reward'))\r\n\r\n if approval_delay is not None:\r\n d = self.duration_as_seconds(approval_delay)\r\n additional_params['AutoApprovalDelayInSeconds'] = d\r\n\r\n # add these params to the others\r\n params.update(additional_params)\r\n\r\n # add the annotation if specified\r\n if annotation is not None:\r\n params['RequesterAnnotation'] = annotation\r\n \r\n # Add the Qualifications if specified\r\n if qualifications is not None:\r\n params.update(qualifications.get_as_params())\r\n\r\n # Handle optional response groups argument\r\n if response_groups:\r\n self.build_list_params(params, response_groups, 'ResponseGroup')\r\n \r\n # Submit\r\n return self._process_request('CreateHIT', params, [('HIT', HIT),])", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))" ]
[ "0.7362988", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42772636", "0.4274945", "0.42674753", "0.4266109", "0.4264433", "0.42578077", "0.42515564", "0.42505667", "0.424946", "0.42489296" ]
0.717459
1
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):

    path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)
    response = client.delete(request_ctx, url, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink(self, link_id):", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def remove_link():", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def unlink(self):\n self._linked = False\n self.is_dirty = False\n return self", "def unlink_pivot(remote, pivot_id):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_UnlinkPivot(pivot_id)\n remote.runCommand(cmd)", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)", "def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)", "def _remove_link(self, name, object_id):\n if not name in self.data:\n return\n\n if self.data[name] and object_id in self.data[name]:\n self.data[name] = self.data[name].remove(object_id)", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove_link(self, dest):\n for i, link in enumerate(self.runscript.links):\n if link[1] == dest:\n del self.runscript.links[i]\n break", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def remove(self, thought):\n # delete references to thought\n for linked in thought.links.all:\n linked.links.remove(thought)\n self.update(linked)\n\n # remove thought itself\n if self.__db.contains(thought.key):\n self.__db.remove(thought.key)\n self.__cache.remove(thought)\n else:\n raise SarasvatiException(\"Unable to remove a non-existent thought\")", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related 
Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def unlink(address):", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def deletelink(self, link_index=1, child=None):\n child = self.getnodenamed(child) # Verify pointer.\n\n # (int link_index, node_bn* child)\n cnetica.DeleteLink_bn.argtypes = [c_int, c_void_p]\n cnetica.DeleteLink_bn.restype = None\n cnetica.DeleteLink_bn(link_index, child)", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unlink_action(self):\n self.check_access_rights('write', raise_exception=True)\n self.filtered('binding_model_id').write({'binding_model_id': False})\n return True", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def problem_relationship_delete(self, src_identifier, relation_dict):\n self._delete(\"problems/%d/relationships\" % src_identifier, json=relation_dict)", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()" ]
[ "0.6474747", "0.63120115", "0.6281923", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.5355181", "0.5353978", "0.5317241", "0.52846473", "0.526616", "0.52355725", "0.5235444", "0.5225721", "0.5221762", "0.52011555" ]
0.6547148
0
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):

    path = '/v1/global/outcome_groups/{id}/subgroups'
    payload = {
        'title' : title,
        'description' : description,
        'vendor_guid' : vendor_guid,
    }
    url = request_ctx.base_api_url + path.format(id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n return\n console.print(f\"::group::[bright_blue]{title}[/]\")\n yield\n console.print(\"::endgroup::\")", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_cannot_create_group_with_empty_field(self):\n\n utils.create_user_and_authenticate(self)\n group_fields = ['name', 'description']\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_subgroup_courses(request_ctx, course_id, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')", "def write_group_start(self, title):\n self.write('H', GROUP_START)\n self.write('i', ((len(title) + 1) * 2) + DB_STRING_SIZE_SZ)\n self.write('H', len(title) + 1)\n self.write_string(title, double_byte=True)", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_create_project_title_delimiter(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': 'New{}Project'.format(CAT_DELIMITER),\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def test_required_group_empty(self):\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n groups={'empty': GroupBuilder('empty')},\n )\n self.assertBuilderEqual(expected, builder)", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = 
group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def test_trivial(self):\n group = Group()", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def test_create_resource_group(self):\n pass", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def put_in_groupbox(widget, title):\n box = QtGui.QGroupBox(title)\n layout = QtGui.QHBoxLayout(box)\n layout.addWidget(widget)\n return box", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def _create_course(self):\r\n super(TestPublish, self)._create_course(split=False)\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion1',\r\n \"discussion discussion_category=\\\"Lecture 1\\\" discussion_id=\\\"a08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 1\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 1\",\r\n \"discussion_target\": \"Lecture 1\",\r\n \"display_name\": \"Lecture 1 Discussion\",\r\n \"discussion_id\": \"a08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert1',\r\n split=False\r\n )\r\n self._create_item('html', 'Html2', \"<p>Hellow</p>\", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion2',\r\n \"discussion discussion_category=\\\"Lecture 2\\\" discussion_id=\\\"b08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 2\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 2\",\r\n 
\"discussion_target\": \"Lecture 2\",\r\n \"display_name\": \"Lecture 2 Discussion\",\r\n \"discussion_id\": \"b08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert2',\r\n split=False\r\n )\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None, split=False)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None, split=False)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None, split=False)", "def make_new_post(title: str, category: int, description: str):\n slug = _get_slug(title)\n header = _make_header(title, category, description, slug)\n filename = _get_filename(slug)\n with open(filename, \"w\") as fp:\n fp.write(header)\n print(f\"Created {filename}\")" ]
[ "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53805554", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.5195138", "0.5189096", "0.51839924", "0.51741415", "0.51573783", "0.515698", "0.51557213", "0.5152268", "0.51454943", "0.51309705" ]
0.6064545
0
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):

    path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'
    payload = {
        'source_outcome_group_id' : source_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : 
outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_convert_to_existing_group2(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_all')\n inventoryloader.convert_group('glance_registry', 'glance_all')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance_registry' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert not inventoryloader.groups['glance_all'].has_group('glance_registry')\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.groups['glance_all'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_all'].vars", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def test_convert_to_existing_group(self, inventoryloader):\n 
inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def addgroup(self, abspath=None, sourcetree=pbxconsts.SOURCE_TREE.group, name=None, move=True):\n group_name = os.path.basename(abspath) if name is None or len(name) == 0 else name\n abspath = abspath if not abspath is None else self.realpath()\n subgroup = func.get_list_item(func.take(\\\n lambda o: o.isa == u'PBXGroup' and o.realpath() == abspath \\\n and o.displayname() == group_name, self.pbx_children), 0)\n if subgroup is None:\n subgroup = self.project().new_object(u'PBXGroup')\n pbxpath.set_path_with_source_tree(subgroup, abspath, source_tree=sourcetree, \\\n parent_group=self)\n if not name is None:\n subgroup.pbx_name = name\n self.addchild(subgroup, move=move)\n return subgroup", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def clone(self):\n return _libsbml.Group_clone(self)", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, 
**request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n 
node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n 
organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def test_mergeGroups(self):\n tabs = [\n widgets.Tab(u'id1', u'Title 1', None),\n widgets.Tab(u'id2', u'Title 2', None)]\n tabGroup1 = widgets.TabGroup(u'id', u'Title', tabs=tabs)\n tabs = [\n widgets.Tab(u'id3', u'Title 3', None)]\n tabGroup2 = widgets.TabGroup(u'id', u'Hello', tabs=tabs)\n\n newGroup = widgets.TabGroup.mergeGroups(tabGroup1, tabGroup2)\n self.assertEquals(newGroup.id, u'id')\n self.assertEquals(newGroup.title, u'Hello')\n self.assertEquals(newGroup.tabs, tabGroup1.tabs + tabGroup2.tabs)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass" ]
[ "0.5898711", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400297", "0.5225952", "0.5201779", "0.5191769", "0.5140609", "0.50881827", "0.50875765", "0.50842255", "0.50807875", "0.5079402" ]
0.5874123
1
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):

    path = '/v1/courses/{course_id}/outcome_groups/{id}/import'
    payload = {
        'source_outcome_group_id' : source_outcome_group_id,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def copy_group(self):\n dd = self.destination_directory\n sg = self.source_group\n dg = self.destination_group\n\n data = {\n 'description': sg.description,\n 'name': sg.name,\n 'status': sg.status,\n }\n\n # If this Group already exists, we'll just update it.\n if dg:\n for key, value in data.items():\n setattr(dg, key, value)\n\n while True:\n try:\n dg.save()\n return dg\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))\n\n # If we get here, it means we need to create the Group from scratch.\n while True:\n try:\n return dd.groups.create(data)\n except StormpathError as err:\n logger.error('Failed to copy Group: {} into Directory: {} ({})'.format(sg.name.encode('utf-8'), dd.name.encode('utf-8'), err))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")", "def migrate(self):\n self.destination_group = self.get_destination_group()\n self.destination_group = self.copy_group()\n self.copy_custom_data()\n\n logger.info('Successfully copied Group: {}'.format(self.destination_group.name.encode('utf-8')))\n return self.destination_group", "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : 
outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def test_convert_to_existing_group2(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance_all')\n inventoryloader.convert_group('glance_registry', 'glance_all')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance_registry' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert not inventoryloader.groups['glance_all'].has_group('glance_registry')\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.groups['glance_all'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_all'].vars", "def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)", "def test_convert_to_newgroup(self, inventoryloader):\n inventoryloader.convert_group('glance_api', 'glance1')\n assert 'glance_api' not in inventoryloader.groups\n assert 'glance1' in inventoryloader.groups\n assert inventoryloader.groups['glance_all'].has_group('glance1')\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance1'].has_host('localhost')\n assert \"management_bridge\" in inventoryloader.groups['glance1'].vars", "def test_convert_to_existing_group(self, inventoryloader):\n 
inventoryloader.convert_group('glance_api', 'glance_registry')\n assert 'glance_api' not in inventoryloader.groups\n assert not inventoryloader.groups['glance_all'].has_group('glance_api')\n assert inventoryloader.groups['glance_registry'].has_host('localhost')\n assert inventoryloader.groups['glance_registry'].has_host('localhost2')\n assert \"management_bridge\" in inventoryloader.groups['glance_registry'].vars", "def addgroup(self, abspath=None, sourcetree=pbxconsts.SOURCE_TREE.group, name=None, move=True):\n group_name = os.path.basename(abspath) if name is None or len(name) == 0 else name\n abspath = abspath if not abspath is None else self.realpath()\n subgroup = func.get_list_item(func.take(\\\n lambda o: o.isa == u'PBXGroup' and o.realpath() == abspath \\\n and o.displayname() == group_name, self.pbx_children), 0)\n if subgroup is None:\n subgroup = self.project().new_object(u'PBXGroup')\n pbxpath.set_path_with_source_tree(subgroup, abspath, source_tree=sourcetree, \\\n parent_group=self)\n if not name is None:\n subgroup.pbx_name = name\n self.addchild(subgroup, move=move)\n return subgroup", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "async def mergegroup(self, ctx, original_group_id: int, duplicate_group_id: int):\n original_group = await ex.get_group(original_group_id)\n duplicate_group = await ex.get_group(duplicate_group_id)\n if not duplicate_group:\n return await ctx.send(f\"> {duplicate_group_id} could not find a Group.\")\n if not original_group:\n return await ctx.send(f\"> {original_group} could not find a Group.\")\n # move aliases\n await ex.conn.execute(\"UPDATE groupmembers.aliases SET objectid = $1 WHERE isgroup = $2 AND objectid = $3\", original_group.id, 1, duplicate_group.id)\n for member_id in duplicate_group.members:\n if member_id not in original_group.members:\n # update the member location to the original group\n await ex.conn.execute(\"UPDATE groupmembers.idoltogroup SET groupid = $1 WHERE idolid = $2 AND groupid = $3\", original_group.id, member_id, duplicate_group.id)\n # delete group\n await ex.conn.execute(\"DELETE FROM groupmembers.groups WHERE groupid = $1\", duplicate_group.id)\n # recreate cache\n await ex.create_idol_cache()\n await ex.create_group_cache()\n await ctx.send(f\"> Merged {duplicate_group_id} to {original_group_id}.\")", "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def clone(self):\n return _libsbml.Group_clone(self)", "def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, 
**request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def _group_append(groups, id, new_group):\n\n path_inds = []\n _, _, idx = Skeleton._group_parent(groups, id)\n while id is not None:\n path_inds.append(idx)\n id, idx, _ = Skeleton._group_parent(groups, id)\n\n path_inds = list(reversed(path_inds))\n\n if len(path_inds) == 1:\n groups[path_inds[0]]._replace(children=new_group)\n elif len(path_inds) == 2:\n groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)\n elif len(path_inds) == 3:\n groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)\n\n return groups", "def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' : outcome_id,\n 'title' : title,\n 'display_name' : display_name,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n 'mastery_points' : mastery_points,\n 'ratings[description]' : ratings_description,\n 'ratings[points]' : ratings_points,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n 
node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def copyGroupFrom(self, groupName, sourceDesign, sourceProject=None, sourceProjectPath=None):\n oName = self.project_name\n if sourceProject == oName or sourceProject is None:\n oSrcProject = self._desktop.GetActiveProject()\n else:\n self._desktop.OpenProject(sourceProjectPath)\n oSrcProject = self._desktop.SetActiveProject(sourceProject)\n\n oDesign = oSrcProject.SetActiveDesign(sourceDesign)\n oEditor = oDesign.SetActiveEditor(\"3D Modeler\")\n oEditor.Copy([\"NAME:Selections\", \"Selections:=\", groupName])\n\n self.modeler.oeditor.Paste()\n self.modeler.primitives.refresh_all_ids()\n self.materials._load_from_project()\n return True", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_does_not_return_duplicate_groups(self):\n repo = Repository.objects.create(\n organization_id=self.org.id,\n name=self.project.name,\n )\n commit = Commit.objects.create(\n organization_id=self.org.id,\n repository_id=repo.id,\n key='a' * 40,\n )\n commit2 = Commit.objects.create(\n 
organization_id=self.org.id,\n repository_id=repo.id,\n key='b' * 40,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit,\n order=1,\n )\n ReleaseCommit.objects.create(\n organization_id=self.org.id,\n release=self.release,\n commit=commit2,\n order=0,\n )\n GroupLink.objects.create(\n group_id=self.group.id,\n project_id=self.group.project_id,\n linked_type=GroupLink.LinkedType.commit,\n relationship=GroupLink.Relationship.resolves,\n linked_id=commit.id,\n )\n GroupResolution.objects.create(\n group=self.group,\n release=self.release,\n type=GroupResolution.Type.in_release,\n )\n\n response = self.client.get(self.path)\n\n assert response.status_code == 200, response.content\n assert len(response.data) == 1\n assert response.data[0]['id'] == six.text_type(self.group.id)", "def test_mergeGroups(self):\n tabs = [\n widgets.Tab(u'id1', u'Title 1', None),\n widgets.Tab(u'id2', u'Title 2', None)]\n tabGroup1 = widgets.TabGroup(u'id', u'Title', tabs=tabs)\n tabs = [\n widgets.Tab(u'id3', u'Title 3', None)]\n tabGroup2 = widgets.TabGroup(u'id', u'Hello', tabs=tabs)\n\n newGroup = widgets.TabGroup.mergeGroups(tabGroup1, tabGroup2)\n self.assertEquals(newGroup.id, u'id')\n self.assertEquals(newGroup.title, u'Hello')\n self.assertEquals(newGroup.tabs, tabGroup1.tabs + tabGroup2.tabs)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass" ]
[ "0.5874123", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400297", "0.5225952", "0.5201779", "0.5191769", "0.5140609", "0.50881827", "0.50875765", "0.50842255", "0.50807875", "0.5079402" ]
0.5898711
0
Parse challenge from a challenge response, cache it, and return it.
def _update_challenge(request: PipelineRequest, challenger: "PipelineResponse") -> HttpChallenge: challenge = HttpChallenge( request.http_request.url, challenger.http_response.headers.get("WWW-Authenticate"), response_headers=challenger.http_response.headers, ) ChallengeCache.set_challenge_for_url(request.http_request.url, challenge) return challenge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_challenge(cls, response):\n links = _parse_header_links(response)\n try:\n authzr_uri = links['up']['url']\n except KeyError:\n raise errors.ClientError('\"up\" link missing')\n return (\n response.json()\n .addCallback(\n lambda body: messages.ChallengeResource(\n authzr_uri=authzr_uri,\n body=messages.ChallengeBody.from_json(body)))\n )", "def solve_challenge():\n\treturn (challenge[0]*challenge[1]-challenge[2]) * challenge[3] - challenge[4]", "def _parse_challenge(header):\n # type: (str) -> Dict[str, str]\n ret = {}\n if header.startswith(BEARER):\n challenge_params = header[len(BEARER) + 1 :]\n\n matches = re.split(AUTHENTICATION_CHALLENGE_PARAMS_PATTERN, challenge_params)\n _clean(matches)\n ret = {}\n for i in range(0, len(matches), 2):\n ret[matches[i]] = matches[i + 1]\n\n return ret", "def create_challenge_response(\n self,\n room_code: str,\n challenge_response: ChallengeResponse,\n ) -> GameInfo:\n game = self.read_game(room_code)\n\n if game.challenge is None:\n msg = f\"No challenge exists on game {room_code!r}\"\n raise InvalidMove(msg)\n if game.challenge.state != ChallengeState.AWAITING_RESPONSE:\n state = game.challenge.state.value\n msg = f\"Challenge is in {state!r} state, not 'AWAITING_RESPONSE'\"\n raise InvalidMove(msg)\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set challenge.#chalresp=:r, challenge.#chalstate=:s\"),\n ExpressionAttributeValues={\n \":r\": challenge_response.dict(),\n \":s\": ChallengeState.VOTING,\n },\n ExpressionAttributeNames={\n # \"response\" and \"state\" are reserved words\n \"#chalstate\": \"state\",\n \"#chalresp\": \"response\",\n },\n ConditionExpression=Attr(\"challenge\").eq(game.dict()[\"challenge\"]),\n )\n\n return self.read_game(room_code)", "def get_challenge(self, obj):\n return obj.challenge_phase.challenge", "def parse(self, response):\n if self._has_captcha(response):\n result = self._handle_captcha(response, self.parse)\n else:\n result = super(AmazonBaseClass, self).parse(response)\n\n return result", "def read(challenge):\n\n data = {\n 'id': challenge.id,\n 'name': challenge.name,\n 'value': challenge.value,\n 'description': \"This challenge has not been unlocked yet. 
You need at least {} points to play.\".format(challenge.unlock_at),\n 'category': challenge.category,\n 'hidden': challenge.hidden,\n 'max_attempts': challenge.max_attempts,\n 'unlock_at': challenge.unlock_at,\n 'locked': True,\n 'type': challenge.type,\n 'type_data': {\n 'id': CTFdLockingChallenge.id,\n 'name': CTFdLockingChallenge.name,\n 'templates': CTFdLockingChallenge.templates,\n 'scripts': CTFdLockingChallenge.scripts,\n },\n }\n\n if session.get('admin') or not locked(challenge):\n data['locked'] = False\n data['description'] = str(challenge.description)\n\n return challenge, data", "def decode(self, response, request):\n log.debug(\"Decoding authorization.\")\n auth = self._parseAuth(response)\n try:\n self._verifyChallenge(auth[\"challenge\"], request)\n creds = self.buildCredentials(auth, request)\n except KeyError, ke:\n raise LoginFailed(\"{0!r} not in authorization\".format(*ke.args))\n except LoginFailed, lf:\n log.warn(lf)\n raise\n log.debug(\"Decoded credentials: {0}\".format(creds))\n return creds", "def get(self):\n try:\n imageFilename = random.choice(os.listdir(self.cacheDir))\n imagePath = os.path.join(self.cacheDir, imageFilename)\n with open(imagePath) as imageFile:\n self.image = imageFile.read()\n except IndexError:\n raise GimpCaptchaError(\"CAPTCHA cache dir appears empty: %r\"\n % self.cacheDir)\n except (OSError, IOError):\n raise GimpCaptchaError(\"Could not read Gimp captcha image file: %r\"\n % imageFilename)\n\n self.answer = imageFilename.rsplit(os.path.extsep, 1)[0]\n self.challenge = self.createChallenge(self.answer)\n\n return (self.image, self.challenge)", "def get_challenge(self, obj):\n return obj.challenge_phase_split.challenge_phase.challenge", "def create_challenge(\n self,\n room_code: str,\n challenge: NewChallenge,\n ) -> GameInfo:\n game = self.read_game(room_code)\n\n if game.challenge is not None:\n raise InvalidMove(f\"Game {room_code!r} already has an open challenge\")\n\n if challenge.challenger_name not in [player.name for player in game.players]:\n msg = f\"Player {challenge.challenger_name!r} not in game {room_code!r}\"\n raise InvalidMove(msg)\n\n if (len(game.moves) == 0) or (challenge.move != game.moves[-1]):\n raise InvalidMove(\"Can only challenge the most recent move\")\n\n initial_state = (\n ChallengeState.AWAITING_RESPONSE\n if challenge.type is ChallengeType.NO_VALID_WORDS\n else ChallengeState.VOTING\n )\n\n game_challenge = Challenge(\n challenger_name=challenge.challenger_name,\n move=challenge.move,\n type=challenge.type,\n state=initial_state,\n response=None,\n votes=[],\n )\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set challenge=:c\"),\n ExpressionAttributeValues={\":c\": game_challenge.dict()},\n ConditionExpression=Attr(\"challenge\").eq(None),\n )\n\n self._advance_turn(game)\n\n return self.read_game(room_code)", "def _parse_response(response):\n m = re.match(r\"^(?P<alias>[^\\s]*)\\s+(?P<resp>.*)$\", response)\n return m.group('alias'), m.group('resp')", "def get_response_from_cache(responsefile):\n global __response_cache\n\n if responsefile not in __response_cache:\n return\n\n if not goodfile(responsefile):\n try:\n del __response_cache[responsefile]\n except KeyError: # pragma: no cover\n pass\n return\n\n modtime = str(os.path.getmtime(responsefile))\n if modtime not in __response_cache.get(responsefile, {}):\n return\n\n log.debug(\"Retrieving data from response file (%s) in cache\" %\n responsefile)\n return __response_cache.get(responsefile, 
{}).get(modtime)", "def generate_response(self, challenge, name):\n response_plain = challenge.identifier + self.secret + challenge.value\n response_hashed = hashlib.sha1(response_plain)\n response_obj = Response(challenge.identifier, response_hashed, name)\n return response_obj", "def parse_response(self, buffer):\n # Begin by copying the data out of the buffer. This is necessary\n # because as much as possible we want to use the built-in bytestring\n # methods, rather than looping over the data in Python.\n temp_buffer = buffer.tobytes()\n\n index = temp_buffer.find(b'\\n')\n if index == -1:\n return None\n\n version, status, reason = (\n temp_buffer[0:index].split(None, 2) + [b''])[:3]\n if not version.startswith(b'HTTP/1.'):\n raise ParseError(\"Not HTTP/1.X!\")\n\n minor_version = int(version[7:])\n status = int(status)\n reason = memoryview(reason.strip())\n\n # Chomp the newline.\n index += 1\n\n # Now, parse the headers out.\n end_index = index\n headers = []\n\n while True:\n end_index = temp_buffer.find(b'\\n', index)\n if end_index == -1:\n return None\n elif (end_index - index) <= 1:\n # Chomp the newline\n end_index += 1\n break\n\n name, value = temp_buffer[index:end_index].split(b':', 1)\n value = value.strip()\n headers.append((memoryview(name), memoryview(value)))\n index = end_index + 1\n\n resp = Response(status, reason, minor_version, headers, end_index)\n return resp", "async def get_response(self, key: str) -> Optional[CachedResponse]:\n # Attempt to fetch response from the cache\n logger.debug(f'Attempting to get cached response for key: {key}')\n try:\n if not await self.responses.contains(key):\n key = str(await self.redirects.read(key))\n response = await self.responses.read(key)\n except (KeyError, TypeError):\n logger.debug('No cached response found')\n return None\n if not isinstance(response, CachedResponse):\n logger.debug('Cached response is invalid')\n return None\n # If the item is expired or filtered out, delete it from the cache\n if not self.is_cacheable(response):\n logger.info('Cached response expired; deleting')\n await self.delete(key)\n return None\n\n # Optionally update last_used time\n if self.lru:\n response.last_used = datetime.utcnow()\n await self.responses.write(key, response)\n\n logger.info(f'Cached response found for key: {key}')\n return response", "def _parse_response(self, response):\n if response is not None:\n return response.string\n return response", "def createChallenge(self, answer):\n timestamp = str(int(time.time())).zfill(12)\n blob = timestamp + answer\n encBlob = self.publicKey.encrypt(blob)\n hmac = crypto.getHMAC(self.hmacKey, encBlob)\n challenge = urlsafe_b64encode(hmac + encBlob)\n return challenge", "def answer_challenge(self, challenge_body, response):\n action = LOG_ACME_ANSWER_CHALLENGE(\n challenge_body=challenge_body, response=response)\n with action.context():\n return (\n DeferredContext(\n self._client.post(challenge_body.uri, response))\n .addCallback(self._parse_challenge)\n .addCallback(self._check_challenge, challenge_body)\n .addCallback(\n tap(lambda c:\n action.add_success_fields(challenge_resource=c)))\n .addActionFinish())", "def check_response(self, challenge, response):\n if challenge is not None:\n expected_response = challenge.identifier + self.secret + challenge.challenge\n expected_response_hashed = hashlib.sha1(expected_response)\n if expected_response_hashed == response.response_hash:\n return True\n else:\n return False\n else:\n raise Exception", "async def on_challenge_update(self, 
challenge_data):\n pass", "async def parse_handle_response(self, json_response):\n try:\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n message = await other_key.verify_message(json_response)\n response = json.loads(message)\n response = CommandResponseObject.from_json_data_dict(\n response, JSONFlag.NET\n )\n\n return self.handle_response(response)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. OffChainInvalidSignature: {e}'\n )\n raise e\n except JSONParsingError as e:\n logger.warning(\n f'(other:{self.other_address_str}) JSONParsingError: {e}'\n )\n raise e\n except OffChainException or OffChainProtocolError as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'OffChainException/OffChainProtocolError: {e}',\n )\n raise e", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def ping_challenge(self):\n return self._ping_data_raw['challenge']", "def challenge_response(self, challenge_response):\n\n self._challenge_response = challenge_response", "def parse_answers(dns_resp: str, session_cache):\n\n ID = dns_resp[:4]\n other_flags = dns_resp[4:8]\n questions_count = dns_resp[8:12]\n answers_count = dns_resp[12:16]\n auth_serv_info = dns_resp[16:20]\n additional_info = dns_resp[20:24]\n offset = 0\n ip = \"0.0.0.0\"\n\n # может придти несколько ответов, из каждого вычленим нужные записи\n for i in range(int(answers_count, 16)):\n try:\n ip, offset = DNSHandler.parse_answer(dns_resp, session_cache, offset=offset * i)\n except ValueError:\n print(\"url does not exist\")\n sys.exit(0)\n return ip", "def _process_response(self, response):\n\n self.log.debug(\"Received Response: %r\", response)\n\n return self.token_manager.process_response(response)", "def parse_line(cls, line):\n assert isinstance(line, bytes)\n\n match = HEADER_FIELD_REGEX.match(line)\n\n if not match:\n raise HeaderParseError(line)\n\n name, content = (s.decode(\"ascii\").strip() for s in match.groups(b\"\"))\n name = name.lower()\n\n if name != \"set-cookie\" or is_rfc1123_datetime(content):\n content = 
cls.split_field_content(content)\n\n return (name, content)", "def get_h_parser(*, allow_cache=True):\n\n # Singleton pattern\n global _parser\n if _parser and allow_cache:\n return _parser\n\n source = _get_wgpu_header(\n os.path.join(lib_dir, \"resources\", \"webgpu.h\"),\n os.path.join(lib_dir, \"resources\", \"wgpu.h\"),\n )\n\n # Create parser\n hp = HParser(source)\n hp.parse()\n _parser = hp\n return hp", "async def _handle_challenge_request(self, split_message: List[str]) -> None:\n challenging_player = split_message[2].strip()\n\n if challenging_player != self.username:\n if len(split_message) >= 6:\n if split_message[5] == self._format:\n await self._challenge_queue.put(challenging_player)" ]
[ "0.69334584", "0.57358587", "0.56640327", "0.56572354", "0.5530632", "0.54902357", "0.54403126", "0.54084736", "0.54046005", "0.54018176", "0.5199438", "0.5182611", "0.517387", "0.5165005", "0.512281", "0.5000999", "0.49986807", "0.4956543", "0.4911077", "0.49107736", "0.4908334", "0.4896391", "0.48870412", "0.4884257", "0.48749492", "0.4874675", "0.48739746", "0.48563156", "0.48544717", "0.4850815" ]
0.6099478
1
check if the reference folder is in place and all attributes are ready
def check_reference_ready(): # check to see if there is a manifest file in the default reference path manifest_file = os.path.join(settings.DEFAULT_REFERENCE_PATH, 'manifest.json') if not os.path.isfile(manifest_file): _log("manifest.json file cannot be found in the reference folder; simulation will NOT work!") return _log("reading manifest.json ..") # read the manifest file with open(manifest_file, 'r') as manifest: data = json.load(manifest) reference_fasta = os.path.join(settings.DEFAULT_REFERENCE_PATH, data["reference"]) if not os.path.isfile(reference_fasta): _log("genome reference file (.fasta | .fa) cannot be found in the reference folder; simulation will NOT work!") return _log("found all required simulation files in place; simulation is READY!") settings.REFERENCE_READY = True settings.INPUT_FILES = {"reference": data['reference'], "targets": 'dummy'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def copy_file_check(self):\n pass", "def _before_reference_check(self, maya_file, client_data=None):\n\n if self.is_artella_path():\n self.validate_environment_for_callback('BeforeReferenceCheck')\n\n raw_full_name = maya_file.rawFullName()\n if not dccplugin.DccPlugin().is_path_translated(\n raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):\n convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)\n maya_file.setRawFullName(convert_path)\n\n return True", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def checkRefs(self, export_refs):\r\n return True", "def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False", "def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def needs_sync(self):\n\n affected_attributes = [\n 'css_files', 'js_files',\n 'scss_files', 'widgets']\n\n for attr in affected_attributes:\n if len(getattr(self, attr)) > 0:\n return True\n return False", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def verify_attrs(self):\n self.verify_namespace_attrs(self.newlibrary.wrap_namespace)", "def base_data_check_shot(self):\n\n #alembic_dir\n alembic_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_dir')\n \n #is False\n if not (alembic_dir):\n #log\n self.logger.debug('Parameter alembic dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_dir)):\n #log\n self.logger.debug('Alembic dir {0} does not exist.'.format(alembic_dir))\n return False\n\n\n #alembic_path_list\n alembic_path_list = [os.path.join(alembic_dir, file).replace('\\\\', '/') for \n file in \n os.listdir(alembic_dir) if \n (os.path.isfile(os.path.join(alembic_dir, file)) and file.split('.')[-1] == 'abc')]\n #alembic_path_list empty\n if not (alembic_path_list):\n #log\n self.logger.debug('alembic_path_list empty. 
Alembic dir {0} does not seem to contain alembic files.'.format(alembic_dir))\n return False\n\n\n #checked_alembic_path_list\n checked_alembic_path_list = []\n\n #iterate\n for alembic_path in alembic_path_list:\n\n #object_path_list\n object_path_list = self.alembic_functionality.get_alembic_object_path_list(alembic_path)\n #object_path_list empty\n if not (object_path_list):\n #log\n self.logger.debug('Object path list for alembic {0} empty. Continuing'.format(alembic_path))\n continue\n\n #iterate, check and create\n for object_path in object_path_list:\n\n #helga_locator_attr_exists\n helga_locator_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_locator')\n\n #helga_highpoly_rendergeo_attr_exists\n helga_highpoly_rendergeo_attr_exists = self.alembic_functionality.alembic_attribute_exists(alembic_path, object_path, 'helga_highpoly_rendergeo')\n\n #if attr exists append and break\n if (helga_locator_attr_exists and helga_highpoly_rendergeo_attr_exists):\n\n #append\n checked_alembic_path_list.append(alembic_path)\n break\n\n #checked_alembic_path_list empty\n if not (checked_alembic_path_list):\n #log\n self.logger.debug('checked_alembic_path_list empty. Alembic dir {0} does not seem to contain alembic files with helga_highpoly_rendergeo attribute.'.format(alembic_dir))\n return False\n\n\n #alembic_highpoly_rendergeo_dir\n alembic_highpoly_rendergeo_dir = self.alembic_functionality.get_parm_value(self.node, 'alembic_highpoly_rendergeo_dir')\n \n #is False\n if not (alembic_highpoly_rendergeo_dir):\n #log\n self.logger.debug('Parameter alembic highpoly rendergeo dir empty.')\n return False\n\n #dir exists\n if not (os.path.isdir(alembic_highpoly_rendergeo_dir)):\n #log\n self.logger.debug('Alembic highpoly rendergeo dir {0} does not exist.'.format(alembic_highpoly_rendergeo_dir))\n return False\n\n\n #return\n return [checked_alembic_path_list, alembic_highpoly_rendergeo_dir]", "def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'relu5-3/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'relu5-3/test.pkl')))", "def check_folder_state(self):\n while self:\n diff = self.get_diff()\n print(diff or 'No changes detected')\n if diff:\n self.parent.send_diff_data(diff)\n time.sleep(1)", "def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()", "def _check_before_run(self):\n\t\tif not osp.exists(self.dataset_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n\t\tif not osp.exists(self.train_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n\t\tif not osp.exists(self.query_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n\t\tif not osp.exists(self.gallery_dir):\n\t\t\traise 
RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n 
raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.probe_gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_gallery_dir))", "def check_structure_is_modified(self):\n if not self.structure_has_been_modified: \n print('NEED TO MODIFY STRUCTURE BEFORE PROCEEDING FURTHER!')\n sys.exit()", "def ensure_loaded(self):\n if not (Asset.list_all(self)):\n self.update_list()", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def check(self):\r\n self._check_object(self._config.name)" ]
[ "0.6192987", "0.60495603", "0.5934274", "0.59128857", "0.58630824", "0.5849084", "0.5817839", "0.5815572", "0.5815572", "0.57971984", "0.57678586", "0.5761644", "0.57452965", "0.5725083", "0.5716483", "0.5684965", "0.5675776", "0.5654527", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5647706", "0.5642257", "0.56371033", "0.55947846", "0.5581424", "0.55641794" ]
0.7016824
0
Callback to be called whenever the system state has changed. Checks whether or not the step has to be advanced or not
def updateState(self): if ('cutting' in self.step_ops) and (self.cut_state.user_cutting): self.step_ops['cutting'] = True if ('cooking' in self.step_ops) and (self.cut_state.user_cooking): self.step_ops['cooking'] = True # TODO: add the rest of the operations advance = True # Check if ALL operations are complete for op in self.step_ops: if self.step_ops[op] == False: advance = False break if advance: self.nextStep()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()", "def has_state_changed(self) -> bool:\r\n ...", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == \"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def _on_step(self) -> bool:\n\t\t#self.model.get_env().env_method(\"set_model_reference\", self.model.get_parameters())\n\t\tself.env.set_model_reference(self.model.get_parameters())\n\t\tprint(\"current timestep\", self.num_timesteps)\n\t\treturn True", "def assumed_state(self):\n # Progtime Blue does NOT update the handles when the manual\n # switch button is pressed, so the state may be wrong!\n return True", "def segmentNeedle(self):\r\n # productive #event\r\n profprint()\r\n if self.fiducialButton.isEnabled():\r\n print \"new checked state: \", not self.fiducialButton.checked\r\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# 
Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. :\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def state_wait_validate(cfg, app, win, events):", "def step(self):\n self.state_estimator.step()", "def step(self, state):", "def check_state(self):\n pass", "def stepText2Changed(build, step, text2):", "def update(self, elapsed):\n delta = 10 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('desired-stimulus-releaser')\n solo = self.behavior_system.robot.drive_system.solo_drive\n joy = self.behavior_system.robot.emotion_system.emotion_joy\n\n if rel.is_active() and self.behavior_system.robot.drive_system.active_drive == solo and self.behavior_system.robot.emotion_system.active_emotion == joy:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def state_chosen_validate(cfg, app, win, events):", "def state_processing_validate(cfg, app, win, events):", "def update(self, elapsed):\n delta = 10 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('desired-stimulus-releaser')\n social = self.behavior_system.robot.drive_system.social_drive\n joy = self.behavior_system.robot.emotion_system.emotion_joy\n\n if rel.is_active() and self.behavior_system.robot.drive_system.active_drive == social and self.behavior_system.robot.emotion_system.active_emotion == joy:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def update(self, elapsed):\n delta = 18 * elapsed\n rel = self.behavior_system.robot.perception_system.get_releaser('undesired-stimulus-releaser')\n sorry = self.behavior_system.robot.emotion_system.emotion_sorrow\n\n if rel.is_active() and self.behavior_system.robot.emotion_system.active_emotion == sorry:\n self.activation_level = self.activation_level + delta\n else:\n self.activation_level = max(0, self.activation_level - delta)", "def state_finish_validate(cfg, app, win, events):", "def state_wait_do(cfg, app, win, events):", "def do_step(self) -> None:", "def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False", "def notify_wizard(self):\n self.emit_datachanged()\n #self.emit(SIG(\"condition_update\"), self._conds or None)", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", 
self.exploration_rate)", "def state_chosen_do(cfg, app, win, events):", "def update_shuttle_state(self):\n if len(self.steps) > self.current_step >= 0:\n step = self.steps[self.current_step]\n if step.is_fulfilled():\n step.end(True)", "def onTimeStep(self, timeStep):\n pass", "def _update_status(self):\n if any([abs(v) > LIMITS[i] for i, v in enumerate(self.state)]):\n self.terminal = True\n elif abs(self.q[3]) < LIMITS[9]:\n self.terminal = True\n elif self.steps + 1 >= self.max_steps:\n self.terminal = True", "def _autooff_changed(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][0] = new_state.state == 'on'\n _eval_state(hass)", "def handle_robot_step_changed(self, step):\n\n #Save the last step if some lost\n last_known_step = self.step\n super(WeldTask, self).handle_robot_step_changed(step)\n\n if step < 0 or step >= len(self.welding_parameters):\n # invalid step\n return\n\n if self.job is None:\n # no jobs\n return\n\n if self.welding_parameters[step] == WeldingState():\n # default state, skip\n return\n\n if last_known_step > step:\n # moving to the other direction\n return\n\n if self.welding_parameters[last_known_step] != self.welding_parameters[step]:\n # if there is a difference, send the new params\n RosProxy().call_service(\n '/welding_driver/set_params',\n SetWeldingParameters,\n self.welding_parameters[step])" ]
[ "0.6339768", "0.6194187", "0.6136971", "0.5964047", "0.5904102", "0.5903155", "0.5893415", "0.5889973", "0.58527935", "0.58454347", "0.5844172", "0.58329093", "0.57805914", "0.57780147", "0.5772509", "0.5751888", "0.573898", "0.57344913", "0.5711796", "0.5696087", "0.56890595", "0.56870806", "0.568033", "0.566892", "0.5665165", "0.56595486", "0.5652686", "0.56413996", "0.5639269", "0.56331325" ]
0.6468291
0
Constructor for thread that will request the RSS of a particular podcast series, parse the series details and episode information, and save the information w/`storer`
def __init__(self, storer, series, i): super(EpisodeWorker, self).__init__() self.storer = storer self.series = series # All series self.i = i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dict\n result_dict = dict()\n result_dict['series'] = deepcopy(s.__dict__)\n result_dict['series']['genres'] = \\\n result_dict['series']['genres'].split(';')\n result_dict['series']['type'] = 'series'\n result_dict['episodes'] = ep_dicts\n\n # Store podcast\n self.storer.store(result_dict)\n\n # Move onto the next one\n self.i += 20\n print(\"Retrieved \" + str(s.id))", "def __init__(self, **kwargs):\n self.identifier = kwargs.get(\"identifier\")\n self.playback_state = kwargs.get(\"playback_state\")\n self.title = kwargs.get(\"title\")\n self.series_name = kwargs.get(\"series_name\")\n self.artist = kwargs.get(\"artist\")\n self.album = kwargs.get(\"album\")\n self.genre = kwargs.get(\"genre\")\n self.total_time = kwargs.get(\"total_time\")\n self.position = kwargs.get(\"position\")\n self.season_number = kwargs.get(\"season_number\")\n self.episode_number = kwargs.get(\"episode_number\")\n self.repeat = kwargs.get(\"repeat\")\n self.shuffle = kwargs.get(\"shuffle\")\n self.media_type = kwargs.get(\"media_type\")\n self.playback_rate = kwargs.get(\"playback_rate\")\n self.supported_commands = kwargs.get(\"supported_commands\")\n self.artwork = kwargs.get(\"artwork\")\n self.artwork_identifier = kwargs.get(\"artwork_identifier\")\n self.artwork_mimetype = kwargs.get(\"artwork_mimetype\")\n self.artwork_width = kwargs.get(\"artwork_width\")\n self.artwork_height = kwargs.get(\"artwork_height\")\n self.skip_time = kwargs.get(\"skip_time\")\n self.app_name = kwargs.get(\"app_name\")\n self.content_identifier = kwargs.get(\"content_identifier\")", "def __init__(self):\n\t\t\n\t\tprint \"Getting latest stories from Hacker News...\"\n\t\t#try:\n\t\tself.stories = self.h.getLatestStories(self.newestOrTop, self.alreadyReadList)\n\t\t\n\t\tfor i in range(1,self.h.numberOfStoriesOnFrontPage+1):\n\t\t\tself.oneToThirty.append(str(i))\n\t\t\tself.oneToThirtyComments.append(\"c\" + str(i))\n\t\t\tself.oneToThirtyPlusComments.append(str(i) + \"+\")\n\t\t\tself.oneToThirtySubmitters.append(\"s\" + str(i))\n\t\t\tself.oneToThirtyInstapaper.append(\"i\" + str(i))\n\t\t\t\n\t\tself.setPreferencesAtStartup()\n\n\t\tif self.hnUserName != \"\":\n\t\t\tprint \"Getting \" + self.hnUserName + \"'s karma from HN...\"\n\t\t\tuser = HackerNewsUser(self.hnUserName)\n\t\t\tself.karma = user.karma\n\n\t\tself.printStories()\n\t\t\n\t\t#except:\n\t\t#\tprint \"error\"\n\t\t#\tself.quit = 1\n\n\t\tself.loop()", "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n\n if \"airDate\" in json:\n self.airDate = datetime.strptime(json[\"airDate\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"name\" in json:\n self.name = json[\"name\"]\n\n if \"title\" in json:\n self.title = json[\"title\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episode\" in json:\n self.episode = json[\"episode\"]\n\n if \"episodeNumber\" in json:\n self.episodeNumber = json[\"episodeNumber\"]\n else:\n self.episodeNumber = None\n\n if \"season\" in json:\n self.season = json[\"season\"]\n\n if \"seasonNumber\" in json:\n self.seasonNumber = json[\"seasonNumber\"]\n else:\n self.seasonNumber = None\n\n if \"publishStart\" in json:\n self.publishStart = datetime.strptime(json[\"publishStart\"], 
'%Y-%m-%dT%H:%M:%SZ')\n\n if \"publishEnd\" in json:\n self.publishEnd = datetime.strptime(json[\"publishEnd\"], '%Y-%m-%dT%H:%M:%SZ')\n\n if \"videoDuration\" in json:\n self.videoDuration = timedelta(milliseconds=json[\"videoDuration\"])\n\n if \"isFreePlayable\" in json:\n self.isFreePlayable = json[\"isFreePlayable\"]\n\n if \"isPlayable\" in json:\n self.isPlayable = json[\"isPlayable\"]\n\n if \"isNew\" in json:\n self.isNew = json[\"isNew\"]\n\n if \"image\" in json:\n self.image = Image(json[\"image\"])", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def __init__(self, data, feed_repo):\n super(Feed, self).__init__()\n self.url = data['url']\n self.name = data.get('name')\n read_stamp = data.get(\"last_read\")\n if read_stamp:\n self.last_read = datetime.datetime.fromtimestamp(read_stamp, tz=datetime.timezone.utc)\n else:\n self.last_read = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)\n self._repo = feed_repo", "def __init__(self, thoonk, feed):\n Queue.__init__(self, thoonk, feed)\n\n self.feed_publishes = 'feed.publishes:%s' % feed\n self.feed_published = 'feed.published:%s' % feed\n self.feed_cancelled = 'feed.cancelled:%s' % feed\n self.feed_retried = 'feed.retried:%s' % feed\n self.feed_finishes = 'feed.finishes:%s' % feed\n self.feed_claimed = 'feed.claimed:%s' % feed\n self.feed_stalled = 'feed.stalled:%s' % feed\n self.feed_running = 'feed.running:%s' % feed\n \n self.job_finish = 'job.finish:%s' % feed", "def __init__(self, number, json):\n\n self.number = number\n self.episodes = []\n for episode in json:\n self.episodes.append(Episode(episode))", "def __init__(self: object) -> None:\n self.empty: bool = True\n self.episode_broadcast: str = \"\"\n self.episode_id: int = 0\n self.episode_inspectors: str = \"\"\n self.episode_name: str = \"\"\n self.episode_sequence: str = \"\"\n self.episode_url: str = \"\"\n self.episode_year: int = 0", "def get_podcast_episodes(url):\n\n def parse_pubdate(date_string):\n \"\"\"\n Change pubdate string to datetime object. 
Tries a bunch of\n possible formats, but if none of them is a match, it will\n return a epoch = 0 datetime object\n\n :param date_string: A string representing a date\n :return: datetime object\n \"\"\"\n date_formats = (\n '%a, %d %b %Y %H:%M:%S +0000',\n '%a, %d %b %Y',\n '%a, %d %b %Y%H:%M:%S +0000',\n '%a, %d %b %Y %H:%M',\n '%a, %d %b %Y %H.%M'\n )\n df_generator = (format for format in date_formats)\n\n date = None\n while date is None:\n try:\n date = datetime.strptime(date_string, next(df_generator))\n except ValueError:\n pass\n except StopIteration:\n date = datetime.fromtimestamp(0)\n\n return date\n\n doc = get_document(url)\n\n return (\n {\n 'url': item.select('guid')[0].text,\n 'Premiered': parse_pubdate(\n item.select('pubdate')[0].text\n ).strftime(\"%d.%m.%Y\"),\n # 'Duration': duration_to_seconds(item.find('itunes:duration').text),\n 'title': item.title.text,\n 'Plot': item.description.text\n }\n for item in doc.find_all(\"item\")\n )", "def __init__(self, rss_url=None, cell_num=None, cache_filename=None):\n if not any([rss_url, cell_num]):\n raise Exception('Must pass rss url and cell number.')\n self.rss_url = str(rss_url)\n self.cell_num = cell_num\n self.cache_filename = str(cache_filename)\n self.from_num = '4088685453'\n\n # Set up twilio client for sending text messages\n account = os.environ.get('TWILIO_ACCT')\n token = os.environ.get('TWILIO_TOKEN')\n self.twilio_client = TwilioRestClient(account, token)\n\n self.load_last_post()", "def __init__(self, url, epRange):\n self.driver = webdriver.PhantomJS()\n self.downloads = OrderedDict() # sort episodes in asending order\n self.pbar = \"\" # Download Progressbar\n self.Main(url, epRange)", "def run_rss(self):\n\n pass", "def __init__( self ):\n\n self.log = gLogger.getSubLogger( self.__class__.__name__ )\n self.rssConfig = RssConfiguration()\n self.__opHelper = Operations()\n self.rssClient = None\n\n # We can set CacheLifetime and CacheHistory from CS, so that we can tune them.\n cacheLifeTime = int( self.rssConfig.getConfigCache() )\n\n # RSSCache only affects the calls directed to RSS, if using the CS it is not\n # used.\n self.seCache = RSSCache( 'StorageElement', cacheLifeTime, self.__updateSECache )", "def __init__(self, url, start_pos, end_pos, f):\n\n super(DownloadThread, self).__init__()\n self.url = url\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.fd = f", "def __init__(self, SONG):\n self.track_name = SONG['name']\n self.artist_name = SONG['artist']\n self.provider = 'lastfm'\n self.track_number = \"1\"\n self.collection_name = \"\"\n self.release_date = \"\"\n self.artwork_url_100 = SONG[\"image\"][-1][\"#text\"]\n self.track_time = \"\"\n self.primary_genre_name = \"N/A\"", "def __init__(self, name, storyline, trailer, poster):\n # Assigning the values of the instances to the class variables\n self.title = name\n self.mov_story = storyline\n self.trailer_youtube_url = trailer\n self.poster_image_url = poster", "def __init__(self):\n self.reddit = praw.Reddit('bot1')\n self.thread = None\n self.handle = None\n self.refresh_delay = None\n # Manage the checked comments\n if not os.path.isfile(\"reddit_comments_replied_to.txt\"):\n self.checked_comments = []\n else:\n # Read the file into a list and remove any empty values\n with open(\"reddit_comments_replied_to.txt\", \"r\") as f:\n self.checked_comments = f.read()\n self.checked_comments = self.checked_comments.split(\"\\n\")\n self.checked_comments = list(filter(None, self.checked_comments))\n # Manage the checked posts\n if not 
os.path.isfile(\"reddit_posts_replied_to.txt\"):\n self.checked_posts = []\n else:\n with open(\"reddit_posts_replied_to.txt\", \"r\") as f:\n self.checked_posts = f.read()\n self.checked_posts = self.checked_posts.split(\"\\n\")\n self.checked_posts = list(filter(None, self.checked_posts))", "def __init__(self):\n self.site = ('http://vortex.plymouth.edu/cgi-bin/gen_statlog-u.cgi')\n \"\"\"Root of URL to query for data.\"\"\"\n yesterday = datetime.today() - timedelta(days=1)\n self.year = yesterday.year\n \"\"\"Year to get data for.\"\"\"\n self.month = yesterday.month\n \"\"\"Month to get data for.\"\"\"\n self.day = yesterday.day\n \"\"\"Day to get data for.\"\"\"\n self.stns = dict(yvr=\"CYVR\",\n sandheads=\"CWVF\")\n \"\"\"Mapping of common station names to official station IDs.\"\"\"", "def __init__(self, title, year,story, poster_url, trailer_url):\n self.title = title\n self.year = year\n self.story = story\n self.poster_url = poster_url\n self.trailer_url = trailer_url", "def __init__(self):\n self.score = None\n self.avg_score = None\n self.std_dev = None\n self.scores = [] # list containing scores from each episode\n self.avg_scores = [] # list containing average scores after each episode\n self.scores_window = deque(maxlen=100) # last 100 scores\n self.best_avg_score = -np.Inf # best score for a single episode\n self.time_start = time.time() # track cumulative wall time\n self.total_steps = 0 # track cumulative steps taken\n self.writer = SummaryWriter(\"../results/\") # this is where tensorboard results are stored", "def __init__(self, seriesDir, parQ, interval=.2):\n # start the thread upon completion\n Thread.__init__(self)\n\n # set up logger\n self.logger = logging.getLogger(__name__)\n\n # initialize class parameters\n self.interval = interval # interval for polling for new files\n self.seriesDir = seriesDir # full path to series directory\n self.parQ = parQ # queue to store par header files\n self.alive = True # thread status\n self.numParsAdded = 0 # counter to keep track of # mosaics\n self.queued_par_files = set() # empty set to store names of queued mosaic\n self.par_pattern = re.compile(fnmatch.translate('*.par'), re.IGNORECASE)", "def __init__(self, title):\n # will hit the TMDB API on every instantiation\n search = tmdb.Search()\n response = search.movie({'query': title})\n\n # if there are any results to querying for the title, take the first result\n if len(search.results) > 0:\n self.ID = uuid.uuid4()\n self.TMDB_ID = search.results[0]['id']\n movie = tmdb.Movies(self.TMDB_ID).info() # get all the information available\n\n # save off a few interesting attributes\n self.title = movie['title']\n self.release_date = movie['release_date']\n self.popularity = movie['popularity']\n self.overview = movie['overview']\n else:\n self.initialize()\n print \" ##### Warning: could not find any matches for %s\" % title", "def __init__(self, URL):\n\n # add to topics list to retreive different topics from CBC RSS feed\n base = \"/cmlink/rss-\"\n topics = [\"politics\", \"technology\", \"sports\"]\n article_id = 1\n self.articles = []\n\n for topic in topics:\n\n # build our url string to make it dynamic\n full_url = URL + base + topic\n # gives us all article urls\n urls = getArticleURLs(full_url)\n\n for url in urls:\n new_article = Article(url, topic, article_id)\n\n # is it a valid article url?\n if new_article.article_id != -1:\n article_id += 1\n self.articles.append(new_article)\n \n # break # remove this to get all articles", "def __init__(self, API, 
playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None", "def __init__(self, movie_title, release_date, movie_storyline, poster_image,\n trailer_youtube, more_link):\n\n self.title = movie_title\n self.date = release_date\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.more_url = more_link", "def __init__(self, ticker):\n EClient.__init__(self, self)\n self.previous_ts = None\n # Collect 1 min of data with 5 sec frequency\n self.data_collection = list()", "def __init__(self, server_id, subsection_id, output_queue):\n self.online = False\n self.id = server_id\n self.type = subsection_id\n self.queue = deque()\n self.is_serving = False\n self.current_passenger = None\n self.output_queue = output_queue\n self.max_queue_size = 1\n self.utilization = 0.0\n self.utilization_anchor = 0\n self.utilization_series = pd.Series(np.nan,\n index=hourly_timestamps,\n name=self.id)", "def __init__(self, _id, a_cookie, a_user_agent):\n self._id = _id\n self.ticker = ''\n self.pub_date = '0001-01-01'\n self.author = ''\n self.title = ''\n self.text = ''\n self.includes = ''\n\n self.comments = []\n self.valid = True\n self._parse_article(a_cookie, a_user_agent)", "def __init__(self, series):\n if series < 0 or series > 11:\n raise ValueError('Series has to be between 1 and 11')\n self._series = series\n self._list = []\n self._dict = {}\n self._dictID = 0\n\n self.hero_series()" ]
[ "0.666735", "0.582353", "0.5599041", "0.5515614", "0.5502834", "0.54765725", "0.54175603", "0.5364501", "0.533091", "0.53269607", "0.5278374", "0.5247649", "0.5233488", "0.5222864", "0.5213202", "0.5204529", "0.51684767", "0.516591", "0.51639456", "0.5141017", "0.513316", "0.51284343", "0.50994885", "0.5091677", "0.50807124", "0.5075149", "0.506984", "0.5063355", "0.50569385", "0.5055379" ]
0.6067558
1
Uses information in `line` to request and return the RSS feed
def request_rss(self, url): return feedparser.parse(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rss(url):", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n \r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check URL(is RSS?) 
and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items", "def rss_fetch():\n items = {}\n\n def add_item(pubDate, title, link):\n nonlocal items\n idx = float(parsedate_to_datetime(pubDate).timestamp())\n while idx in items:\n idx = idx + 0.1\n dbg(\"Adding item: %11.1f \\\"%s\\\" %s\" % (idx, title, link))\n items[idx] = {}\n items[idx]['title'] = title\n items[idx]['link'] = link\n\n state = \"\" # state parser is in (\"\", \"item\", \"title\", \"link\", \"pubDate\")\n title = \"\" # Currently parsing this title.\n link = \"\" # \" \" \" link\n pubDate = \"\" # \" \" \" pubDate (index)\n\n def start_element(name, attrs):\n nonlocal state\n nonlocal title\n nonlocal link\n nonlocal pubDate\n dbg(\"Start: %s %s %s\" %(name, str(attrs), str((state, title, link, pubDate))))\n if state == \"\":\n if name == \"item\":\n state = \"item\"\n elif state == \"item\":\n if name == \"title\":\n state = \"title\"\n if title:\n prn(\"Two titles?\")\n sys.exit(1)\n elif name == \"link\":\n state = \"link\"\n if link:\n prn(\"Two links?\")\n sys.exit(1)\n elif name == \"pubDate\":\n state = \"pubDate\"\n if pubDate:\n prn(\"Two pubDates?\")\n sys.exit(1)\n\n\n def end_element(name):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"End: %s %s\" % (name, str((state, title, link, pubDate))))\n if state == \"item\":\n if name == \"item\":\n if title == \"\":\n prn(\"No title at end item.\")\n sys.exit(1)\n if link == \"\":\n prn(\"No link at end item.\")\n sys.exit(1)\n if pubDate == \"\":\n prn(\"No pubDate at end item.\")\n sys.exit(1)\n else:\n add_item(pubDate, title, link)\n state = \"\"\n title = \"\"\n link = \"\"\n pubDate = \"\"\n elif state == \"title\":\n if name == \"title\":\n state = \"item\"\n elif state == \"link\":\n if name == \"link\":\n state = \"item\"\n elif state == \"pubDate\":\n if name == \"pubDate\":\n state = \"item\"\n\n def char_data(data):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"Data: %s %s)\" % (str(data), str((state, title, link, pubDate))))\n if state == \"title\":\n title = title + data\n elif state == \"link\":\n link = link + data\n elif state == \"pubDate\":\n pubDate = pubDate + data\n\n\n p = xml.parsers.expat.ParserCreate(\"UTF-8\")\n\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n with urllib.request.urlopen('https://news.ycombinator.com/rss') as f:\n xml_file = b\"\"\n while True:\n r = f.read(255)\n if r:\n xml_file = xml_file + r\n else:\n break\n\n try:\n p.Parse(xml_file.decode(\"UTF-8\"), True)\n except:\n dbg(\"Writing fetched RSS feed to file...\")\n err_f = open(parse_error_output_file, \"ab\")\n err_f.write(b\"GET URL: \")\n err_f.write(f.geturl().encode(\"UTF-8\"))\n err_f.write(b\"\\nReturn Code: \")\n err_f.write((\"%d\\n\" % (f.getcode(), )).encode(\"UTF-8\"))\n err_f.write(b\"Meta Info:\\n\")\n err_f.write(f.info().as_bytes(unixfrom=True))\n err_f.write(b\"XML output:\\n\")\n err_f.write(xml_file)\n err_f.close()\n dbg(\"Done.\")\n raise\n\n return items", "def get_rss(limit):\n rss_data = feedparser.parse(URL)\n if limit == 1:\n title = rss_data.entries[0].title\n link = rss_data.entries[0].link\n rss_print(title, link)\n else:\n for i in range(0, limit):\n title = rss_data.entries[i].title\n link = rss_data.entries[i].link\n\n print(Back.CYAN + str(i + 1) + \"\\t\")\n 
rss_print(title, link)", "def getFeedFromXXX(RSSlink):\n summary =\"\"\n link =\"\"\n if \"packetstormsecurity\" in RSSlink:\n link =\"link\"\n summary=\"summary_detail\"\n elif \"jetlib\" in RSSlink:\n link=\"id\"\n summary=\"summary\"\n myFeed=\"\"\n try:\n myFeed = feedparser.parse(RSSlink)\n except:\n print(\"problem with the db website.try to change the source db in option !\")\n return None\n entries = [item for item in myFeed.items() if \"entries\" in item]\n tupleInsideEntries =entries[0]\n #print len(tupleInsideEntries[1])#show the number of result founded\n for dicItem in tupleInsideEntries[1]:\n if dicItem.get(\"title\")==\"No Results Found\":\n return False #break from this loop if theres no result\n print (\"Title : \"+dicItem.get(\"title\"))#title\n if summary ==\"summary_detail\": #packetstormsecurity\n print (\"Description : \"+str(dicItem.get(summary).get(\"value\")))#description\n else:\n print (\"Description : \"+str(dicItem.get(summary)))\n print (\"Date : \"+dicItem.get(\"published\"))#date\n print (\"Link : \"+dicItem.get(link)) #link\n print (\"#################################################################################\")\n return True", "def get_feed(self):\n\t\turl=\"http://news.google.com/news?ned=%s&topic=%s&output=rss\"\n\t\tlinks=[{\"ned\":\"us\", \"type\":\"h\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"w\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"nz\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"sa\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"b\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"t\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"m\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"s\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"e\"},\n\t\t\t ]\n\t\tfeed = links[self.get_input()]\n\t\treturn url%(feed[\"ned\"],feed[\"type\"])", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def cli():\n fire.Fire(fetch_rss_file)", "def RSS2format(inputfile):\n print \"START: FEED GENERATOR[ITEM OBJECT CREATOR]: \", time.time()\n xmldocument = parse(inputfile)\n feed_title = \"\"\n try:\n feed_title = xmldocument.getElementsByTagName('dc:title')[0].firstChild.data\n except IndexError as details:\n print \"Handling IndexError: \", details\n feed_title = \"Handling IndexError...\"\n except AttributeError as details:\n print \"Handling AttributeError: \", details\n feed_title = \"Handling AttributeError...\"\n # only get first 100 characters.. 
RSS\n feed_description = \"\"\n try:\n feed_description = xmldocument.getElementsByTagName('dc:description')[0].firstChild.data[:100]\n except IndexError as details:\n print \"Handling IndexError: \"\n feed_description = \"Handling IndexError\"\n except AttributeError as details:\n\tfeed_description = \"Handling AttributeError\"\n feed_link = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for link value\n feed_pubDate = xmldocument.getElementsByTagName('datestamp')[0].firstChild.data # get header datestamp for pubDate value\n feed_guid = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for guid value\\\n # return a PyRSS2Gen object\n return PyRSS2Gen.RSSItem(\n title = feed_title,\n link = feed_link,\n description = feed_description,\n guid = feed_guid,\n pubDate = datetime.strptime(feed_pubDate.replace(\"T\", \" \").replace(\"Z\", \"\"), '%Y-%m-%d %H:%M:%S')\n )", "def get_rss_item(self) -> str:\n base_item = '''\n<item>\n <title>{display_name} tweeted {id}</title>\n <link>{url}</link>\n <pubDate>{pub_date}</pubDate>\n <dc:creator>{display_name}</dc:creator>\n <category>Tweets</category>\n <guid isPermaLink=\"false\">{url}</guid>\n <description />\n <content:encoded><![CDATA[\n RSS_ITEM_PLACE_HOLDER\n ]]></content:encoded>\n</item>'''.format(\n display_name=self.display_name,\n id=self.id,\n url=self.url,\n pub_date=_rss_time_format(self.inner.created_at_in_seconds),\n )\n try:\n return base_item.replace('RSS_ITEM_PLACE_HOLDER', self.get_content())\n except:\n logging.exception('Failed to create RSS item for %s.', self.url)\n return base_item.replace('RSS_ITEM_PLACE_HOLDER', 'RSS Error. Please read {} directly.'.format(self.url))", "def retrieveFeed(self, rss_url):\n url = 'http://{}'.format(rss_url)\n result = feedparser.parse(url)\n if result.status != 200:\n sys.stdout.write('request failed for retrieve this RSS ({})\\n'.format(url))\n else:\n self.storeFeeds(url, result['items'])", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))", "def parse_shaarli_rss_export(rss_file):\n\n rss_file.seek(0)\n entries = rss_file.read().split('<entry>')[1:]\n for entry in entries:\n # example entry:\n # <entry>\n # <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>\n # <link href=\"https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html\" />\n # <id>https://demo.shaarli.org/?cEV4vw</id>\n # <published>2019-01-30T06:06:01+00:00</published>\n # <updated>2019-01-30T06:06:01+00:00</updated>\n # <content type=\"html\" xml:lang=\"en\"><![CDATA[<div class=\"markdown\"><p>&#8212; <a href=\"https://demo.shaarli.org/?cEV4vw\">Permalink</a></p></div>]]></content>\n # </entry>\n\n trailing_removed = entry.split('</entry>', 1)[0]\n leading_removed = trailing_removed.strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]\n\n title = str_between(get_row('title'), '<title>', '</title>').strip()\n url = str_between(get_row('link'), '<link href=\"', '\" />')\n ts_str = str_between(get_row('published'), '<published>', '</published>')\n time = datetime.strptime(ts_str, 
\"%Y-%m-%dT%H:%M:%S%z\")\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def feed(self, entry):\r\n pass", "def run_rss(self):\n\n pass", "def article_extractor(rss_feed_link):\n user_agent = {\"user-agent\": \"Mozilla/5.0 (Windows NT 6.2; Win64;\\\n x64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1\"}\n try:\n feed = requests.get(rss_feed_link, headers=user_agent)\n except requests.exceptions.ConnectionError:\n print(\"No internet connection\")\n exit()\n\n dirty_content = BeautifulSoup(feed.text, \"xml\")\n return dirty_content", "def workAFeed(feed):\n print(\"::working \",feed)\n\n # add http\n if feed.find(\"http\") == -1:\n feed = \"http://\" + feed\n print (\"::feed=\",feed)\n\n return feed", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = _char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def getFeed(self):\n\n entries_xml = []\n\n for entry in self.middleware.entries:\n request = entry['request']\n response = entry.get('response')\n begin = time.localtime(request['begin'])\n entry_id = self._generateEntryTagURI(entry)\n entry_title = '%s %s ' % (request['method'], request['url'])\n\n short_url = request['url']\n max_url_len = 40\n if len(short_url) > max_url_len:\n prefix = short_url[:9]\n suffix = short_url[-max_url_len+9:]\n short_url = prefix + '...' 
+ suffix\n entry_title = '%s %s ' % (request['method'], short_url)\n\n # Make the <rz:cgi_variable> nodes into a string\n cgivars = \"\"\n for k,v in request['cgi_variables']:\n newv = escape(str(v))\n s = cgi_variable_fmt % (k, newv)\n cgivars = cgivars + s\n\n # Make the <rz:cgi_variable> nodes into a string\n wsgivars = \"\"\n for k,v in request['wsgi_variables']:\n newv = escape(str(v))\n s = wsgi_variable_fmt % (k, newv)\n wsgivars = wsgivars + s\n\n # Make the <rz:request> node\n rzrequest = rzrequest_fmt % {\n 'begin': request['begin'],\n 'cgi_variables': cgivars,\n 'wsgi_variables': wsgivars,\n 'method': request['method'],\n 'url': request['url'],\n 'body': escape(request['body']),\n }\n\n if response is not None:\n # Make the <rz:request> node\n headers = ''\n for k,v in response['headers']:\n newv = escape(str(v))\n s = header_fmt % (k, newv)\n headers = headers + s\n\n rzresponse = rzresponse_fmt % {\n 'begin': response['begin'],\n 'end': response['end'],\n 'content-length': response['content-length'],\n 'headers': headers,\n 'status': response['status'],\n 'body': escape(response['body']),\n }\n else:\n rzresponse = ''\n\n\n # Make the atom:entry/atom:content node\n content = contentfmt % {\n 'logentry_id': entry_id,\n 'rzrequest': rzrequest,\n 'rzresponse': rzresponse,\n }\n\n entry_xml = entryfmt % {\n 'entry_id':entry_id,\n 'entry_title':escape(entry_title),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', begin),\n 'summary':escape(pprint.pformat(entry)),\n 'content':content,\n }\n entries_xml.append(entry_xml)\n\n now = time.time()\n\n body = feedfmt % {\n 'title':'repoze.debug feed for pid %s' % self.middleware.pid,\n 'entries':'\\n'.join(entries_xml),\n 'feed_id':self._generateFeedTagURI(now, self.middleware.pid),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(now)),\n }\n\n resp = Response(content_type='application/atom+xml', body=body)\n return resp", "def parse_rss_export(rss_file):\n\n rss_file.seek(0)\n items = rss_file.read().split('<item>')\n items = items[1:] if items else []\n for item in items:\n # example item:\n # <item>\n # <title><![CDATA[How JavaScript works: inside the V8 engine]]></title>\n # <category>Unread</category>\n # <link>https://blog.sessionstack.com/how-javascript-works-inside</link>\n # <guid>https://blog.sessionstack.com/how-javascript-works-inside</guid>\n # <pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>\n # </item>\n\n trailing_removed = item.split('</item>', 1)[0]\n leading_removed = trailing_removed.split('<item>', 1)[-1].strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r for r in rows if r.strip().startswith('<{}>'.format(key))][0]\n\n url = str_between(get_row('link'), '<link>', '</link>')\n ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %z\")\n title = str_between(get_row('title'), '<![CDATA[', ']]').strip() or None\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "async def create_rss(channel_alias: str, request: Request):\r\n global channel_hash, client\r\n channel_alias = channel_alias.lstrip('@')\r\n private_channel = channel_alias[:8] == 'joinchat'\r\n if private_channel:\r\n private_hash = channel_alias[8:]\r\n channel_alias = 't.me/joinchat/' + private_hash\r\n try:\r\n await client.start()\r\n if channel_alias not in channel_hash:\r\n if private_channel:\r\n await client(ImportChatInviteRequest(private_hash))\r\n channel 
= await client.get_entity(channel_alias)\r\n ch_full = await client(GetFullChannelRequest(channel=channel))\r\n username = channel.username or channel.id\r\n channel_hash[channel_alias] = {\r\n 'username': username,\r\n 'title': channel.title,\r\n 'id': channel.id,\r\n 'about': ch_full.full_chat.about or str(username),\r\n }\r\n logging.info(f\"Adding to the hash '{channel_alias}'\")\r\n with open('hash.pickle', 'wb') as f:\r\n pickle.dump(channel_hash, f)\r\n ch = channel_hash[channel_alias]\r\n messages = [m async for m in client.iter_messages(\r\n ch['username'], limit=int(config['RSS']['RECORDS']))]\r\n except Exception as e:\r\n warn = f\"{str(e)}, request: '{channel_alias}'\"\r\n logging.warning(warn)\r\n return warn\r\n\r\n fg = FeedGenerator()\r\n fg.title(f\"{ch['title']} (@{ch['username']}, id:{ch['id']})\")\r\n fg.subtitle(ch['about'])\r\n link = channel_alias if private_channel else f\"t.me/s/{ch['username']}\"\r\n fg.link(href=f'https://{link}', rel='alternate')\r\n fg.generator(config['RSS']['GENERATOR'])\r\n fg.language(config['RSS']['LANGUAGE'])\r\n for m in messages:\r\n if not (config['RSS'].getboolean('SKIP_EMPTY') and not m.text):\r\n fe = fg.add_entry(order='append')\r\n link = 'https://t.me/' + ('c/' if private_channel else '')\r\n fe.guid(guid=f\"{link}{ch['username']}/{m.id}\", permalink=True)\r\n fe.content(markdown(m.text))\r\n fe.published(m.date)\r\n\r\n logging.debug(f\"Successfully requested '{ch['username']}'\")\r\n return Response(content=fg.rss_str(), media_type='application/xml')", "def get_rss_infos():\n\n url_rss_lib = \"http://www.liberation.fr/rss\"\n soup = utils.recovery_flux_url_rss(url_rss_lib)\n\n rss_items = soup.find_all(\"li\")\n\n rss_list = []\n\n link_rss = []\n\n for ri in rss_items:\n if ri.get(\"class\") == ['rss-item']:\n rss_list.append(ri.a.get('href'))\n\n for rl in rss_list:\n soup = utils.recovery_flux_url_rss(rl)\n entre = soup.find_all('entry')\n for e in entre:\n link_rss.append(e.link.get('href'))\n\n return link_rss", "def latestEntriesRss():\n now = datetime.now()\n latestEntries = session.query(Pokemon).order_by(desc(Pokemon.date_entered))\\\n .limit(20)\n rss = render_template('rss.xml', lastBuildDate=now, entries=latestEntries)\n response = make_response(rss)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def publish_line(self, stream, line):\n pass", "def get_from_url(source):\n try:\n rss_news = feedparser.parse(source)\n result = parse_news(rss_news['entries'])\n except urllib.error.URLError:\n raise SystemExit(\"Source isn't available\")\n else:\n if len(result) == 0:\n raise SystemExit('Please, check if the entered link is correct!')\n else:\n return result" ]
[ "0.665676", "0.63081646", "0.6112597", "0.60895586", "0.60594904", "0.60477144", "0.60260314", "0.598809", "0.5984063", "0.59758997", "0.5936038", "0.5913608", "0.58760685", "0.5829383", "0.58091927", "0.5775324", "0.5773286", "0.5708184", "0.56792194", "0.56642944", "0.56638587", "0.56513196", "0.5647424", "0.56383437", "0.5601557", "0.55917937", "0.55866176", "0.5581136", "0.5549213", "0.5522708" ]
0.696534
0
Variable assignment can include assigning array elements.
def assign_variable(executor, variable, value): variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if 
isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def checkVarArray(self, script, node):\n\n if isinstance(node.value, ast.Call):\n if isinstance(node.value.func, ast.Name):\n if node.value.func.id == 'Var':\n if len(node.value.args) > 0:\n for target in node.targets:\n if isinstance(target, ast.Attribute):\n if isinstance(target.value, ast.Name):\n if target.value.id in script.modelVars:\n if target.value.id not in self.varArrays:\n self.varArrays[target.value.id] = []\n self.varArrays[target.value.id].append(target.attr)", "def set_assignment(self, var, value):\n self.variable_to_value[var] = value", "def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')", "def varcopy(self, vars):", "def assign_variable(self, name, value):\n return self.set_variable(name, value)", "def _var_update(self, **kwargs):\n for k, v in kwargs.items():\n if v is not None:\n v = np.asanyarray(v)\n\n if not hasattr(self, k):\n setattr(self, k, v)\n elif v is not None:\n setattr(self, k, v)\n \n self._var_check()", "def assign(ary, out):\n\n from . 
import _bh\n\n if not np.isscalar(ary):\n (ary, out) = broadcast_arrays(ary, out)[0]\n # We ignore self assignments\n if _bh.same_view(ary, out):\n return\n\n # Assigning empty arrays doesn't do anything\n if hasattr(ary, \"size\"):\n if ary.size == 0:\n return\n if hasattr(out, \"size\"):\n if out.size == 0:\n return\n\n # We use a tmp array if the in-/out-put has memory conflicts\n if overlap_conflict(out, ary):\n tmp = array_create.empty_like(out)\n assign(ary, tmp)\n return assign(tmp, out)\n\n if bhary.check(out):\n _bh.ufunc(UFUNCS[\"identity\"].info['id'], (out, ary))\n else:\n if bhary.check(ary):\n if \"BH_SYNC_WARN\" in os.environ:\n import warnings\n warnings.warn(\"BH_SYNC_WARN: Copying the array to NumPy\", RuntimeWarning, stacklevel=2)\n ary = ary.copy2numpy()\n out[...] = ary", "def assign(self, *args):\n return _ida_hexrays.cloop_t_assign(self, *args)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def __call__(self, elementname, name, master):\n self._name = name\n self._master = master\n if elementname not in self._elementvars:\n v = ArrayElementVar(varname=self._name, elementname=elementname, master=self._master)\n self._elementvars[elementname] = v\n return self._elementvars[elementname]", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, assignee: np.ndarray):\n if isinstance(self.data, pd.DataFrame):\n self.data = pd.concat([self.data, assignee], axis=1, ignore_index=True)\n else:\n self.data = pd.DataFrame(data=assignee)", "def setUniformValueArray(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass" ]
[ "0.6699032", "0.6667143", "0.66010433", "0.64327574", "0.6394654", "0.632295", "0.62098926", "0.61762494", "0.6125874", "0.6119721", "0.60467184", "0.6016822", "0.5979976", "0.5958759", "0.58768624", "0.57980937", "0.5793912", "0.5786944", "0.57581115", "0.5734274", "0.573222", "0.57276773", "0.57172596", "0.5700201", "0.56997126", "0.56997126", "0.56997126", "0.56997126", "0.5676348", "0.56761956" ]
0.7048861
0
An if statement works by skipping to the next line if the THEN clause is false; otherwise it continues to execute the clauses after the THEN.
def stmt_if(executor, stmt): e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_ifeq(self):\n self.indent_left()\n self.write_line(\"endif\")", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def conditional(self) -> global___Statement.Conditional:", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)", "def test_78_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} continue; end{while} continue; end\"\"\"\n\t\texpect = \"Continue Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,478))", "def eliminate_ifones(body):\n def isifone(tree):\n if type(tree) is If:\n if 
type(tree.test) is Num: # TODO: Python 3.8+: ast.Constant, no ast.Num\n if tree.test.n == 1:\n return \"then\"\n elif tree.test.n == 0:\n return \"else\"\n elif type(tree.test) is NameConstant: # TODO: Python 3.8+: ast.Constant, no ast.NameConstant\n if tree.test.value is True:\n return \"then\"\n elif tree.test.value in (False, None):\n return \"else\"\n return False\n\n def optimize(tree): # stmt -> list of stmts\n t = isifone(tree)\n if t:\n branch = tree.body if t == \"then\" else tree.orelse\n return branch\n return [tree]\n\n return transform_statements(optimize, body)", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def test_77_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then continue; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,477))", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def dummy_elif(dummy_code_block):\n return make_dummy_elif()", "def test_76_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do continue;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,476))", "def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 
'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)", "def compile_else(self):\n\n\t\txml = self.tokenizer.keyword() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)", "def with_if_statement():\n if c():\n return t()\n else:\n return f()", "def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)", "def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"", "def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)", "def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)", "def test_if_statement_multiple():\n r = convert_code(\n \"{if !foo or foo.bar or foo|bar:foo['hello']}\\nfoo\\n{/if}\")\n assert r == \"{% if not foo or foo.bar or foo|bar(foo['hello']) %}\\nfoo\\n{% endif %}\"", "def _check_semicolon_else_skip(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n pass\n else:\n self._display_syntax_error(\"semicolon\")\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def start_ifeq(self, left, right):\n self.write_line(\"ifeq (\" + left + \",\" + right + \")\")\n self.indent_right()" ]
[ "0.6381344", "0.6276441", "0.6248517", "0.6244961", "0.62422997", "0.61599344", "0.6136429", "0.6136429", "0.5794875", "0.5792966", "0.575992", "0.57597315", "0.5758323", "0.5756217", "0.573461", "0.56686", "0.5654149", "0.5635589", "0.5634545", "0.563404", "0.5616345", "0.5601915", "0.55984026", "0.5582077", "0.5566953", "0.5562973", "0.55500007", "0.55444866", "0.55400145", "0.5538579" ]
0.741904
0
Calculate tips over the past X amount of time and write JSON output
def aggregate_tips(): # The SQL query to perform now = time.time() print("Computing tip stats...", end="", flush=True) labels = ["30_days", "7_days", "24_hours", "1_hour"] windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0] result = {} result["unix_time"] = now result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC" # Agrees with old method, but should it be SUM(amount)? query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\ transaction.transaction_time time, claim.is_nsfw is_nsfw,\ claim.claim_id claim_id, claim.name claim_name,\ (CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\ FROM claim\ INNER JOIN support ON support.supported_claim_id = claim.claim_id\ INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\ INNER JOIN output ON transaction.hash = output.transaction_hash \ WHERE transaction.transaction_time > ({now} - {window})\ AND transaction.transaction_time <= {now}) AS result\ GROUP BY support_id, amount;".format(now=now, window=windows[0]) request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query) the_dict = request.json() # Get tips into numpy array times = [] tips = [] is_tip = [] links = [] is_nsfw = [] for row in the_dict["data"]: times.append(float(row["time"])) tips.append(float(row["amount"])) links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\ + str(row["claim_id"])) is_nsfw.append(row["is_nsfw"]) if row["tot"] > 0: is_tip.append(True) else: is_tip.append(False) times = np.array(times) tips = np.array(tips) is_tip = np.array(is_tip) links = np.array(links) is_nsfw = np.array(is_nsfw) # Write tips for i in range(len(labels)): keep = (times > (now - windows[i])) & is_tip _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] result["num_tips_{label}".format(label=labels[i])] = len(_tips) result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_tip_{label}".format(label=labels[i])] = maxtip result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) # Write supports for i in range(len(labels)): keep = (times > (now - windows[i])) & (~is_tip) _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] result["num_supports_{label}".format(label=labels[i])] = len(_tips) result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_support_{label}".format(label=labels[i])] = maxtip result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) f = open("tips_stats.json", "w") f.write(json.dumps(result)) f.close() print("done. ", flush=True, end="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_tip(meal_base, tip_rate):", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]\r\n\t\t\tmag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)\r\n\t\t\tif a[0] == \"jump\":\r\n\t\t\t\ttime += mag/laser[\"jump_speed\"]\r\n\t\t\telse:\r\n\t\t\t\ttime += mag/laser[\"mark_speed\"]\r\n\t\t\tcoordinate_array = [float(a[1]), float(a[2])]\r\n\t\telif a[0] == \"z_abs\" or a[0] == \"z_rel\":\r\n\t\t\tzSet = float(a[1])\r\n\t\telif a[0] == \"c_abs\" or a[0] == \"c_rel\":\r\n\t\t\tcSet = float(a[1])\r\n\t\telif a[0] == \"a_abs\" or a[0] == \"a_rel\":\r\n\t\t\taSet = float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn str(datetime.timedelta(seconds=int(time)))", "def time_plot(time_window):\n try:\n time_window = int(time_window)\n\n # make a connection and look up DB\n conn = sqlite3.connect(\"tweets.db\")\n c = conn.cursor()\n\n t = datetime.utcnow() - timedelta(seconds=time_window)\n\n time_lst = list(c.execute(\"SELECT created_at from tweets;\"))\n time_lst = np.array([i[0] for i in time_lst])\n\n count = str(np.sum(time_lst>t.strftime('%Y-%m-%d %H:%M:%S')))\n\n json_response = {\"status\":\"success\", \"data\":count}\n conn.close()\n except:\n json_response = {\"status\":\"failure\"}\n traceback.print_exc()\n conn.close()\n\n return json.dumps(json_response)", "def main(url: str, time_window_list: list) -> dict:\n page = 1\n data = {\"JT1\": 0, \"JT2\": 0, \"JT3\": 0, \"JT4\": 0, \"JT5\": 0}\n\n print(f\"Mining on {url.split('/')[5]}\")\n while True:\n\n response = requests.get(f\"{url}?page={page}&state=closed&access_token=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\")\n\n if response.status_code in range(300, 500):\n print(f\"Error: {response.json()['message']}\")\n exit(1)\n\n if len(response.json()) == 0:\n break\n\n if response.status_code == 200:\n # print(f\"{response.json()=}\")\n # print(f\"{len(response.json())}\")\n for issue in response.json():\n\n # if issues is not a pull request\n if 'pull_request' not in issue:\n closed_at = datetime.strptime(issue['closed_at'].split(\"T\")[0], \"%Y-%m-%d\")\n\n # JT1\n if time_window_list[0]['since'] <= closed_at <= time_window_list[0]['to']:\n data['JT1'] = data['JT1'] + 1\n\n # JT2\n elif time_window_list[1]['since'] <= closed_at <= time_window_list[1]['to']:\n data['JT2'] = data['JT2'] + 1\n\n # JT3\n elif time_window_list[2]['since'] <= closed_at <= time_window_list[2]['to']:\n data['JT3'] = data['JT3'] + 1\n\n # JT4\n elif time_window_list[3]['since'] <= closed_at <= time_window_list[3]['to']:\n data['JT4'] = data['JT4'] + 1\n\n # JT5\n elif time_window_list[4]['since'] <= closed_at <= time_window_list[4]['to']:\n data['JT5'] = data['JT5'] + 1\n page += 1\n print(f\"Closed: {data=}\")\n print(f\"Done!\")\n return data", "def get_chartdata():\n callback = bottle.request.query.get('callback')\n y_axis = bottle.request.query.get('y_axis').strip()\n w_acts = [\"action='%s'\" % act for act in bottle.request.query.get('actions').strip().split(',')]\n w_acts = 'AND (%s)' % ' OR '.join(w_acts) if w_acts else ''\n f_value = 'AVG(latency)' if y_axis.startswith('avg') else 'COUNT(timestamp)'\n atomic = 1 if y_axis in ['aops', 'avgl'] else 0\n\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT test_run_status, timestamp_started, timestamp_completed FROM 
info LIMIT 1'\n status, started, finished = tools.db_query(db_conn, sql)[1][0]\n progress = int(float(finished) - float(started)) if finished \\\n else int(tools.get_timestamp() - float(started))\n\n sql = 'SELECT substr(timestamp, 0, 11), code, %s FROM recs ' % f_value + \\\n 'WHERE atomic=%s %s GROUP BY code, substr(timestamp, 0, 11) ' % (atomic, w_acts) + \\\n 'ORDER BY id DESC LIMIT 3600' # last 1 hour activity\n\n result = tools.db_query(db_conn, sql)[1] if finished else tools.db_query(db_conn, sql)[1][:-1]\n result = list(reversed(result))\n results = {str(abs(int(item[0]) - int(float(started)))):\n {'failed': 0, 'passed': 0, 'incomplete': 0} for item in result}\n for item in result: # item[0] - timestamp, item[1] - code (None if incomplete), item[2] - value\n timestamp = str(int(item[0]) - int(float(started)))\n value = item[2] or 0\n results[timestamp]['failed'] += value if item[1] and item[1] != 200 else 0\n results[timestamp]['passed'] += value if item[1] == 200 else 0\n results[timestamp]['incomplete'] += value if item[1] == None else 0\n results = [{'timestamp': key, 'failed': value['failed'], 'passed': value['passed'],\n 'incomplete': value['incomplete']} for key, value in results.items()]\n result = {bottle.request.query.slave: results, 'status': status,\n 'started': started, 'finished': finished or '(not finished)', 'progress': progress}\n return '{0}({1})'.format(callback, result)", "def power_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n 
},\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n\n ]", "def timed_recipes():\n time = request.args.get('time', 0, type=int) #raw input from HTML page\n global time_global\n time_global = time #sets global time to inputted time, for use in search function\n return jsonify(cooktime=time_global) #returns a confirmation of the input tiime", "def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()", "def genStats(data, printStats = 0):\n fVotes = open('/home/eduardo/ForestWatchers/ann2besttile/results/votes.txt','w')\n tileCount = []\n numberTasks 
= len(data)\n for task in range(numberTasks):\n tileCount.append([0] * 12)\n numberResults = len(data[task])\n fVotes.write(str(task)+\" \")\n for result in range(numberResults):\n fVotes.write(data[task][result]['answer']+\" \")\n if data[task][result]['answer'] == '2011352':\n tileCount[task][0] += 1\n elif data[task][result]['answer'] == '2011353':\n tileCount[task][1] += 1\n elif data[task][result]['answer'] == '2011355':\n tileCount[task][2] += 1\n elif data[task][result]['answer'] == '2011357':\n tileCount[task][3] += 1\n elif data[task][result]['answer'] == '2011358':\n tileCount[task][4] += 1\n elif data[task][result]['answer'] == '2011359':\n tileCount[task][5] += 1\n elif data[task][result]['answer'] == '2011360':\n tileCount[task][6] += 1\n elif data[task][result]['answer'] == '2011361':\n tileCount[task][7] += 1\n elif data[task][result]['answer'] == '2011362':\n tileCount[task][8] += 1\n elif data[task][result]['answer'] == '2011363':\n tileCount[task][9] += 1\n elif data[task][result]['answer'] == '2011364':\n tileCount[task][10] += 1\n elif data[task][result]['answer'] == '2011365':\n tileCount[task][11] += 1\n fVotes.write(\"\\n\")\n #Print info for debug\n if printStats == 1:\n print \"Stats for task \" + str(task)\n print \"Tile 00 (352) = \" + str(tileCount[task][0])\n print \"Tile 01 (353) = \" + str(tileCount[task][1])\n print \"Tile 02 (355) = \" + str(tileCount[task][2])\n print \"Tile 03 (357) = \" + str(tileCount[task][3])\n print \"Tile 04 (358) = \" + str(tileCount[task][4])\n print \"Tile 05 (359) = \" + str(tileCount[task][5])\n print \"Tile 06 (360) = \" + str(tileCount[task][6])\n print \"Tile 07 (361) = \" + str(tileCount[task][7])\n print \"Tile 08 (362) = \" + str(tileCount[task][8])\n print \"Tile 09 (363) = \" + str(tileCount[task][9])\n print \"Tile 10 (364) = \" + str(tileCount[task][10])\n print \"Tile 11 (365) = \" + str(tileCount[task][11])\n print \"Maximum value = \" + str(max(tileCount[task]))\n print \"Position = \" + str(tileCount[task].index(max(tileCount[task])))\n print \"\"\n fVotes.close()\n return tileCount", "def cowreport():\n central = pytz.timezone(\"America/Chicago\")\n yesterday = (utc() - datetime.timedelta(days=1)).astimezone(central)\n midnight = yesterday.replace(hour=0, minute=0)\n midutc = midnight.astimezone(pytz.UTC)\n begints = midutc.strftime(\"%Y-%m-%dT%H:%M\")\n endts = (midutc + datetime.timedelta(hours=24)).strftime(\"%Y-%m-%dT%H:%M\")\n api = (\n f\"http://iem.local/api/1/cow.json?begints={begints}&endts={endts}&\"\n \"phenomena=SV&phenomena=TO&lsrtype=SV&lsrtype=TO\"\n )\n data = requests.get(api, timeout=60).json()\n st = data[\"stats\"]\n if st[\"events_total\"] == 0:\n text = \"No SVR+TOR Warnings Issued.\"\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n return txt, html\n\n vp = st[\"events_verified\"] / float(st[\"events_total\"]) * 100.0\n text = (\n f\"SVR+TOR Warnings Issued: {st['events_total']:3.0f} \"\n f\"Verified: {st['events_verified']:3.0f} [{vp:.1f}%]\\n\"\n \"Polygon Size Versus County Size \"\n f\"[{st['size_poly_vs_county[%]']:.1f}%]\\n\"\n \"Average Perimeter Ratio \"\n f\"[{st['shared_border[%]']:.1f}%]\\n\"\n \"Percentage of Warned Area Verified (15km) \"\n f\"[{st['area_verify[%]']:.1f}%]\\n\"\n \"Average Storm Based Warning Size \"\n f\"[{st['avg_size[sq km]']:.0f} sq km]\\n\"\n f\"Probability of Detection(higher is better) [{st['POD[1]']:.2f}]\\n\"\n f\"False Alarm Ratio (lower is better) [{st['FAR[1]']:.2f}]\\n\"\n f\"Critical Success Index 
(higher is better) [{st['CSI[1]']:.2f}]\\n\"\n )\n\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n\n return txt, html", "def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 0, type=float)\n begin_date = request.args.get('begin_date')\n begin_time = request.args.get('begin_time')\n arrow_start = arrow.get(begin_date + \" \" + begin_time + \":00\")\n brevet_dist = request.args.get('brevet_dist', 999, type=int)\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n # FIXME: These probably aren't the right open and close times\n # and brevets may be longer than 200km\n percent120 = brevet_dist * 1.2\n possible_brev = [200, 300, 400, 600, 1000]\n if brevet_dist not in possible_brev:\n note = \"Current brevet distance is abnormal. Choose from 200, 300, 400, 600, or 1000\"\n elif km > percent120:\n note = \"Control location is more than 20% over the selected distance.\"\n else:\n note = \"\"\n open_time = acp_times.open_time(km, brevet_dist, arrow_start.isoformat())\n close_time = acp_times.close_time(km, brevet_dist, arrow_start.isoformat())\n result = {\"open\": open_time, \"close\": close_time, \"note\": note}\n return flask.jsonify(result=result)", "def _disp_times():\n fields = request.args.get('fields', type=str)\n format_type = request.args.get('format', type=str)\n top = request.args.get('top', type=int)\n token = request.args.get('token', type=str)\n results = {}\n\n result, length, code = retrieve(token, format_type, top, request_table[fields])\n return flask.jsonify(result=result, length=length, code=code)\n\n # elif code == 401: # Unauthorized\n # app.logger.debug(\"Token Expired! Let's log the user out.\")\n # return render_template('calc.html')", "def trends(max: int = None, until: str = None):\n for post in client.trends(max=max, until=until):\n print(json.dumps(post))", "def moderator_points():\n moderators = {}\n collection = constants.DB.moderators\n\n community_managers = [\n moderator[\"account\"] for moderator in\n collection.find({\"supermoderator\": True})]\n\n utopian_fest = constants.UTOPIAN_FEST.col_values(1)\n\n for moderator in set(community_managers + utopian_fest):\n moderators.setdefault(moderator, 0)\n if moderator in community_managers:\n moderators[moderator] += 100.0\n\n # Check for BOSSPOEM or TECHSLUT\n if moderator == \"espoem\" or moderator == \"techslut\":\n moderators[moderator] = 400.0\n\n # Utopian Fest bonus\n if moderator in utopian_fest:\n moderators[moderator] += 50.0\n\n # Save dictionary as JSON with date of last Thursday\n with open(\n f\"/home/amos/utopian/utopian/static/{constants.THIS_WEEK}.json\",\n \"w\") as fp:\n json.dump(moderators, fp, indent=4)", "def update_tweets_feed(n):\n \n # Retrieve the tweets\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Compute the sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n 
sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Return the tweet contents and a pie graph of the sentiment.\n \n return html.Div([\n html.Div([\n\n# First Tweet\n html.Div([\n html.Div([\n html.Pre(str(first_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '2px 2px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px',\n }\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_first_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\", }\n ),\n ], \n className = 'row' \n ),\n \n# Second Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(second_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_second_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Third Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(third_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_third_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Fourth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fourth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fourth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n\n # Fifth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fifth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fifth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n\n # Sixth Tweet\n html.Div([\n html.Div([\n html.Pre(str(sixth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_sixth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Seventh Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(seventh_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 
'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_seventh_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Eighth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(eighth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_eighth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Nineth\n \n html.Div([\n html.Div([\n html.Pre(str(nineth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_nineth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Tenth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(tenth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_tenth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n ], style = {'overflowY': 'scroll', 'overflowX': 'hidden',\n 'maxHeight': '105ex', 'backgroundColor' : '#eaeaea'}\n ),\n \n ])", "def procfs_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 
8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n 
\"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 
27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": 
\"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n ]", "def make_request(coins, time_frame_in_min):\n while True:\n for i in coins:\n a = r.get('https://bittrex.com/api/v1.1/public/getmarketsummary?market=btc-{}'.format(i)).text\n with open('price_data/' + str(i) + '.json', 'a') as f:\n f.write('{}\\n'.format(a))\n\n time.sleep(60 * time_frame_in_min)", "def tobs():\n # Create our session (link) from Python to the DB.\n session = Session(engine)\n\n # Calculate the date 1 year ago from the last data point in the database.\n last_measurement_data_point_tuple = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n (latest_date, ) = last_measurement_data_point_tuple\n latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')\n latest_date = latest_date.date()\n date_year_ago = latest_date - relativedelta(years=1)\n\n # Perform a query to retrieve the data and temperature scores.\n data_from_last_year = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= date_year_ago).all()\n\n session.close()\n\n # Convert results to a dictionary \n all_temperatures = []\n for date, temp in data_from_last_year:\n if temp != None:\n temp_dict = {}\n temp_dict[date] = temp\n all_temperatures.append(temp_dict)\n # Return JSON\n return jsonify(all_temperatures)", "def getTimes():", "def getTimes():", "def getTimes():", "def update_stats(self, responses, no_responses):\n slowest_rtt = 0.0\n slowest_ip = None\n fastest_rtt = 9999999.9\n fastest_ip = None\n rtt_total = 0.0\n\n for ip, rtt in responses.items():\n rtt_total += rtt\n if rtt > slowest_rtt:\n slowest_rtt = rtt\n slowest_ip = ip\n elif rtt < fastest_rtt:\n fastest_rtt = rtt\n fastest_ip = ip\n\n sorted_rtts = sorted(responses.values())\n l = len(sorted_rtts)\n if l == 0:\n median_rtt = 0.0\n elif l % 2 == 1:\n # Odd number: Median is the middle element\n median_rtt = sorted_rtts[int(l / 2)]\n else:\n # Even number (average between two middle elements)\n median_rtt = (sorted_rtts[int(l / 2) - 1] +\n sorted_rtts[int(l / 2)]) / 2.0\n\n now = datetime.datetime.now().isoformat()\n m = {\n \"time\" : now,\n \"num_responses\" : len(responses),\n \"num_no_responses\" : len(no_responses),\n \"slowest\" : {\n \"ip\" : 
slowest_ip,\n \"rtt\" : slowest_rtt\n },\n \"fastest\" : {\n \"ip\" : fastest_ip,\n \"rtt\" : fastest_rtt\n },\n \"average_rtt\" : rtt_total / len(responses),\n \"median_rtt\" : median_rtt\n }\n\n self.measurements.insert(0, m)\n self.measurements = self.measurements[:self.max_num_measurements]", "def range():\n\n # Time this functions.\n timer = coils.Timer()\n\n # Parse the URL parameter \"amount\".\n errors = list()\n try:\n amount = flask.request.args.get('amount')\n amount = float(amount)\n except:\n errors.append('Failed to parse \"amount\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n\n\n latest_tstring = db.session.query(mapping.Datum).\\\n filter(mapping.Datum.name=='latest_tstamp')[0].value\n latest_time = coils.string2time(latest_tstring)\n start_time = latest_time - dt.timedelta(seconds=amount)\n start_tstring = getNearestTime(start_time)\n \n return flask.jsonify(\n begin_time=start_tstring,\n end_time=latest_tstring,\n )", "def tip_calulator(total, people, tip):\n tip = tip / 100\n total = total / people\n tip_amount = total * tip\n new_total = total + tip_amount\n\n return tip_amount, new_total\n # pass", "def send_data():\n range = request.args.get('range', '30')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Temperature.query\\\n .filter(Temperature.timestamp > time).order_by(Temperature.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def func1():\r\n f = urllib.request.urlopen('http://api.wunderground.com/api/94127df53e899ea4/history_20000531/q/autoip.json')\r\n tob='11:00pm'\r\n \r\n # Automatically geolocate the connecting IP\r\n ff = urllib.request.urlopen('http://freegeoip.net/json/')\r\n json_string = ff.read()\r\n ff.close()\r\n location = json.loads(json_string)\r\n \r\n city =location['city']\r\n state = location['region_name']\r\n country = location['country_name']\r\n zip = location['zip_code']\r\n \r\n print(\"Your nearest Location:\")\r\n print(\"1. country: %s ,state: %s\"%(country,state))\r\n print(\"2. city: %s ,zip-code: %s\"%(city,zip))\r\n print()\r\n json_string = f.read()\r\n parsed_json = json.loads(json_string)\r\n \r\n \r\n dailySummary=parsed_json['history']['dailysummary'][0]\r\n fog=dailySummary['fog']\r\n rain=dailySummary['rain']\r\n snow=dailySummary['snow']\r\n \r\n humidity=dailySummary['humidity']\r\n maxTemp=dailySummary['maxtempm']\r\n minTemp=dailySummary['mintempm']\r\n \r\n pressure=dailySummary['meanpressurem']\r\n \r\n print(\"my birth of year is 1991, but the data is unavailable. So, 2000AD was selected\")\r\n print(\"The weather data is available from 1997 in this location!!!\")\r\n print(\"#Weather data on 31 May 2000 at the location:\")\r\n print(\"------------------------------\")\r\n print()\r\n print(\"1. fog: %s ,rain: %s ,snow: %s\"%(fog,rain,snow))\r\n print(\"2. humidity: %s ,pressure: %s\"%(humidity,pressure))\r\n \r\n print(\"3. max-temperature: %s\\u00b0C ,min-temperature: %s\\u00b0C \"%(maxTemp,minTemp))\r\n \r\n \r\n obs= parsed_json['history']['observations'][38]\r\n \r\n \r\n fog=obs['fog']\r\n rain=obs['rain']\r\n snow=obs['snow']\r\n \r\n humidity=obs['hum']\r\n Temp=obs['tempm']\r\n \r\n \r\n pressure=obs['pressurem']\r\n \r\n print()\r\n print(\"I was born roughly at 11 pm\")\r\n print(\"#Weather data at that time on 31 May 2000 at the location:\")\r\n print(\"------------------------------\")\r\n print()\r\n print(\"1. fog: %s ,rain: %s ,snow: %s\"%(fog,rain,snow))\r\n print(\"2. 
humidity: %s ,pressure: %s\"%(humidity,pressure))\r\n \r\n print(\"3. temperature: %s\\u00b0C\"%(Temp))\r\n \r\n f.close()", "def toy_transformer(in_file, out_file):\n new_data = {}\n new_data['experiment'] = \"toy\"\n with open(in_file, \"r\") as fh:\n fancyprint(in_str=(\"Importing: \" + in_file))\n source = json.load(fh)\n fancyprint(in_str=\"Converting into toy format\")\n new_data[\"version\"] = source[\"version\"]\n new_data[\"data\"] = []\n topic_counter = 3\n for topic in tqdm(source[\"data\"]):\n topic_dict = {}\n topic_dict[\"title\"] = topic[\"title\"]\n topic_dict[\"paragraphs\"] = []\n para_counter = 3\n for para in topic[\"paragraphs\"]:\n paragraph = {}\n paragraph[\"context\"] = para[\"context\"]\n paragraph[\"qas\"] = []\n qa_counter = 3\n for qas in para['qas']:\n qas_dict = {}\n qas_dict[\"id\"] = qas[\"id\"]\n qas_dict[\"is_impossible\"] = qas[\"is_impossible\"]\n qas_dict[\"question\"] = quick_clean(raw_str=qas[\"question\"])\n qas_dict[\"answers\"] = []\n if not qas[\"is_impossible\"]:\n for answer in qas[\"answers\"]:\n answer_dict = {}\n answer_dict[\"answer_start\"] = answer[\"answer_start\"]\n answer_dict[\"text\"] = answer[\"text\"]\n qas_dict[\"answers\"].append(answer_dict)\n paragraph[\"qas\"].append(qas_dict)\n\n qa_counter -= 1\n if qa_counter == 0:\n break\n\n topic_dict[\"paragraphs\"].append(paragraph)\n para_counter -= 1\n if para_counter == 0:\n break\n\n new_data[\"data\"].append(topic_dict)\n\n topic_counter -= 1\n if topic_counter == 0:\n break\n\n save(filename=out_file, obj=new_data, message=\"saving toy data\")", "def get_exercise(username, month, year):\n\n def _format_output(_workouts):\n _workouts = [w.to_dict() for w in _workouts]\n _workouts.sort(key=lambda w: w['start_time'])\n output = []\n\n if len(_workouts) > 0:\n first_workout = _workouts[0]\n prev_exercise = first_workout['exercise']\n prev_exercise_start = first_workout['start_time']\n prev_set_end = first_workout['end_time']\n if first_workout['skeleton_data']:\n picture = encodestring(first_workout['skeleton_data'])\n else:\n picture = None\n\n exercise = {\n 'exercise': prev_exercise,\n 'reps': [first_workout['repetitions']],\n 'weights': [first_workout['weight']],\n 'startTimes': [first_workout['start_time']],\n 'endTimes': [first_workout['end_time']],\n 'picture': picture\n }\n\n for ii in range(1, len(_workouts)):\n current_workout = _workouts[ii]\n current_set_start = current_workout['start_time']\n if current_workout['exercise'] == prev_exercise and current_set_start - prev_set_end < REST_INTERVAL:\n exercise['reps'].append(current_workout['repetitions'])\n exercise['weights'].append(current_workout['weight'])\n exercise['startTimes'].append(current_set_start)\n exercise['endTimes'].append(current_workout['end_time'])\n else:\n output.append({\n 'date': prev_exercise_start,\n 'exercises': exercise\n })\n prev_exercise_start = current_workout['start_time']\n if current_workout['skeleton_data']:\n picture = encodestring(current_workout['skeleton_data'])\n else:\n picture = None\n\n exercise = {\n 'exercise': current_workout['exercise'],\n 'reps': [current_workout['repetitions']],\n 'weights': [current_workout['weight']],\n 'startTimes': [current_set_start],\n 'endTimes': [current_workout['end_time']],\n 'picture': picture\n }\n\n prev_exercise = current_workout['exercise']\n prev_set_end = current_workout['end_time']\n\n output.append({\n 'date': prev_exercise_start,\n 'exercises': exercise\n })\n\n return output\n\n if month < MIN_MONTH or month > MAX_MONTH or year < 
MIN_YEAR:\n raise ValueError('Invalid date passed into workout query')\n\n user_id = _get_user_id(username)\n if user_id is None:\n return None\n\n epoch = datetime(month=1, year=1970, day=1)\n month_start_epoch = int((datetime(month=month, year=year, day=1) - epoch).total_seconds())\n if month == 12:\n month_end_epoch = int((datetime(month=1, year=year + 1, day=1) - epoch).total_seconds())\n else:\n month_end_epoch = int((datetime(month=month + 1, year=year, day=1) - epoch).total_seconds())\n\n exercises = select(exercise for exercise in UserExerciseData\n if (exercise.user_id == user_id\n and exercise.start_time > month_start_epoch\n and exercise.start_time < month_end_epoch)\n )[:]\n\n exercises = _format_output(exercises)\n\n # Ensure that query obtained results\n if len(exercises) > 0:\n result = {'username': username, 'data': exercises}\n else:\n result = None\n return result", "def zeetemps(start_date):\n print(\"server received request for tobs stats start to end of data...\")\n # correct for dates before the start of our data\n if start_date < '2010-01-01':\n start_date = '2010-01-01'\n # set end date\n end_date = '2017-08-23'\n range_df = temps_df[(temps_df['date'] >= start_date) & (temps_df['date'] <= end_date)]\n lowest = range_df['tobs'].min()\n highest = range_df['tobs'].max()\n average = range_df['tobs'].mean()\n output = {'TMIN': lowest, 'TMAX': highest, 'TAVG': average}\n return jsonify(output)", "def temps(): \n \n # Create session and save reference to table\n session = Session(engine)\n Measurement = Base.classes.measurement\n\n # Query\n tobs_query = session.query(Measurement.date, func.avg(Measurement.tobs).label('tobs'))\\\n .filter(Measurement.date >= '2016-08-23').group_by(Measurement.date)\n \n tobs_list = []\n for row in tobs_query:\n tobs_list.append(row._asdict())\n \n return jsonify(tobs_list)\n\n session.close()" ]
[ "0.5688149", "0.55947673", "0.5528684", "0.55030835", "0.5432563", "0.53793204", "0.5325283", "0.53094494", "0.52589875", "0.51901346", "0.51684177", "0.5168089", "0.5163765", "0.5146831", "0.511869", "0.5105191", "0.50967735", "0.5090349", "0.5079822", "0.5079822", "0.5079822", "0.5078533", "0.50360924", "0.5035626", "0.5029966", "0.5018392", "0.5015583", "0.5009065", "0.49963132", "0.49933022" ]
0.6560097
0
Publish files to somewhere on the internet.
def publish_files(): print("Publishing files to the internet...", end="", flush=True) import subprocess try: subprocess.run("./upload.sh", timeout=120.0) print("done.\n") except: print("failed.\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def publish():\n pass", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def publish(self, path):\n self.logger.info(\"Publishing %s\", path)\n try:\n self.set_workspace()\n workspace_path = getcwd()\n if workspace_path != commonpath([workspace_path, abspath(path)]):\n self.logger.error(\"Attempt to publish a non-local file %s\", path)\n raise ContextError(\n f\"Only local workspace files can be published! PATH={path}\"\n )\n if not isfile(path):\n self.logger.error(\"Attempt to publish a non-file path %s\", path)\n raise ContextError(f\"Only files can be published! PATH={path}\")\n # publish the file\n target_path = join(self._path_perm, relpath(path))\n targer_url = urljoin(self._url_base, relpath(path))\n if not isdir(self._path_perm):\n raise MissingContextError(\n f\"Permanent directory does not exist! PATH={self._path_perm}\"\n )\n if not exists(dirname(target_path)):\n makedirs(dirname(target_path))\n move(path, target_path)\n except Exception as error:\n self.logger.warning(\"Failed to publish %s! %s\", path, error)\n raise\n self.logger.debug(\"moved %s -> %s\", path, target_path)\n return target_path, targer_url", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def files_distribute(self):\n self._post('files/distribute')", "def checkin(url, files, message=None):\n from grit import Repo, Item\n r = Repo(url)\n\n if not files:\n raise GritError('No files')\n\n def _write(path):\n item = Item.from_path(repo=r, path=path)\n if r.isLocal():\n v.addItem(item=item)\n else:\n r.upload(filename=os.path.basename(path), filedata=open(path, 'r').read())\n\n if r.isLocal():\n v = r.addVersion()\n count = 1\n total = len(files) \n while count <= total:\n print '[%s/%s] %0.2f%%' %(count, total, (float(count) / total) * 100), '*'*count, '\\r',\n _write(os.path.abspath(files[count-1]))\n count += 1\n sys.stdout.flush()\n if message is None:\n message = 'Publishing %s' % ', '.join(files)\n if r.isLocal():\n v.save(message=message)\n print", "def pubone(file_name,alg,host):\n\n hash_alg=alg\n scheme=\"ni\"\n rform=\"json\"\n ext=\"{ \\\"meta\\\": { \\\"pubdirs\\\" : \\\"yep\\\" } }\"\n\n # record start time of this\n stime=time.time()\n\n # Create NIdigester for use with form encoder and StreamingHTTP\n ni_digester = NIdigester()\n # Install the template URL built from the scheme, the authority and the digest algorithm\n rv = ni_digester.set_url((scheme, host, \"/%s\" % hash_alg))\n if rv != ni_errs.niSUCCESS:\n nilog(\"Cannot construct valid ni URL: %s\" % ni_errs_txt[rv])\n return\n debug(ni_digester.get_url())\n # Open the file if possible\n try:\n f = open(file_name, \"rb\")\n except Exception, e :\n debug(\"Cannot open file %s: Error: %s\" %(file_name, str(e)))\n return\n # Guess the mimetype of the file\n m = magic.Magic(mime=True)\n ctype = m.from_file(file_name)\n debug(\"Content-Type: %s\" % ctype)\n if ctype is None:\n # Guessing didn't work - default\n ctype = \"application/octet-stream\"\n # Set up HTTP form data for publish request\n # Make parameter 
for file with digester\n octet_param = MultipartParam(\"octets\",\n fileobj=f,\n filetype=ctype,\n filename=file_name,\n digester = ni_digester)\n # Make dictionary that will dynamically retrieve ni URI when it has been made\n uri_dict = { \"generator\": octet_param.get_url,\n \"length\": (len(ni_digester.get_url()) + len(\";\") +\n ni_digester.get_b64_encoded_length())}\n msgid=str(random.randint(1, 2**64)) \n param_list = [octet_param,\n (\"URI\", uri_dict),\n (\"msgid\", msgid),\n (\"ext\", ext),\n (\"fullPut\", \"yes\"),\n (\"rform\", rform)]\n # Construct data generator and header strings\n datagen, headers = multipart_encode(param_list)\n if verbose:\n debug(\"Parameters prepared: %s\"% \"\".join(datagen))\n\n # Set up streaming HTTP mechanism - register handlers with urllib2\n # get out for now, don't do it\n opener = streaminghttp.register_openers()\n # Where to send the publish request.\n http_url = \"http://%s/netinfproto/publish\" % host\n # debug(\"Accessing: %s\" % http_url)\n # Send POST request to destination server\n fsize=os.path.getsize(file_name)\n nilog(\"%s,PUBLISH tx,file,%s,size,%d,to,%s\" % (msgid,file_name,fsize,host))\n try:\n req = urllib2.Request(http_url, datagen, headers)\n except Exception, e:\n nilog(\"%s,PUBLISH tx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to create request for http URL %s: %s\" %\n (http_url, str(e)))\n f.close()\n return\n # Get HTTP results\n try:\n http_object = urllib2.urlopen(req)\n except Exception, e:\n nilog(\"%s,PUBLISH rx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to access http URL %s: %s\" % (http_url, str(e)))\n f.close()\n return\n f.close()\n if verbose:\n nilog(\"Digester result: %s\" % octet_param.get_url())\n # Get message headers\n http_info = http_object.info()\n http_result = http_object.getcode()\n if verbose:\n debug(\"HTTP result: %d\" % http_result)\n debug(\"Response info: %s\" % http_info)\n debug(\"Response type: %s\" % http_info.gettype())\n\n # Read results into buffer\n payload = http_object.read()\n http_object.close()\n # debug(payload)\n # Report outcome\n if (http_result != 200):\n if verbose:\n debug(\"Unsuccessful publish request returned HTTP code %d\" %\n http_result) \n nilog(\"%s,PUBLISH rx error bad response status,%d\" % (msgid,http_result));\n return\n # Check content type of returned message matches requested response type\n ct = http_object.headers[\"content-type\"]\n if ct != \"application/json\":\n if verbose:\n debug(\"Error: Expecting JSON coded (application/json) \"\n \"response but received Content-Type: %s\" % ct)\n nilog(\"%s,PUBLISH rx error bad content type,%s\" % (msgid,ct));\n return\n # If output of response is expected, print in the requested format\n if verbose:\n nilog( \"Publication of %s successful:\" % target)\n\n # JSON cases\n try:\n json_report = json.loads(payload)\n except Exception, e:\n if verbose:\n nilog(\"Error: Could not decode JSON report '%s': %s\" % (payload,\n str(e)))\n nilog(\"%s, PUBLISH rx error bad json decode\" % msgid);\n return\n\n if verbose: \n print json.dumps(json_report, indent = 4)\n etime=time.time()\n duration=etime-stime\n niuri=json_report[\"ni\"]\n nilog(\"%s,PUBLISH rx fine,ni,%s,size,%d,time,%10.10f\" % (msgid,niuri,fsize,duration*1000))\n\n return niuri", "def cvmfsPublish(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"publish\", \"-f\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not publish CVMFS transaction\")", "def assets_push(ctx, 
metadata, dir, brizo, price, service_endpoint, timeout):\n try:\n files = [f for f in os.listdir(dir) if os.path.isfile(dir+'/'+f)]\n except NotADirectoryError:\n files = [dir]\n\n response = []\n metadata = json.load(open(metadata, 'r'))\n\n for f in files:\n metadata['base']['files'][0]['url'] = f\n response += [ctx.invoke(assets_publish,\n metadata=metadata,\n brizo=brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout)]", "def publish(self, service, pid=None):\n\n self.db_connect()\n\n # Find all files without a DOI (and assume these are in the publication staging area).\n with self.connection:\n query = \"SELECT * FROM %s WHERE doi IS NULL\" % PUBLICATIONS_TABLE\n c = self.connection.cursor()\n c.execute(query)\n to_publish = c.fetchall()\n\n if not to_publish:\n _LOG.warning(\"No files selected for publication.\")\n return\n\n # Does the user needs to commit any modified files first?\n modified_files = subprocess.check_output(['git', 'diff', '--name-only']).split()\n for i in range(len(modified_files)):\n # Get the absolute path\n modified_files[i] = self.repo.working_dir + \"/\" + modified_files[i]\n _LOG.debug(\"Modified files: %s\" % str(modified_files))\n \n # We only care if the uncommitted changes apply to files in the 'publishing staging area'.\n overlap = False\n for f in to_publish:\n if f[\"path\"] in modified_files:\n overlap = True\n if self.repo.is_dirty() and overlap:\n _LOG.error(\"Uncomitted changes exist in the repository. Please commit these changes before trying to publish any files.\")\n return\n \n # Get the minimal amount of metadata needed to publish from the user.\n response = raw_input(\"Private publication? (y/n): \")\n if response == \"y\" or response == \"Y\":\n _LOG.info(\"Publishing as a private repository...\")\n private = True\n elif response == \"n\" or response == \"N\":\n _LOG.info(\"Publishing as a public repository...\")\n private = False\n else:\n _LOG.error(\"Unknown response '%s'. Not publishing.\" % response)\n return\n\n parameters = self.get_publication_parameters()\n \n # Publish to the repository hosting service.\n publisher = Publisher(service=service)\n pid, doi = publisher.publish_data(parameters, pid=pid, private=private)\n \n # Update the publications database by adding the DOIs and publication IDs to the previously-staged files.\n with self.connection:\n c = self.connection.cursor()\n query = \"UPDATE %s SET doi=? WHERE doi IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [doi])\n query = \"UPDATE %s SET pid=? WHERE pid IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [pid])\n query = \"UPDATE %s SET date=? WHERE date IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(datetime.datetime.now().date())])\n query = \"UPDATE %s SET time=? WHERE time IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(datetime.datetime.now().time())])\n query = \"UPDATE %s SET sha=? 
WHERE sha IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(self.repo.head.object.hexsha)])\n \n self.db_disconnect()\n \n return", "def download(urls, dest_folder):\n pass", "def publish(self):\n return", "def detect(self, filename):\n self.publish(filename)", "def publish(self):\n # Write the models locally\n local_path_dist = self.dump_distributions()\n local_path_model = self.dump_model()\n\n # Write them to cloud storage\n bucket_path_dist = self.get_bucket_path(self.filename_distributions)\n bucket_path_model = self.get_bucket_path(self.filename_model)\n\n config = self.services.config\n lake = self.services.lake\n\n\n lake.upload(bucket_path_dist, local_path_dist, bucket_name=config.lake_bucket)\n lake.upload(bucket_path_model, local_path_model, bucket_name=config.lake_bucket)\n\n # Now finally we want to write our reference file to our repository and build a merge request\n reference = {\n \"model\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_model,\n \"md5\": file_md5(local_path_model),\n },\n \"distributions\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_dist,\n \"md5\": file_md5(local_path_dist),\n },\n }\n\n return reference", "def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()", "def upload_release_files():\n version = get_release_version()\n target = sf_files + sourceforge_target_dir(version)\n\n print()\n print(\"Uploading release files...\")\n print(\" Source:\", release_path)\n print(\" Target: \" + target)\n print(\" Files: \" + ', '.join(glob.glob('*')))\n print()\n call_rsync(\n username,\n \"\",\n path.join(release_path, \"*\"),\n target\n )\n print()", "def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #save layers to link and create new task to do so\n for layer in psdProject:\n layer.compose().save(layer.name+'.tiff')\n self.logger.info(\"Saved Layer {layerName}.psd\".format(layerName=layer.name))\n publish = sgtk.util.register_publish(publisher.sgtk,\n item.context,\n os.path.join(os.path.dirname(path),layer.name+'.tiff'),\n layer.name,\n version_number=None,\n published_file_type=\"Rendered Image\")", "def publish(self, id: uplink.Path):\n pass", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def publish(self, message: str) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )", "def _TransferPublishManifest(self, publish_manifest, db_path_prefix,\n force_copy):\n for item in publish_manifest:\n src_path = item.current_path\n dest_path = \"%s/%s\" % 
(db_path_prefix, item.orig_path)\n logger.debug(\"TransferPublishManifest - src_path: %s, dest_path: %s.\",\n src_path, dest_path)\n\n # Transfer manifest file to published database directory.\n tries = 2\n sleep_secs = 5\n while (not serve_utils.LocalTransfer(\n src_path, dest_path,\n force_copy, prefer_copy=True, allow_symlinks=False)):\n tries -= 1\n if tries == 0:\n raise exceptions.PublishServeException(\n \"Could not transfer publish manifest file %s to %s.\" %\n (src_path, dest_path))\n logger.debug(\"Retrying Local Transfer.\")\n time.sleep(sleep_secs)\n sleep_secs *= 2 # Double the sleep time after each retry.", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)", "def send_to(self, dest='.', src='/tmp/', url='localhost',\n rsync='rsync -auv'):\n files = self.setup(dest=dest, src=src)\n self.send_files(files, url=url, rsync=rsync)", "def upload():\n env.user = 'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)", "def __publish_dirt(self, dirt):\n self.dirt_pub.publish(dirt)", "def publish(self, file_name, c_id, size, torrent, files): # ver lo del id del cliente\n dht = get_remote_node(self.dht_ip, self.dht_port)\n v = dht.get(get_hash(file_name))\n\n if v == None:\n dht.set(get_hash(file_name), [c_id])\n cantstep = dht.get(get_hash(maxstep))\n print(\"cantstep\", cantstep)\n l = len(dht.get(get_hash(filestep + \"|\" + str(cantstep))))\n if l == lenstep: #create new step\n print(\"full step\")\n dht.set(get_hash(maxstep), cantstep + 1)\n dht.set(get_hash(filestep + \"|\" + str(cantstep + 1)), [file_name])\n else:\n all = dht.get(get_hash(filestep + \"|\" + str(cantstep)))\n all.append(file_name)\n dht.set(get_hash(filestep + \"|\" + str(cantstep)), all)\n k = sizefile + \"|\" + file_name\n dht.set(get_hash(k), size)\n dht.set(get_hash(file_name + \".torrent\"), torrent) #first time to publish this .torrent\n else:\n if not v.__contains__(c_id):\n v.append(c_id)\n dht.set(get_hash(file_name), v)\n\n dht.set(get_hash(myfiles + \"|\" + str(c_id)),files)\n print(\"client \", c_id, \"published file \", file_name)", "def deploy():\n build()\n collect()\n commit()\n push()" ]
[ "0.74543023", "0.6922994", "0.6866991", "0.676951", "0.642343", "0.6293285", "0.6157238", "0.6132221", "0.60680485", "0.59941494", "0.5980355", "0.5970285", "0.59354484", "0.5896326", "0.58699507", "0.5835118", "0.5805512", "0.58000696", "0.57821625", "0.5779796", "0.57461995", "0.56931955", "0.5661582", "0.5660802", "0.5656355", "0.5643859", "0.56142336", "0.5604374", "0.55907357", "0.5585925" ]
0.77630585
0
Start the Microblaze Processor. The processor instance will start automatically after instantiation.
def start(self): self.microblaze.run() self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0) self.load_switch_config(self.iop_switch_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def platform_start(self):\n self.platform.start()", "def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()", "async def start(self):\n await self._backend.start()", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def run(self) -> None:\n self.microphone.start()\n try:\n self._run()\n finally:\n self.microphone.stop()", "def start(self) -> None:\n context = self._get_multiprocessing_context()\n self._last_parsing_stat_received_at = time.monotonic()\n\n self._parent_signal_conn, child_signal_conn = context.Pipe()\n process = context.Process(\n target=type(self)._run_processor_manager,\n args=(\n self._dag_directory,\n self._max_runs,\n self._processor_timeout,\n child_signal_conn,\n self._dag_ids,\n self._pickle_dags,\n self._async_mode,\n ),\n )\n self._process = process\n\n process.start()\n\n self.log.info(\"Launched DagFileProcessorManager with pid: %s\", process.pid)", "def initialize_multiprocessing(self):\n if self.multiprocessing_controller is not None:\n MPControl.set_multiprocess_engine(self.multiprocessing_controller)\n MPControl.connect()", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def start(self):\n self.j_pump.start()\n return self", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)", "def start(self):\n self.p.start()", "def start(self):\n logger.debug('Starting controller')\n pass", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def start(self):\n if self._started:\n return\n\n self._register()\n self._started = True", "def add_cpu(self):\n cpu_worker = 
CPUCmdRunner(self.host, 'cpu')\n self.cpu_workers.append(cpu_worker)\n cpu_worker.start()\n self.log.info('CPU worker added')", "def start_processing(self):", "def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()", "def _start(self):\n\n _log.debug(\"Pipeline {} launching run components\".format(self.id))\n self._start_time = time.time()\n for run in self.runs:\n run.start()\n if run.sleep_after:\n time.sleep(run.sleep_after)", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! 
You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! 
You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! You may use \"help start\" see more help')", "def start(self) -> None:\n data = {\n \"pipeline\": self.pipeline.id,\n \"language\": self.language,\n }\n if self.runner_data is not None:\n data[\"runner_data\"] = self.runner_data\n\n self.process_event(PipelineEvent(PipelineEventType.RUN_START, data))", "def run(self):\n self.process.start()", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def start(self):\n self._setup_thread()\n self.thread.start()", "def run(self):\n client = ProcessorClient()\n try:\n client.connect(self.address)\n except Exception as e:\n self.error = e\n logging.error(e)\n else:\n self.clients[self.name] = client", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def start(self):\n if not self._Thread__initialized:\n raise RuntimeError('thread.__init__() not called')\n if self._Thread__started.is_set():\n raise RuntimeError('threads can only be started once')\n with threading._active_limbo_lock:\n threading._limbo[self] = self\n try:\n start_new_background_thread(self.__bootstrap, ())\n except Exception:\n with threading._active_limbo_lock:\n del threading._limbo[self]\n raise\n self._Thread__started.wait()", "def start( self ):\n\n self.service()" ]
[ "0.6218762", "0.60426116", "0.5927747", "0.5888535", "0.5852811", "0.58245885", "0.5803731", "0.57714486", "0.57650065", "0.5744394", "0.5729824", "0.5724727", "0.5718365", "0.5661167", "0.5647819", "0.5586553", "0.5540475", "0.553082", "0.551599", "0.5512633", "0.5512412", "0.5510244", "0.5494249", "0.549061", "0.5490138", "0.5489092", "0.5472116", "0.5468504", "0.54674816", "0.54644006" ]
0.6880082
0
Put the Microblaze processor into reset. This method will set processor status as "STOPPED".
def stop(self): self.microblaze.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "async def reset(self):\n await self.set_param(\"ContinuousExposures\", 0)\n await self.set_param(\"Exposures\", 0)\n cmd = await self.send_command(\"RESETTIMING\", timeout=1)\n if not cmd.succeeded():\n self.status = ControllerStatus.ERROR\n raise ArchonError(f\"Failed sending RESETTIMING ({cmd.status.name})\")\n\n # TODO: here we should do some more checks before we say it's IDLE.\n self.status = ControllerStatus.IDLE", "def reset(self):\n\t\treturn Job(SDK.PrlVm_Reset(self.handle)[0])", "def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state", "def soft_reset():", "def reset():\n _runtime.reset()", "def reset(self):\n self.state = self.process_state(self.env.reset())\n return self.state", "def reset(self):\n with self.bkp_lock:\n self.active_breakpoints = set()\n self.stepping = SteppingMode.STEP_NO_STEP\n self.continue_next()", "def reset(self) -> None:\n # See section 7.2.2 of the datasheet for reset description.\n self._reset.value = True\n time.sleep(0.0001) # 100 us\n self._reset.value = False\n time.sleep(0.005) # 5 ms", "def reset(self):\n GPIO.output(self.reset_pin, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(self.reset_pin, GPIO.HIGH)\n time.sleep(0.1)\n\n if self.inky_version == 2:\n self._send_command(_V2_RESET)\n\n self._busy_wait()", "def reset(self):\n self.stop()\n self.start()", "def halt(self):\n cmd_title('HALTING')\n self.running = False\n # Reset any state\n self.tape = None\n self.transitions = None\n self.current_state = None\n self.tape_index = None\n self.result = ''", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def svc_reset_system_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def reset():\n for cpu_id in POSSIBLE_CPUS:\n set_cpu(cpu_id, True)", "def set_working_state(self):\n self.state = 0\n self.port = None", "def reset(self):\n# \n self.end_and_close()\n# self.sim.start()\n\n # Start the next simulation\n self.sim._model.swmm_open()\n self.sim._model.swmm_start()\n\n # get the state\n state = self._state()\n return state", "def reset(self):\n command = \"export STLINK_DEVICE=\" + self.stlink.port + \"; st-flash reset\"\n subprocess.run(command, shell=True)\n time.sleep(1)", "def reset(self):\n self.write_to_serial('*RST')", "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. 
Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def reset(self):\n\n # Deactivate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", \"deactivate\", (str(self.slot + PICMG_SLOT_OFFSET))])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))\n\n # TODO: Add a resetting status here to allow other reads to wait\n # See DIAG-68.\n\n # Wait for the card to shut down\n time.sleep(2.0)\n\n # Activate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", \"activate\", str(self.slot + PICMG_SLOT_OFFSET)])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))", "def resetDeviceStates(self):", "def reset(self,):\n\n self._toggle_pin(RESET_PIN)", "def reset(self, sync=True):\n self.vmomi_object.ResetVM_Task()\n if sync: self._wait_for_power_on()", "def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")", "def reset(self):\n self.stuck = False", "def reset_to_cold(self):\n self._log_msg_start(\"CFG-RST - Reset to cold start\")\n self._ubx.send(\"CFG-RST\", navBbrMask=0xFFFF, resetMode=0x01)", "def handle_warm_resets():\n\n # If we're in USB reset, we're actively receiving warm reset signaling; and we should reset\n # to the Rx.Detect.Reset state.\n with m.If(self.in_usb_reset):\n transition_to_state(\"Rx.Detect.Reset\")" ]
[ "0.62783825", "0.6214647", "0.62101734", "0.62012935", "0.6188777", "0.6144242", "0.6094757", "0.60869604", "0.6043643", "0.6032759", "0.6026657", "0.6011027", "0.600109", "0.59982294", "0.5969956", "0.59564924", "0.5952704", "0.59244686", "0.5921813", "0.5914156", "0.59073234", "0.58894604", "0.58858716", "0.58761483", "0.5875608", "0.5830331", "0.58266324", "0.58210874", "0.58090645", "0.5803332" ]
0.70397276
0
Load the Microblaze processor's switch configuration. This method will update switch config. Each pin requires 8 bits for configuration.
def load_switch_config(self, config=None): if config is None: config = ARDUINO_SWCFG_DIOALL elif not len(config) == 4*ARDUINO_SWITCHCONFIG_NUMREGS: raise TypeError('Invalid switch config {}.'.format(config)) # Build switch config word self.iop_switch_config = config sw_config_words = [0]*ARDUINO_SWITCHCONFIG_NUMREGS for ix, cfg in enumerate(self.iop_switch_config): if ix < 4: sw_config_words[0] |= (cfg << ix*8) elif ix < 8: sw_config_words[1] |= (cfg << (ix-4)*8) elif ix < 12: sw_config_words[2] |= (cfg << (ix-8)*4) elif ix < 16: sw_config_words[3] |= (cfg << (ix-12)*4) else: sw_config_words[4] |= (cfg << (ix-16)*4) # Configure switch for i in range(ARDUINO_SWITCHCONFIG_NUMREGS): self.write_cmd(ARDUINO_SWITCHCONFIG_BASEADDR + 4*i, sw_config_words[i])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError", "def configure_switch(self, config):\n raise NotImplementedError", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def GPIO_initialization():\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(Sensor.IN_1, GPIO.OUT)\n GPIO.setup(Sensor.IN_2, GPIO.OUT)\n GPIO.setup(Sensor.EN, GPIO.OUT)\n\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['RED_STOP'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['YELLOW_CW'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['GREEN_CCW'], GPIO.IN)\n\n GPIO.output(Sensor.IN_1, GPIO.LOW)\n GPIO.output(Sensor.IN_2, GPIO.LOW)", "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "def load_switches(self):\n new_switches = list()\n for site in self.sites:\n switches = self.get_switches_stats(site_id=site['id'])\n for switch in switches:\n if len(switch['name']) < 1:\n switch['name'] = ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)])\n new_switch = {\n \"name\": switch['name'],\n \"site\": site['name'],\n \"site_id\": site['id'],\n \"device_id\": switch['id'],\n \"mac\": switch['mac'],\n \"mac_str\": ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)]),\n \"ip_config\": switch['ip_config'],\n \"ip_actual\": switch['ip_stat'],\n \"net_obj\": get_network(address=switch['ip_config']['ip'], netmask=switch['ip_config']['netmask']) if 'ip' in switch['ip_config'] else None\n }\n for vlan, addr in new_switch['ip_actual']['ips'].items():\n if new_switch['ip_actual']['ip'] == addr:\n new_switch['ip_actual']['vlan'] = vlan.strip('vlan')\n else:\n new_switch['ip_actual']['vlan'] = 0\n if new_switch['ip_config']['network'] and new_switch['ip_config']['network'] != \"default\":\n new_switch['ip_config']['vlan'] = site['network_template']['networks'][new_switch['ip_config']['network']]['vlan_id']\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n elif new_switch['ip_config']['network'] and new_switch['ip_config']['network'] == \"default\":\n new_switch['ip_config']['vlan'] = 1\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n else:\n new_switch['ip_config']['vlan'] = 0\n logger.error(f\"Did not match {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n new_switches.append(new_switch)\n self.switches = new_switches", "def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def 
start(self):\n self.microblaze.run()\n self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)\n self.load_switch_config(self.iop_switch_config)", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def state(config: dict):\n\n async def state_callback(device):\n if device.basic_info is not None:\n if device.available:\n print_device_details(device)\n\n device.shutdown_event_loop()\n\n logger.info(\"Initialising SonoffSwitch with host %s\" % config[\"host\"])\n SonoffSwitch(\n host=config[\"host\"],\n callback_after_update=state_callback,\n logger=logger,\n device_id=config[\"device_id\"],\n api_key=config[\"api_key\"],\n )", "def switch_changed(self, switch, name):\n section, option = name\n v = (\"1\" if switch.value else \"0\")\n _stash.config.set(section, option, v)\n self.save()", "def setup_platform(hass, config, add_devices, discovery_info=None):\n devices = config.get(CONF_SWITCHES, {})\n cmdrgbwlight = []\n\n for object_id, device_config in devices.items():\n value_template = device_config.get(CONF_STATE_VALUE_TEMPLATE)\n\n if value_template is not None:\n value_template.hass = hass\n\n cmdrgbwlight.append(\n CommandSwitch(\n hass,\n object_id,\n device_config.get(CONF_NAME),\n device_config.get(CONF_COMMAND_ON),\n device_config.get(CONF_COMMAND_OFF),\n device_config.get(CONF_COMMAND_STATE),\n device.config.get(CONF_BRIGHTNESS_STATE),\n device.config.get(CONF_BRIGHTNESS_COMMAND),\n device.config.get(CONF_BRIGHTNESS_VALUE_TEMPLATE),\n device.config.get(CONF_RGB_STATE),\n device.config.get(CONF_RGB_COMMAND),\n device.config.get(CONF_RGB_VALUE_TEMPLATE),\n device.config.get(CONF_FRIENDLY_NAME, object_id),\n device.config.get(CONF_BRIGHTNESS_SCALE),\n value_template\n )\n )\n\n if not cmdrgbwlight:\n _LOGGER.error(\"No switches added\")\n return False\n\n add_devices(cmdrgbwlight)", "def load(self):\n basePath = './examples/'\n file = \"print8.ls8\"\n # file = \"mult.ls8\"\n # file = \"stack.ls8\"\n # file = \"call.ls8\"\n file = \"sctest.ls8\"\n if len(sys.argv) > 1:\n file = sys.argv[1]\n address = 0\n\n with open(basePath + file, \"r\") as f:\n for line in f:\n line = line.split(\"#\")\n\n try:\n v = int(line[0], 2)\n except ValueError:\n continue\n # print(v)\n self.ram[address] = v\n address += 1", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def setup_platform(hass, config, add_devices, 
discovery_info=None):\n cl = hass.data.get(DATA_CIRCADIAN_LIGHTING)\n if cl:\n cs = CircadianSwitch(\n hass,\n cl,\n name=config.get(CONF_NAME),\n lights_ct=config.get(CONF_LIGHTS_CT, []),\n lights_rgb=config.get(CONF_LIGHTS_RGB, []),\n lights_xy=config.get(CONF_LIGHTS_XY, []),\n lights_brightness=config.get(CONF_LIGHTS_BRIGHT, []),\n disable_brightness_adjust=config.get(CONF_DISABLE_BRIGHTNESS_ADJUST),\n min_brightness=config.get(CONF_MIN_BRIGHT),\n max_brightness=config.get(CONF_MAX_BRIGHT),\n sleep_entity=config.get(CONF_SLEEP_ENTITY),\n sleep_state=config.get(CONF_SLEEP_STATE),\n sleep_colortemp=config.get(CONF_SLEEP_CT),\n sleep_brightness=config.get(CONF_SLEEP_BRIGHT),\n disable_entity=config.get(CONF_DISABLE_ENTITY),\n disable_state=config.get(CONF_DISABLE_STATE),\n initial_transition=config.get(CONF_INITIAL_TRANSITION),\n )\n add_devices([cs])\n\n def update(call=None):\n \"\"\"Update lights.\"\"\"\n cs.update_switch()\n\n return True\n else:\n return False", "def _configure(self) -> None:\n reg_data = self.configuration\n conf_data = reg_data & ~0xC0 | 0x80\n # check if already in the right configuration, do not re-configure on and on again\n if reg_data != conf_data:\n self.configuration = conf_data", "def load(self):\n super().load()\n for channel in range(self.n_channels):\n c_str = 'Channel_{0:02d}'.format(channel)\n if c_str not in self:\n log.info(f'{c_str} not found in config yaml, adding it now with defaults')\n self.set(c_str, {'amplitude': 1.5, 'dc_offset': 0.0}, save_config=True)\n\n val = self.get(c_str)\n self.amplitude(channel, val['amplitude'])\n self.offset(channel, val['dc_offset'])\n self._set_register(0, self.get('clock_delay', 1000)//100 + self._seq_length//100 - 1)", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n\n bhyve = hass.data[DOMAIN][entry.entry_id][CONF_CLIENT]\n\n switches = []\n devices = filter_configured_devices(entry, await bhyve.devices)\n programs = await bhyve.timer_programs\n\n device_by_id = {}\n\n for device in devices:\n device_id = device.get(\"id\")\n device_by_id[device_id] = device\n if device.get(\"type\") == DEVICE_SPRINKLER:\n if not device.get(\"status\"):\n _LOGGER.warning(\n \"Unable to configure device %s: the 'status' attribute is missing. 
Has it been paired with the wifi hub?\",\n device.get(\"name\"),\n )\n continue\n\n # Filter out any programs which are not for this device\n device_programs = [\n program for program in programs if program.get(\"device_id\") == device_id\n ]\n\n switches.append(\n BHyveRainDelaySwitch(hass, bhyve, device, \"weather-pouring\")\n )\n\n all_zones = device.get(\"zones\")\n for zone in all_zones:\n zone_name = zone.get(\"name\")\n # if the zone doesn't have a name, set it to the device's name if there is only one (eg a hose timer)\n if zone_name is None:\n zone_name = (\n device.get(\"name\") if len(all_zones) == 1 else \"Unnamed Zone\"\n )\n switches.append(\n BHyveZoneSwitch(\n hass,\n bhyve,\n device,\n zone,\n zone_name,\n device_programs,\n \"water-pump\",\n )\n )\n\n for program in programs:\n program_device = device_by_id.get(program.get(\"device_id\"))\n program_id = program.get(\"program\")\n if program_device is not None and program_id is not None:\n _LOGGER.info(\"Creating switch: Program %s\", program.get(\"name\"))\n switches.append(\n BHyveProgramSwitch(\n hass, bhyve, program_device, program, \"bulletin-board\"\n )\n )\n\n async_add_entities(switches, True)\n\n async def async_service_handler(service):\n \"\"\"Map services to method of BHyve devices.\"\"\"\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)\n\n for service, details in SERVICE_TO_METHOD.items():\n schema = details[\"schema\"]\n hass.services.async_register(\n DOMAIN, service, async_service_handler, schema=schema\n )", "def set_switch(self, node_uuid, index, data):\n if data == \"on\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 4096, 0)\n self.values['level'].set_data_index(index=index, data=100)\n except Exception:\n logger.exception('[%s] - Exception when switching on', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n elif data == \"off\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 0, 4096)\n self.values['level'].set_data_index(index=index, data=0)\n except Exception:\n logger.exception('[%s] - Exception when switching off', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n else:\n logger.warning(\"[%s] - set_switch unknown data : %s\", self.__class__.__name__, data)", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n switches = []\n for coil 
in config.get(\"coils\"):\n switches.append(ModbusCoilSwitch(\n coil.get(CONF_NAME),\n coil.get(CONF_SLAVE),\n coil.get(CONF_COIL)))\n add_devices(switches)", "def config_led(my_bus):\n try:\n my_bus.write_i2c_block_data(LED_DEVICE_ADDRESS, 0x2F, [0xFF]) # system setup\n my_bus.write_i2c_block_data(LED_DEVICE_ADDRESS, 0x89, [0xFF]) # display on\n except IOError:\n t = 1\n print(\"got IOError. try again in\", t, \"second\")\n time.sleep(t)", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def setup(self):\n\t\tself.interface = self.getDriver('light_interface')\n\n\t\tself.pin = self.config['interface_position']\n\t\tself.blink_rate = self.config['blink_rate'] / 2 or 0.5\n\t\tself.is_on = False\n\n\t\tself.intensity = 255\n\t\tself.blink = False\n\t\tself.count = None\n\t\tself.current_count = False\n\t\tself.current_count = None\n\n\t\tself.saved_intensity = None\n\t\tself.saved_blink = False\n\t\tself.saved_count = None\n\n\t\treturn True", "def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()", "def set_new_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman):\n modem.configuration[DISABLE_AUTO_LINKING].new_value = disable_auto_linking\n modem.configuration[MONITOR_MODE].new_value = monitor_mode\n modem.configuration[AUTO_LED].new_value = auto_led\n modem.configuration[DEADMAN].new_value = deadman" ]
[ "0.62282544", "0.5851503", "0.5757967", "0.5553404", "0.5420665", "0.5405235", "0.5385367", "0.53103375", "0.5305637", "0.52973616", "0.5270049", "0.52667636", "0.5264424", "0.52271485", "0.5182084", "0.51805335", "0.51612717", "0.515244", "0.5107933", "0.50897145", "0.50789636", "0.5074718", "0.5069044", "0.50566053", "0.50538623", "0.504823", "0.5027205", "0.4997396", "0.49921843", "0.49845707" ]
0.67974967
0
Returns the status of the Microblaze processor. Returns str The processor status ("IDLE", "RUNNING", or "STOPPED").
def status(self): return self.microblaze.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(self):\n return self.read_register(259, 0, 3)", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def processor():\n return uname().processor", "def processor():\n return uname().processor", "def hardware_status(self):\n stat = structs.JLinkHardwareStatus()\n res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))\n if res == 1:\n raise errors.JLinkException('Error in reading hardware status.')\n return stat", "def runtime_status(self):\n try:\n return self.yarn_api.state(self.app_id)\n except:\n return \"NONE\"", "def status(self):\n return self._bp.get_motor_status(self._port)", "def get_status(self):\n\n return self._system", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")" ]
[ "0.66442674", "0.65349734", "0.6357901", "0.61985266", "0.61985266", "0.6195143", "0.61590946", "0.6113441", "0.6109538", "0.6081476", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187" ]
0.692374
0
Check whether the command mailbox is idle. Returns bool True if the command in the mailbox is idle.
def is_cmd_mailbox_idle(self): mb_cmd_word = self.microblaze.read(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET) return (mb_cmd_word & 0x1) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def is_idle(self) -> bool:\n return (\n await self.send_command_and_read_reply(Protocol1Command(command=\"F\")) == \"Y\"\n )", "def is_idle(self) -> bool:\n\n return self.get_runningstate == self.cmd.C815_IDLE_STATE", "def is_idle(self) -> bool:", "def is_idle(self) -> bool:\n\n return self.send(self.cmd.GET_GLOBALSTATUS_RUNNING) is False", "def is_idle(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def is_busy(self) -> bool:\n return self.__interface.read_pin(self.__interface.BUSY_PIN) == 0 # 0: busy, 1: idle.", "def _isInIdle(self):\r\n if core.FW_conf['blackbox'].isVideoRecorderAvailable() and core.FW_conf['blackbox'].videoClient.GetCurrentState() == 'idle':\r\n self.inIdle = True\r\n return True\r\n else:\r\n return False", "def __is_active(self, command):\n return True", "def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd", "def is_idle(self) -> bool:\n return not self.orders", "def is_idle(self):\n idle = len(self.__tasks) == 0, self.__queue.qsize() == 0\n return collections.namedtuple('TaskletIdle', ['tasklet', 'worker'])(*idle)", "async def wait_until_idle(self):\n logger.debug(f\"ML600 pump {self.name} wait until idle...\")\n while not self.is_idle():\n await asyncio.sleep(0.1)\n logger.debug(f\"...ML600 pump {self.name} idle now!\")", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def should_poll(self):\n return self._command_state is not None", "def is_busy(self):\n cons = self.rpc.call(MsfRpcMethod.ConsoleList)['consoles']\n for c in cons:\n if c['id'] == self.cid:\n return c['busy']", "def is_active(self):\n with self._lock:\n return self._robot is not None", "def is_blocked(self, idle_time = 2.0):\n return time.time() - self._last_update > idle_time", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def _CheckForIdleQuit(self):\n timeout = time.time() + self.idle_timeout_secs\n while time.time() < timeout:\n if self._shutdown_requested_event.is_set():\n # An external source called shutdown()\n return\n elif self._rpc_received_event.is_set():\n logging.debug('Resetting the idle timeout')\n timeout = time.time() + self.idle_timeout_secs\n self._rpc_received_event.clear()\n time.sleep(1)\n # We timed out, kill the server\n logging.warning('Shutting down the server due to the idle timeout')\n self.shutdown()", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def is_alive(self):\n self.ssh.sendline(\"clear\")\n return self.ssh.prompt()", "def isBusy(self):\n return self.busy", "def idle_check(self):\n result = []\n LOG.info('Idle check...')\n to_deactivate = self.manager.idle()\n with LOCK:\n for ctx in to_deactivate:\n LOG.info('removing idle chatstate %s', ctx.chat_id)\n self.manager.remove_chat_context(ctx)\n result.append(ctx)\n return result", "async def locked(self):\n return not \"not\" in await self.ask(\"locked\")", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def isActive(self):\n return self._timerID is not None", "def is_in_terminal(self):\n return self._current_state is None", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and 
self.command is not None" ]
[ "0.7600817", "0.7516991", "0.7223411", "0.7089875", "0.6917131", "0.66209525", "0.6557727", "0.6422757", "0.6312891", "0.6253275", "0.62224126", "0.6158467", "0.6102417", "0.60107124", "0.5923812", "0.58288354", "0.58169097", "0.5816199", "0.5791001", "0.57627046", "0.5757289", "0.5746407", "0.5734319", "0.5691204", "0.56857747", "0.5674667", "0.565714", "0.56486446", "0.5643674", "0.5631291" ]
0.8497326
0
Computes the hamming distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True): return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", 
"def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def _PD_hamming(alignA, alignB, subst, bySite, withinA, ignoreGaps=True):\n L = len(alignA.iloc[0])\n gapCode = AA2CODE['-']\n\n \"\"\"Convert alignments into integer arrays first to speed comparisons\"\"\"\n matA = np.zeros((len(alignA), L))\n for seqi, s in enumerate(alignA):\n matA[seqi,:] = _seq2vec(s)\n if not withinA:\n matB = np.zeros((len(alignB), L))\n for seqi, s in enumerate(alignB):\n matB[seqi,:] = _seq2vec(s)\n\n \"\"\"Dist will be 1 where equal, 0 where not and nan if one is a gap\"\"\"\n if withinA:\n dist=np.zeros((int(scipy.special.comb(len(alignA), 2)), L))\n allPairs = itertools.combinations(np.arange(len(alignA)), 2)\n for j, (seqi1, seqi2) in enumerate(allPairs):\n dist[j,:] = matA[seqi1,:]!=matA[seqi2,:]\n if ignoreGaps:\n gapInd = (matA[seqi1,:]==gapCode) | (matA[seqi2,:]==gapCode)\n dist[j, gapInd] = np.nan\n else:\n dist=np.zeros((len(alignA)*len(alignB), L))\n allPairs = itertools.product(np.arange(len(alignA)), np.arange(len(alignB)))\n for j, (seqiA, seqiB) in enumerate(allPairs):\n dist[j,:] = matA[seqiA,:]!=matB[seqiB,:]\n if ignoreGaps:\n gapInd = (matA[seqiA,:]==gapCode) | (matB[seqiB,:]==gapCode)\n dist[j, gapInd] = np.nan\n\n if not bySite:\n dist=np.nanmean(dist, axis=1)\n return np.nanmean(dist, axis=0)", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in 
itertools.izip(inp[0],inp[1]))", "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances", "def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance", "def hamming(seq1, seq2) -> int:\n if type(seq1) is SeqRecord:\n return hamming(seq1.seq, seq2)\n elif type(seq2) is SeqRecord:\n return hamming(seq1, seq2.seq)\n elif (type(seq1) is str or type(seq1) is Seq) and (type(seq2) is Seq or type(seq2) is str):\n if len(seq1) != len(seq2):\n raise ValueError('The sequences are of different lengths!')\n else:\n distance = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n distance += 1\n return distance\n else:\n raise TypeError('Wrong type.')", "def hard_example_mining(dist_mat, is_pos, is_neg):\n\n assert len(dist_mat.size()) == 2\n\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N]\n dist_ap, _ = torch.max(dist_mat * is_pos, dim=1)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N]\n inf = dist_mat.max() + 1\n dist_an, _ = torch.min(dist_mat * is_neg + is_pos * inf, dim=1)\n\n return dist_ap, dist_an", "def sim_mat(fc7_feats):\n print(\"Something\")\n t = time.time()\n pdist_ = spatial.distance.pdist(fc7_feats)\n print('Created distance matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat = spatial.distance.squareform(pdist_)\n print('Created square distance matrix' + ' ' + str(time.time() - t) + ' sec')\n del pdist_\n\n t = time.time()\n sigmas = np.sort(dist_mat, axis=1)[:, 7] + 1e-16\n matrice_prodotti_sigma = np.dot(sigmas[:, np.newaxis], sigmas[np.newaxis, :])\n print('Generated Sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat /= -matrice_prodotti_sigma\n print('Computed dists/-sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n del matrice_prodotti_sigma\n\n t = time.time()\n W = np.exp(dist_mat, dist_mat)\n # W = np.exp(-(dist_mat / matrice_prodotti_sigma))\n np.fill_diagonal(W, 0.)\n\n # sparsify the matrix\n k = int(np.floor(np.log2(fc7_feats.shape[0])) + 1)\n n = W.shape[0]\n print('Created inplace similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n for x in W:\n x[np.argpartition(x, n - k)[:(n - k)]] = 0.0\n\n print('Sparsify the matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n # matrix_S = np.zeros((n, n))\n m1 = W[np.triu_indices(n, k=1)]\n m2 = W.T[np.triu_indices(n, k=1)]\n\n W = spatial.distance.squareform(np.maximum(m1, m2))\n print('Symmetrized the similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n return W", "def get_adjacent_distances(dist_matrix_header,\r\n dist_matrix,\r\n sample_ids,\r\n strict=False):\r\n filtered_idx = []\r\n filtered_sids = []\r\n for sid in sample_ids:\r\n try:\r\n idx = dist_matrix_header.index(sid)\r\n except ValueError:\r\n if strict:\r\n raise ValueError(\r\n \"Sample 
ID (%s) is not present in distance matrix\" %\r\n sid)\r\n else:\r\n pass\r\n else:\r\n filtered_idx.append(idx)\r\n filtered_sids.append(sid)\r\n\r\n if len(filtered_idx) < 2:\r\n raise ValueError(\"At least two of your sample_ids must be present in the\"\r\n \" distance matrix. %d are present.\" % len(filtered_idx))\r\n\r\n distance_results = []\r\n header_results = []\r\n for i in range(len(filtered_idx) - 1):\r\n distance_results.append(\r\n dist_matrix[filtered_idx[i]][filtered_idx[i + 1]])\r\n header_results.append(\r\n (filtered_sids[i], filtered_sids[i + 1]))\r\n return distance_results, header_results", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat" ]
[ "0.63828456", "0.6093228", "0.6056598", "0.59507614", "0.5795671", "0.578761", "0.57800555", "0.574706", "0.5681204", "0.56678975", "0.56452966", "0.56424135", "0.563103", "0.558626", "0.55659384", "0.5539205", "0.5504324", "0.5493676", "0.5407647", "0.5401868", "0.53662705", "0.53654534", "0.5331345", "0.531185", "0.52777064", "0.5271174", "0.52624714", "0.5231668", "0.52188754", "0.52121013" ]
0.6753979
0
Computes the Levenshtein edit distance between two sequences, with the AA substitution distances provided in distance_matrix. The default distance matrix has a 1 for mismatches and 0 for matches.
def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
    q_L = seq_vec1.shape[0]
    s_L = seq_vec2.shape[0]
    if q_L == s_L:
        """No gaps: substitution distance
        This will make it differ from a strict edit-distance since
        the optimal edit-distance may insert same number of gaps in both sequences"""
        dist = 0
        for i in range(q_L):
            dist += distance_matrix[seq_vec1[i], seq_vec2[i]]
        return dist

    ldmat = np.zeros((q_L, s_L), dtype=np.int16)
    for row in range(1, q_L):
        ldmat[row, 0] = row * gap_penalty

    for col in range(1, s_L):
        ldmat[0, col] = col * gap_penalty

    for col in range(1, s_L):
        for row in range(1, q_L):
            ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,
                                  ldmat[row, col-1] + gap_penalty,
                                  ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]])  # substitution
    return ldmat[row, col]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def get_levenshtein_distance(a, b):\r\n n, m = len(a), len(b)\r\n if n > m:\r\n # Make sure n <= m, to use O(min(n,m)) space\r\n a, b = b, a\r\n n, m = m, n\r\n current_row = range(n+1) # Keep current and previous row, not entire matrix\r\n\r\n for i in range(1, m+1):\r\n previous_row, current_row = current_row, [i]+[0]*n\r\n for j in range(1, n+1):\r\n add, delete, change = previous_row[j]+1, current_row[j-1]+1, previous_row[j-1]\r\n if a[j-1] != b[i-1]:\r\n change += 1\r\n current_row[j] = min(add, delete, change)\r\n return current_row[n]", "def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = 
seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(self, seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = range(1, len(seq2) + 1) + [0]\n for x in xrange(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in xrange(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]", "def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = 
substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def distances(a, b):\n # generating matrix\n matrix = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # base case\n for i in range(1, len(a) + 1):\n matrix[i][0] = (i, Operation.DELETED)\n for j in range(1, len(b) + 1):\n matrix[0][j] = (j, Operation.INSERTED)\n\n # fill in matrix with tuples (cost, operation)\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n # edit distance algorithm\n # costs for deletion, insertion and substitution\n delete_cost = matrix[i - 1][j][0] + 1\n insert_cost = matrix[i][j - 1][0] + 1\n substitute_cost = matrix[i - 1][j - 1][0]\n if a[i - 1] != b[j - 1]:\n substitute_cost += 1\n\n # edit distance is min cost of deletion, insertion, substitution\n if delete_cost < insert_cost and delete_cost < substitute_cost:\n matrix[i][j] = (delete_cost, 
Operation.DELETED)\n elif insert_cost < substitute_cost:\n matrix[i][j] = (insert_cost, Operation.INSERTED)\n else:\n matrix[i][j] = (substitute_cost, Operation.SUBSTITUTED)\n\n return matrix", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n \n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def edit_distance_from_aln_strings(str1, str2):\n assert len(str1) == len(str2)\n edit_distance = 0\n in_gap = False\n\n for i, char1 in enumerate(str1):\n if char1 == \"-\" or str2[i] == \"-\":\n if not in_gap:\n in_gap = True\n edit_distance += 1\n else:\n in_gap = False\n\n if char1 != str2[i]:\n edit_distance += 1\n\n return edit_distance", "def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result", "def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5", "def compute_l2_distance_matrix(features_queries, features_dataset):\n sx = np.sum(features_queries ** 2, axis=1, keepdims=True)\n sy = np.sum(features_dataset ** 2, axis=1, keepdims=True)\n\n return np.sqrt(-2 * features_queries.dot(features_dataset.T) + sx + sy.T)", "def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n #print(indices.shape)\n #print(seqs_mat.shape)\n #print(seqs_L.shape)\n return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)", "def weighted_levenshtein(seq1, seq2, weights, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\ttmp = 0.0\n\tfirst_line = [tmp]\n\tfor e in seq2:\n\t\ttmp += weights.get(e, 1)\n\t\tfirst_line.append(tmp)\n\td.append(first_line)\n\ttmp = 0\n\tfor e in seq1:\n\t\ttmp += weights.get(e, 1)\n\t\td.append([tmp] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tw1 = weights.get(e1, 1)\n\t\t\tw2 = 
weights.get(e2, 1)\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + w1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + w2), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + (int(e1 != e2) * max(w1, w2))) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (d[0][-1] + d[-1][0]))\n\treturn raw", "def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)", "def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]" ]
[ "0.6711283", "0.6683752", "0.6674949", "0.6592512", "0.6581954", "0.64532", "0.64532", "0.6445244", "0.6432772", "0.64265794", "0.63696915", "0.6358784", "0.62909234", "0.62251955", "0.6190301", "0.61244994", "0.6103599", "0.6082008", "0.60701114", "0.60523444", "0.60067487", "0.5975594", "0.5952714", "0.5941617", "0.5930747", "0.59077793", "0.59071326", "0.58838445", "0.5841615", "0.58236533" ]
0.6989441
0
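The nb_editdistance row above takes integer-encoded sequence vectors, not strings. A minimal sketch of one way to drive it, assuming the function above is importable; ALPHABET, seq2vec and ident_dm are illustrative names introduced here, not part of that code:

import numpy as np

# Illustrative encoding only: the package's own helpers are not reproduced here.
ALPHABET = 'ARNDCQEGHILKMFPSTWYV'

def seq2vec(seq):
    # Map amino-acid characters to integer codes that index the distance matrix.
    return np.array([ALPHABET.index(aa) for aa in seq], dtype=np.int8)

# Hand-built identity substitution matrix: 0 for a match, 1 for a mismatch,
# matching the default behaviour described in the query above.
ident_dm = np.ones((len(ALPHABET), len(ALPHABET)), dtype=np.int16)
np.fill_diagonal(ident_dm, 0)

d = nb_editdistance(seq2vec('CASSLGREYEQYF'), seq2vec('CASSPGQGEVFF'),
                    distance_matrix=ident_dm, gap_penalty=1)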
Compute the "tcrdist" distance between two TCR CDR3 sequences. Using the default weight, gap penalty, ntrim and ctrim is equivalent to the original distance published in Dash et al. (2017). By setting ntrim and ctrim to 0 and adjusting dist_weight, it is also possible to compute the CDR1/2 loop distances, which can be combined with the CDR3 distance for an overall distance. See the tcrdist2 package for details.
def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
    q_L = seq_vec1.shape[0]
    s_L = seq_vec2.shape[0]
    if q_L == s_L:
        """No gaps: substitution distance"""
        tmp_dist = 0
        for i in range(ntrim, q_L - ctrim):
            tmp_dist += distance_matrix[seq_vec1[i], seq_vec2[i]]
        return tmp_dist * dist_weight

    short_len = min(q_L, s_L)
    len_diff = abs(q_L - s_L)
    if fixed_gappos:
        """If we are not aligning, use a fixed gap position relative to the start of the CDR3
        that reflects the typically longer and more variable-length contributions to the CDR3
        from the J than from the V. For a normal-length CDR3 this would be after the Cys+5
        position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3).
        Use an earlier gappos if lenshort is less than 11."""
        min_gappos = min(6, 3 + (short_len - 5) // 2)
        max_gappos = min_gappos
    else:
        """The CYS and the first G of the GXG are 'aligned' in the beta sheet
        the alignment seems to continue through roughly CYS+4
        ie it's hard to see how we could have an 'insertion' within that region
        gappos=1 would be a insertion after CYS
        gappos=5 would be a insertion after CYS+4 (5 rsds before the gap)
        the full cdr3 ends at the position before the first G
        so gappos of len(shortseq)-1 would be gap right before the 'G'
        shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4"""
        min_gappos = 5
        max_gappos = short_len - 1 - 4
        while min_gappos > max_gappos:
            min_gappos -= 1
            max_gappos += 1
    min_dist = -1
    # min_count = -1
    for gappos in range(min_gappos, max_gappos + 1):
        tmp_dist = 0
        # tmp_count = 0
        remainder = short_len - gappos
        for n_i in range(ntrim, gappos):
            """n_i refers to position relative to N term"""
            # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])
            tmp_dist += distance_matrix[seq_vec1[n_i], seq_vec2[n_i]]
            # tmp_count += 1
        #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])
        for c_i in range(ctrim, remainder):
            """c_i refers to position relative to C term, counting upwards from C term"""
            tmp_dist += distance_matrix[seq_vec1[q_L - 1 - c_i], seq_vec2[s_L - 1 - c_i]]
            # tmp_count += 1
        #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])
        if tmp_dist < min_dist or min_dist == -1:
            min_dist = tmp_dist
            # min_count = tmp_count
        if min_dist == 0:
            break
    """Note that weight_cdr3_region is not applied to the gap penalty"""
    return min_dist * dist_weight + len_diff * gap_penalty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def computeCDR3PWDist(seqs, gap_open=3, gap_extend=3, matrix=parasail.blosum62, useIdentity=False):\n cache = CachedNWDistance(seqs, matrix=matrix, gap_open=gap_open, gap_extend=gap_extend, useIdentity=useIdentity)\n\n indices = cache.indices()\n L = indices.shape[0]\n pwdist = np.nan * np.zeros((L, L))\n \n for i, j in itertools.product(range(L), range(L)):\n \n if i <= j:\n d = cache.metric(indices[i], indices[j])\n pwdist[i, j] = d\n pwdist[j, i] = d\n\n pwdist = pd.DataFrame(pwdist, columns=cache.elements, index=cache.elements)\n return pwdist", "def distance_3D(c1, c2):\n return np.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)", "def func_c_align_split_n(self, args):\n tik_instance, ub_ori, ub_trans, n_before, n_len = 
args\n\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n hw_d = h_d * w_d\n\n data_offset = n_before * self.c_0\n ub_offset = 0\n ori_nburst = dhw_d * self.c_1\n burst_len = n_len * self.c_0 // self.cp_align_len\n src_stride = (n_d - n_len) * self.c_0 // self.cp_align_len\n dst_stride = 0\n args = tik_instance, self.src_gm, ub_ori, data_offset, ub_offset, \\\n ori_nburst, burst_len, src_stride, dst_stride, self.cp_align_len\n _gm_to_ub_one(args)\n\n hwnoni = hw_d * n_len\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * n_len * c_d\n dst_cur = num_dhw * c_d\n nburst = n_len\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n dst_offset = n_before * dhw_d * c_d\n burst_len = n_len * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm[dst_offset],\n ub_ori,\n 0, 1, burst_len, 0, 0)", "def chord_dist(n1, n2):\n return min(((n2.node_id - n1.node_id) % (2 ** config.ring_size_bits)),\n ((n1.node_id - n2.node_id) % (2 ** config.ring_size_bits)),\n ) / float(2 ** config.ring_size_bits)", "def calculate_d3ct(self):\n data = deepcopy(self.ddct)\n data = data.set_index(['cell_line', 'replicate', 'Assay', 'time', 'treatment'])\n control = data.query('treatment == \"Control\"')#.reset_index(drop=True)\n tgfb = data.query('treatment == \"TGFb\"')#.reset_index(drop=True)\n control.index = control.index.droplevel(4)\n tgfb.index = tgfb.index.droplevel(4)\n return tgfb / control", "def convert_tcr(split_line, tcr_id):\n\n # Compile the necessary fields for output\n out_vals = {'sequence_id': tcr_id,\n 'sequence': split_line[params['sequence_index']], 'rev_comp': 'F',\n 'duplicate_count': split_line[params['abundance_index']]}\n\n # If the option has been set, only retain those sequences with a value equal to or greater than that threshold\n if input_args['abundance_filter']:\n if float(out_vals['duplicate_count']) < input_args['abundance_filter']:\n return\n\n # Infer productivity (using presence of CDR3 and Adaptive sequenceStatus value), take junction if there\n if split_line[params['cdr3_index']] and split_line[params['productivity']] == 'In':\n out_vals['junction_aa'] = split_line[params['cdr3_index']]\n out_vals['productive'] = 'T'\n\n # If the option to discard rearrangements lacking proper CDR3 motifs has been set, skip this entry if not C/F\n if input_args['motif_filter']:\n if out_vals['junction_aa'][0] != 'C':\n return\n elif chain == 'TRB' and out_vals['junction_aa'][-1] != 'F':\n return\n # Human TRAJ are a bit more flexible as to their junction-defining residue\n elif chain == 'TRA' and out_vals['junction_aa'][-1] not in ['F', 'W', 'C']:\n return\n\n # If the option to discard rearrangements lacking proper CDR3 motifs has been set, skip this entry if not C/F\n if input_args['junction_len_filter'] != 0:\n if len(out_vals['junction_aa']) < 
input_args['junction_len_filter']:\n return\n\n else:\n out_vals['junction_aa'] = ''\n out_vals['productive'] = 'F'\n\n # If the option to ignore non-productive rearrangements has been set, skip this row\n if input_args['productivity_filter']:\n return\n\n # If users wanted to they could infer the junction nt sequence, but I haven't, as it's redundant/not very useful\n out_vals['junction'] = ''\n\n # Extract the VDJ genes, fixing their nomenclature and combining together multiple possible calls\n for gene in ['v', 'd', 'j']:\n\n # First take Adaptive's best call\n call = split_line[params[gene + 'MaxResolved']]\n\n # Check whether the code wants to be looking for D genes\n if input_args['no_d'] and gene == 'd':\n sorted_call = ''\n\n # Check whether a gene has been called - if not (and not D) check in the ambiguous gene name ties field\n # NB ambiguous Ds are ignored by default, as there are only two options for TRBD and they're almost identical\n elif not call or call == 'unresolved':\n if gene == 'd' and input_args['no_d_ambiguity']:\n sorted_call = ''\n else:\n sorted_call = resolve_ambiguous_name(split_line, gene)\n\n # If it has full allele accuracy (indicated by an asterisk), tidy it up and take that as the result\n elif call[-3] == '*':\n sorted_call = check_gene(tidy_gene(call))\n\n # Depending on the (hidden) version of the input data, remaining ambiguity might be resolved in 2 places:\n # either in the GeneNameTies or AlleleNameTies fields - need to infer which is correct and deal appropriately\n else:\n\n if bits[params[gene + 'GeneNameTies']]:\n sorted_call = resolve_ambiguous_name(split_line, gene)\n elif bits[params[gene + 'GeneAlleleTies']]:\n sorted_call = resolve_ambiguous_allele(call, split_line, gene)\n\n # However some files are not even covered by that broad formatting, so you just need to allow whatever\n elif input_args['allow_ambiguity']:\n sorted_call = check_gene(tidy_gene(call))\n\n else:\n raise IOError(\"Unknown format on line \" + str(line_count) + \"! Cannot continue. 
\"\n \"\\n\\tAmbiguity for \" + gene.upper() + \" gene calls lacking allele info that is\"\n \"\\n\\t not resolved in either 'Gene' or 'Allele Ties' fields.\"\n \"\\n\\tTry re-running the script using the '-a' flag (to allow ambiguity),\"\n \"\\n\\t and check that the format of the output document is correct.\")\n\n # If option is selected, remove allele level information\n if input_args['strip_alleles']:\n sorted_call = strip_alleles(poss_alleles, sorted_call)\n\n out_vals[gene + '_call'] = sorted_call\n\n # Finally pad the missing values for the required columns\n for value in [x for x in out_headers if x not in out_vals]:\n out_vals[value] = ''\n\n return out_vals", "def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n\n return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)", "def s_dtw(t0, t1):\n n0 = len(t0)\n n1 = len(t1)\n C = np.zeros((n0 + 1, n1 + 1))\n C[1:, 0] = float('inf')\n C[0, 1:] = float('inf')\n for i in np.arange(n0) + 1:\n for j in np.arange(n1) + 1:\n C[i, j] = great_circle_distance(t0[i - 1][0], t0[i - 1][1], t1[j - 1][0], t1[j - 1][1]) + \\\n min(C[i, j - 1], C[i - 1, j - 1], C[i - 1, j])\n dtw = C[n0, n1]\n return dtw", "def tnc(self):\n\n if os.path.isfile(self.scenario_path + \"/output/TNCTrips.csv\"):\n\n # load the output folder tnc trip list\n trips = pd.read_csv(self.scenario_path + \"/output/TNCTrips.csv\",\n usecols=[\"trip_ID\", # unique trip surrogate key\n \"originTaz\", # trip origin TAZ\n \"destinationTaz\", # trip destination TAZ\n \"totalPassengers\"]) # passengers in vehicle excluding driver (0-6)\n\n # append distance and time skims\n # using am peak period hov-2 low value of time\n am_skims = om.open_file(self.scenario_path + \"/output/traffic_skims_AM.omx\")\n\n trips[\"distanceTotal\"] = [\n am_skims[\"AM_HOV2_L_DIST\"][o - 1, d - 1]\n for o, d in zip(trips[\"originTaz\"], trips[\"destinationTaz\"])\n ]\n\n trips[\"timeTotal\"] = [\n am_skims[\"AM_HOV2_L_TIME\"][o - 1, d - 1]\n for o, d in zip(trips[\"originTaz\"], trips[\"destinationTaz\"])\n ]\n\n am_skims.close()\n\n # create person and trip-based weights based on occupancy\n trips[\"passengers\"] = trips[\"totalPassengers\"]\n trips[\"weightPersonTrip\"] = (trips[\"totalPassengers\"] + 1) * 1 / self.sample_rate\n trips[\"weightTrip\"] = 1 * 1 / self.sample_rate\n\n return trips[[\"trip_ID\",\n \"passengers\",\n \"distanceTotal\",\n \"timeTotal\",\n \"weightPersonTrip\",\n \"weightTrip\"]]\n\n else:\n return False", "def dtw(x, y, dist='euclidean'):\n # sanity check\n r, c = len(x), len(y)\n assert r and c, \"the input cannot be empty array\"\n\n if np.ndim(x) == 1:\n x = np.array(x)[:, np.newaxis]\n if np.ndim(y) == 1:\n y = np.array(y)[:, np.newaxis]\n\n # initialization\n step = [(-1, -1), (-1, 0), (0, -1)]\n C = np.zeros((r + 1, c + 1))\n C[:, 0] = C[0, :] = np.inf\n\n # assign cost\n if isinstance(dist, str):\n C[1:, 1:] = cdist(x, y, dist)\n else:\n for i in range(1, r+1):\n for j in range(1, c+1):\n C[i, j] = dist(x[i-1], y[j-1])\n cost = C[1:, 1:].copy()\n\n # DP body\n for i in range(1, r+1):\n for j in range(1, c+1):\n if j == i == 1:\n continue\n C[i, j] += min([C[i+s[0], j+s[1]] for s in step])\n\n dtw_dist = C[-1, -1]/(r+c)\n acc_cost = C[1:, 1:]\n\n # trace back\n path = _traceback(C[1:, 1:], step)\n return dtw_dist, cost, acc_cost, path", "def distanceTo(\n self,\n trgtstn=None,\n instofst=None,\n 
trgtofst=None,\n refcoef=None,\n ddxyz=False,\n offsettype=None,\n ):\n diff = self.vectorTo(trgtstn, instofst, trgtofst, offsettype=offsettype)\n dist = np.sqrt(np.vdot(diff, diff))\n if not ddxyz:\n return dist\n diff /= dist\n return dist, -diff, diff", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])", "def distance(self, t1, t2, costs=unit_costs):\r\n #print costs\r\n #raw_input(\"pause\")\r\n # Cf. Zhang & Shasha:p.1252-1253\r\n #===========================================================================\r\n # Use an embedded function, so T1,T2, l1,l2, and TD are available from the\r\n # name space of the outer function and don't need to be dragged around in\r\n # each function call\r\n # TREEDIST function\r\n #===========================================================================\r\n def edit_dist(i, j):\r\n \"\"\"\r\n compute edit distance between two subtrees rooted in nodes i and j\r\n respectively\r\n \"\"\"\r\n # temporary array for forest distances\r\n FD = ForestDist()\r\n for n in range(l1[i], i+1):\r\n FD[ (l1[i],n), None ] = ( FD[ (l1[i],n-1), None ] + \r\n costs(T1[n], None) ) #NOT SURE ABOUT THE T1[n].label --> TO BE CHECKED\r\n \r\n for m in range(l2[j], j+1):\r\n FD[ None, (l2[j],m) ] = ( FD[ None, (l2[j],m-1) ] + \r\n costs(None, T2[m]) )\r\n \r\n for n in range(l1[i], i+1):\r\n for m in range(l2[j], j+1):\r\n if l1[n] == l1[i] and l2[m] == l2[j]:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + costs(T1[n], T2[m]))\r\n \r\n TD[n, m] = FD[ (l1[i],n), (l2[j],m) ]\r\n else:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + TD[n,m])\r\n return TD[i,j]\r\n \r\n \r\n #Compute T1[] and T2[]\r\n T1 = self.postorder(t1)\r\n T2 = self.postorder(t2)\r\n \r\n # Compute l()\r\n l1 = self.leftmost_leaf_descendant_indices(T1)\r\n l2 = self.leftmost_leaf_descendant_indices(T2)\r\n \r\n # LR_keyroots1 and LR_keyroots2\r\n kr1 = self.key_root_indices(l1)\r\n kr2 = self.key_root_indices(l2)\r\n \r\n # permanent treedist array\r\n TD = dict()\r\n for i in kr1:\r\n for j in kr2:\r\n edit_dist(i, j)\r\n \r\n #self.print_matrix(T1, T2, TD)\r\n \r\n return TD[i,j]", "def calculate(self, rxn: ComputedReaction) -> float:\n combos = chain(\n product(rxn.reactant_entries, rxn.product_entries),\n combinations(rxn.product_entries, 2),\n )\n distances = [\n self.cpd.shortest_domain_distance(\n combo[0].composition.reduced_formula,\n combo[1].composition.reduced_formula,\n )\n for combo in combos\n ]\n\n distance = self._mu_func(distances)\n return distance", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def edit_distance(self):\n\n edit_dist = 0\n misaligned = False\n\n try:\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = 
[g.strip() for g in gt_file.readlines()]\n\n num_symbols = 0\n bd = 0\n # Go through all lines (for polyphony)\n for i in range(len(out_lines)):\n # Skip comparing sequence staff line\n if 'Sequence staff' in gt_lines[i]:\n continue\n\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n #print('Out:',out_split)\n #print('Gt:',gt_split)\n\n num_symbols += len(gt_split) # for calculating symbol error rate\n misaligned = 'misaligned' in out_lines[i] # for ensembling\n\n _a = [symbol for symbol in out_split if symbol != '\\n' and symbol != -1]\n _b = [symbol for symbol in gt_split if symbol != '\\n' and symbol != -1]\n\n ed = self.levenshtein(_a,_b)\n \n # Account for barline at end (don't use when checking CRNN output)\n #if ed == 1 and out_split[-1] == 'barline' and gt_split[-1] != 'barline':\n # ed = 0\n \n edit_dist += ed\n \n staff_num = (i + 1) // 2\n \n if ed == 1:\n pass\n #print(self.output_file)\n #print('Edit dist (staff #%d): %d' % (staff_num, ed))\n \n if _a[-1] == 'barline' and _b[-1] != 'barline' or \\\n _a[-1] != 'barline' and _b[-1] == 'barline':\n #print('Barline diff') \n # print(self.output_file)\n bd = 1\n #print(_a)\n #print(_b)\n \n\n '''\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n '''\n except FileNotFoundError:\n print('Missing:',self.output_file, self.gt_file)\n return -1, 1, 0, False\n #print('Found:',self.output_file, self.gt_file)\n return edit_dist, num_symbols, bd, misaligned", "def get_dist(text1, text2, wv):\n t1 = lookup(text1, wv)\n t2 = lookup(text2, wv)\n dist = cos_sim(t1, t2)\n return dist", "def pairwise_correlation_difference(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n substract_m = np.subtract(corr_real, corr_rand)\r\n prwcrdst = LA.norm(substract_m)\r\n\r\n return prwcrdst, substract_m", "def dist_cost(s_vr, failed_vr, neighbor_vr, dist_matrix, w_a1, w_a2):\n #print s_vr, failed_vr, neighbor_vr\n dist_i_f = dist_matrix[s_vr][failed_vr + 1]\n dist_i_k = dist_matrix[s_vr][neighbor_vr + 1]\n dist = w_a1 * float(dist_i_f) + w_a2 * float(dist_i_k)\n #print \"d_i_f: \", dist_i_f, \", dist_i_k: \", dist_i_k\n return dist", "def distance_between(self, n1, n2):\n if self.distance_method == 'direct':\n n1_relevants = 0\n n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def prep_distance(self, t: str = 'float') -> np.ndarray:\n d = np.zeros([self.ic.shape[0]*self.ic.shape[1],\n self.ic.shape[1]*self.ic.shape[0]])\n\n u,v = np.meshgrid(np.arange(self.ic.shape[0]),\n np.arange(self.ic.shape[1]),\n sparse=False, indexing='xy')\n u = u.ravel()\n v = v.ravel()\n z = np.array([u,v]).T\n\n for (k,x) in enumerate(z):\n if not self.boundary:\n d[k,:] = 
np.array(np.sqrt((u - x[0])**2 + (v - x[1])**2),dtype=t)\n\n else:\n d[k,:] = self.torus(x[0],x[1],\n self.ic.shape[0],\n self.ic.shape[1]\n ).ravel()\n\n return d", "def c_align_split_n(self, tik_instance):\n n_d, d_d, h_d, w_d, _ = self.dst_shape\n dhw_d = d_d * h_d * w_d\n nc_one = self.ub_ele // dhw_d\n c_align = self.c_1 * self.c_0\n n_ub = nc_one // c_align\n\n all_core = _ceil_div(n_d, n_ub)\n ac_num = _set_core_num(all_core)\n\n with tik_instance.for_range(0, ac_num, block_num=ac_num) as num_core:\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n ub_loop = _set_loop(tik_instance, num_core, ac_num, all_core)\n\n with tik_instance.for_range(0, ub_loop) as num_u:\n core_index = num_u * ac_num + num_core\n\n with tik_instance.if_scope(core_index < all_core - 1):\n n_len = n_ub\n n_before = n_ub * core_index\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n with tik_instance.else_scope():\n n_before = (all_core - 1) * n_ub\n n_len = n_d - n_before\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n return tik_instance", "def calc_dist_diff(self, obj1_position, obj2_position, obj3_position):\n if self.prev_obj1_position is None and self.prev_obj2_position is None and self.prev_obj3_position is None:\n self.prev_obj1_position = obj1_position\n self.prev_obj2_position = obj2_position\n self.prev_obj3_position = obj3_position\n\n prev_diff_12 = self.task.calc_distance(self.prev_obj1_position, self.prev_obj2_position)\n current_diff_12 = self.task.calc_distance(obj1_position, obj2_position)\n\n prev_diff_13 = self.task.calc_distance(self.prev_obj1_position, self.prev_obj3_position)\n current_diff_13 = self.task.calc_distance(obj1_position, obj3_position)\n\n prev_diff_23 = self.task.calc_distance(self.prev_obj2_position, self.prev_obj3_position)\n current_diff_23 = self.task.calc_distance(obj2_position, obj3_position)\n \n norm_diff = (prev_diff_13 - current_diff_13) / prev_diff_13 + (prev_diff_23 - current_diff_23) / prev_diff_23 + (prev_diff_12 - current_diff_12) / prev_diff_12\n\n self.prev_obj1_position = obj1_position\n self.prev_obj2_position = obj2_position\n self.prev_obj3_position = obj3_position\n\n return norm_diff", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def distance(self, c1, c2):\n if c1 > c2:\n c1, c2 = c2, c1\n clusterDistance = self.clusterDistanceCache.get((c1,c2), None)\n if clusterDistance is None:\n totalDistance = FeatureComparisonResult() # 0.0\n count = 0\n for b1 in self.c2b[c1]:\n for b2 in self.c2b[c2]:\n totalDistance = totalDistance.add(self._baseDistance(b1, b2))\n count += 1\n if count == 0:\n clusterDistance = FeatureComparisonResult() # 0.0\n else:\n clusterDistance = totalDistance.normalize(count)\n self.clusterDistanceCache[(c1,c2)] = 
clusterDistance\n return clusterDistance", "def _Conv3DGrad(op, grad):\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n dx = nn_ops.conv3d_backprop_input_v2(\n shape_0,\n op.inputs[1],\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = nn_ops.conv3d_backprop_filter_v2(\n op.inputs[0],\n shape_1,\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = 0.5 * (dw + tf.transpose(dw, (0, 1, 2, 4, 3)))\n return dx, dw\n # # Pool grads across symmetric channels\n # dw_t = tf.transpose(\n # dw,\n # (3, 4, 0, 1, 2))\n # dw_symm_t = (0.5) * (dw_t + tf.transpose(\n # dw,\n # (4, 3, 0, 1, 2)))\n # dw_symm = tf.transpose(\n # dw_symm_t,\n # (2, 3, 4, 0, 1))\n # return dx, dw_symm", "def F_calcDMradius(i, t, st, dm, t1, tth):\n mr = st.mn*dm.mxkg_v[i]/(st.mn+dm.mxkg_v[i]) # reduced mass, kg\n # before thermalization (cooling), rx changes with time:\n rxco = np.array([ F_rxco2(tim,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF) for tim in t.time ]) # cm\n print \"-- Radius: rxco at t1 = \",F_rxco2(t1+0.1,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF)\n # after thermalization:\n rxth1 = F_rxth(dm.mx_v[i],st.rhoc,st.Temp) # cm (formula)\n rxth2 = np.interp(tth,t.time,rxco) \t# cm (rxco(tth))\n rxth = rxth1\n print \" rxth=%.2e , rxth1=%.2e , rxth2=%.2e\" % (rxth,rxth1,rxth2)\n for k in xrange(len(t.time)):\n if t.time[k]<t1:\n t.rxtag[k] = 'Rs '\n t.rx[k] = st.Rs*1.e+2\n elif t.time[k]<tth:\n t.rxtag[k] = 'rxco'\n t.rx[k] = rxco[k]\n elif t.time[k]>=tth:\n t.rxtag[k] = 'rxth'\n t.rx[k] = rxth\n return rxco, rxth", "def calc_distances_from_central(cluster, embedding):\n\n return calc_distances_in_embedding(cluster, embedding)" ]
[ "0.53767455", "0.4826406", "0.47805288", "0.4753317", "0.47484082", "0.46879807", "0.46756828", "0.46334925", "0.45374662", "0.4479596", "0.44660226", "0.44558287", "0.44372138", "0.44101122", "0.43605304", "0.4340066", "0.43351212", "0.4289836", "0.4284168", "0.42829153", "0.42755893", "0.42594606", "0.4254602", "0.42381892", "0.42364877", "0.42356375", "0.42312276", "0.42300743", "0.4198575", "0.4198551" ]
0.6212199
0
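The nb_tcrdist row above works on the same kind of integer-encoded CDR3 vectors. A sketch of a call on two CDR3s of unequal length, reusing the illustrative seq2vec and ident_dm from the earlier sketch; the package's real tcr_nb_distance_matrix default is a substitution-derived matrix that is not reproduced here, so a matrix is passed explicitly:

# ntrim/ctrim drop the first 3 and last 2 residues from the scored region;
# the length difference contributes len_diff * gap_penalty to the final distance.
a = seq2vec('CASSLGREYEQYF')   # length 13
b = seq2vec('CASSPGQGEVFF')    # length 12
d = nb_tcrdist(a, b, distance_matrix=ident_dm,
               dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2,
               fixed_gappos=True)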
Computes the tcrdist distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
    return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in 
combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances", "def traj_loc_distance(trajs, locs):\n return distance_matrix(\n np.asarray([t.last_pos() for t in trajs]),\n locs[:,2:4]\n )", "def reduce_mtx(distmat, indices):\r\n return distmat.take(indices, 0).take(indices, 1)", "def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n #print(indices.shape)\n #print(seqs_mat.shape)\n #print(seqs_L.shape)\n return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)", "def simtraj(trans_mat, tsteps, stt=0):\n seq = zeros(tsteps)\n curr = stt\n \n nstates = trans_mat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n for tt in range(tsteps):\n seq[tt] = curr\n weights = copy(trans_mat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n return seq", "def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n #2 Check are the keys and values tuples. 
Do the keys only contain integers and the vlaues only strings\r\n i = 0\r\n while len(dict_alignedSequences) > i:\r\n #checking for keys and values as tuples\r\n if isinstance(list(dict_alignedSequences.keys())[i], tuple) == False or isinstance(list(dict_alignedSequences.values())[i], tuple) == False:\r\n check = False\r\n break\r\n #checking keys for integers\r\n if isinstance(list(dict_alignedSequences.keys())[i][0], int) == False or isinstance(list(dict_alignedSequences.keys())[i][1], int) == False:\r\n check = False\r\n break\r\n #checking values for strings\r\n if isinstance(list(dict_alignedSequences.values())[i][0], str) == False or isinstance(list(dict_alignedSequences.values())[i][1], str) == False:\r\n check = False\r\n break\r\n \r\n #increment the counter for while loop\r\n i += 1\r\n \r\n #3 Check sequences contain aligned DNA and are of equal length\r\n for key in dict_alignedSequences:\r\n if is_aligned_dna(dict_alignedSequences[key][0]) == False or is_aligned_dna(dict_alignedSequences[key][1]) == False:\r\n check = False\r\n break\r\n if len(dict_alignedSequences[key][0]) != len(dict_alignedSequences[key][1]):\r\n check = False\r\n break\r\n \r\n #final evalauation if data is usable\r\n if check == False:\r\n raise TypeError ('malformed input')\r\n \r\n #get number of sequences\r\n matrixdim = howmany_sequences(dict_alignedSequences)\r\n #initialize dist matrix\r\n distMatrix = init_Dist_Matrix(matrixdim)\r\n \r\n \r\n for i in dict_alignedSequences.keys():\r\n # useing the key i to get the corisponding aligned sequences \r\n seq = dict_alignedSequences[i]\r\n #calculate distances between the sequences\r\n distance = calculate_distance(seq[0],seq[1])\r\n #markdown result at the corrsiponding place in the distmatrix\r\n distMatrix[i[0]][i[1]] = distance\r\n distMatrix[i[1]][i[0]] = distance\r\n \r\n return(distMatrix)", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def 
measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists", "def distance_dmc(distances, Ks, points):\n doors = []\n for d in distances:\n dmc = []\n for k in Ks:\n print \"==========================\", k, \"==========================\"\n clusters = create_clusters(25, k)\n\n kmeans(points, clusters)\n # print \"Finished creating kmeans algorithm\"\n\n create_backbone_network(GRAPH, clusters, d)\n # print \"Finished creating backbone network\"\n\n find_all_shortest_paths(clusters, SP_TABLE, GRAPH)\n # print \"Finished finding all shortest paths\"\n\n for clst in clusters:\n clst.inter_cost = inter_cost(clst)\n clst.intra_cost = intra_cost(points, clst)\n clst.dm_cost = door_matt_cost(clusters, clst, SP_TABLE)\n\n ret = total_cost(clusters)\n dmc.append(ret[2])\n doors.append(sum(dmc))\n draw_door_matts(map(lambda d: float(format(d, \".4g\")), distances), doors)", "def compute_distmat(data, distfn):\n out = np.zeros((data.shape[0], data.shape[0]))\n for i in xrange(data.shape[0]):\n for j in xrange(data.shape[0]):\n if i == j: continue\n out[i,j] = distfn(data[i,:,:], data[j,:,:])\n return out", "def calc_distances(self, templates_features=None, batch_size=50000, th=0.2, beta=1.1):\n if templates_features is None:\n templates_features = self.calc_templates()\n distances = np.empty((self.pairs.shape[0]), dtype=np.float32)\n start, end = 0, 0\n for batch in self.batches(self.pairs, batch_size):\n t1 = np.empty((len(batch), self.features_dim), dtype=np.float32)\n t2 = np.empty((len(batch), self.features_dim), dtype=np.float32)\n start = end\n end += len(batch)\n # attenuate = np.empty((len(batch)), dtype=np.bool)\n for i, pair in enumerate(batch):\n t1[i] = templates_features[pair[0]]\n t2[i] = templates_features[pair[1]]\n # lomax1 = np.max(self.quality_scores[pair[0]])\n # lomax2 = np.max(self.quality_scores[pair[1]])\n # attenuate[i] = lomax1 <= th or lomax2 <= th\n\n ## find cosine distance, assume template descriptors are normalized\n distances[start:end] = 1 - np.einsum(\"ij,ij->i\", t1, t2)\n # distances[start:end] = np.where(attenuate, distances[start:end], distances[start:end] / beta)\n return distances", "def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)", "def dist_mat(objs: t.Union[t.Sequence[nx.Graph], 
t.Sequence[t.Collection[SeqGene]]], n_jobs: int = 10) -> np.ndarray:\n if isinstance(objs[0], nx.Graph):\n dist = graph_dist\n else:\n dist = seq_dist\n size = len(objs)\n base = np.zeros(shape=(size, size))\n staged_data = []\n for i in range(size):\n for j in range(size):\n if i <= j:\n staged_data.append((i, objs[i], j, objs[j]))\n staged_data = tqdm(\n staged_data,\n desc='Distance matrix')\n\n with Pool(n_jobs) as workers:\n distances = workers.starmap(dist, staged_data)\n for i, j, d in distances:\n base[i][j] = d\n base[j][i] = d\n return base", "def point_distances(src_points, gt_points):\n distances = EuclideanDistances(np.matrix(src_points), np.matrix(gt_points))\n return np.array(distances)", "def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n q_L = seq_vec1.shape[0]\n s_L = seq_vec2.shape[0]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n tmp_dist = 0\n for i in range(ntrim, q_L - ctrim):\n tmp_dist += distance_matrix[seq_vec1[i], seq_vec2[i]]\n return tmp_dist * dist_weight\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n \"\"\"If we are not aligning, use a fixed gap position relative to the start of the CDR3\n that reflects the typically longer and more variable-length contributions to\n the CDR3 from the J than from the V. For a normal-length\n CDR3 this would be after the Cys+5 position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3).\n Use an earlier gappos if lenshort is less than 11.\"\"\"\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n \"\"\"The CYS and the first G of the GXG are 'aligned' in the beta sheet\n the alignment seems to continue through roughly CYS+4\n ie it's hard to see how we could have an 'insertion' within that region\n gappos=1 would be a insertion after CYS\n gappos=5 would be a insertion after CYS+4 (5 rsds before the gap)\n the full cdr3 ends at the position before the first G\n so gappos of len(shortseq)-1 would be gap right before the 'G'\n shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4\"\"\"\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seq_vec1[n_i], seq_vec2[n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seq_vec1[q_L - 1 - c_i], seq_vec2[s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n \"\"\"Note that weight_cdr3_region is not applied to the gap penalty\"\"\"\n return min_dist * dist_weight + len_diff * gap_penalty", "def _compute_snp_distances(self, task):\n genetic_map = task[\"genetic_map\"]\n temp = task[\"snps\"]\n\n # merge genetic map for this chrom\n temp = pd.concat([temp, genetic_map], 
ignore_index=False, sort=True)\n\n # sort based on pos\n temp = temp.sort_values(\"pos\")\n\n # fill recombination rates forward\n temp[\"rate\"] = temp[\"rate\"].fillna(method=\"ffill\")\n\n # assume recombination rate of 0 for SNPs upstream of first defined rate\n temp[\"rate\"] = temp[\"rate\"].fillna(0)\n\n # get difference between positions\n pos_diffs = np.ediff1d(temp[\"pos\"])\n\n # compute cMs between each pos based on probabilistic recombination rate\n # https://www.biostars.org/p/123539/\n cMs_match_segment = (temp[\"rate\"] * np.r_[pos_diffs, 0] / 1e6).values\n\n # add back into temp\n temp[\"cMs\"] = np.r_[0, cMs_match_segment][:-1]\n\n temp = temp.reset_index()\n\n # use null `map` values to find locations of SNPs\n snp_indices = temp.loc[temp[\"map\"].isnull()].index\n\n # use SNP indices to determine boundaries over which to sum cMs\n start_snp_ix = snp_indices + 1\n end_snp_ix = np.r_[snp_indices, snp_indices[-1]][1:] + 1\n snp_boundaries = np.c_[start_snp_ix, end_snp_ix]\n\n # sum cMs between SNPs to get total cM distance between SNPs\n # http://stackoverflow.com/a/7471967\n c = np.r_[0, temp[\"cMs\"].cumsum()][snp_boundaries]\n cM_from_prev_snp = c[:, 1] - c[:, 0]\n\n temp = temp.loc[temp[\"map\"].isna()]\n\n # add back into temp\n temp[\"cM_from_prev_snp\"] = np.r_[0, cM_from_prev_snp][:-1]\n\n # restore index\n temp = temp.set_index(\"index\")\n\n return pd.DataFrame(temp[\"cM_from_prev_snp\"])", "def seq_dist(i1: int, s1: t.Collection[SeqGene],\n i2: int, s2: t.Collection[SeqGene]) -> t.Tuple[int, int, float]:\n m1, m2 = map(\n lambda s: dict(chain.from_iterable(\n ((pos, aa) for pos, aa in zip(g.Pos, g.Seq)) for g in s)),\n [s1, s2])\n d = 0.\n for p in set(m1) | set(m2):\n if p in m1 and p in m2 and m1[p] == m2[p]:\n continue\n d += 1\n return i1, i2, d", "def computeCDR3PWDist(seqs, gap_open=3, gap_extend=3, matrix=parasail.blosum62, useIdentity=False):\n cache = CachedNWDistance(seqs, matrix=matrix, gap_open=gap_open, gap_extend=gap_extend, useIdentity=useIdentity)\n\n indices = cache.indices()\n L = indices.shape[0]\n pwdist = np.nan * np.zeros((L, L))\n \n for i, j in itertools.product(range(L), range(L)):\n \n if i <= j:\n d = cache.metric(indices[i], indices[j])\n pwdist[i, j] = d\n pwdist[j, i] = d\n\n pwdist = pd.DataFrame(pwdist, columns=cache.elements, index=cache.elements)\n return pwdist", "def distance_matrix(cities):\n\n return [[city1.distance(city2) for city2 in cities]\n for city1 in cities]", "def get_closeness(pats,seq_dict,isdiagonal=False,log=False):\n similarities= np.zeros((len(pats),len(pats)))\n intersectCount = np.zeros((len(pats),len(pats)))\n for i,patI in enumerate(pats):\n seqsI=list(zip(*seq_dict[patI]))[1]\n for j,patJ in enumerate(pats):\n seqsJ=list(zip(*seq_dict[patJ]))[1]\n if isdiagonal and i!=j: continue\n if j>i: continue\n if i==j and len(seqsI) < clim:\n similarities[i][j]=0\n continue\n keys1=set(list(zip(*seq_dict[pats[i]]))[0])\n keys2=set(list(zip(*seq_dict[pats[j]]))[0])\n intersectCount[i][j] = len(list(keys1 & keys2))\n random_score=get_random_score(seqsI,seqsJ,nsample=10)\n identical_score=get_identical_score(seqsI,seqsJ)\n score=0.0\n norm=0\n if mixvec:\n vecI=_seqs2vec(seqsI)\n vecJ=_seqs2vec(seqsJ)\n score=get_subscore_mixvec(vecI,vecJ)\n else:\n score=get_subscore_pairwise(list(seqsI),list(seqsJ)) \n print \"idscore (max):\", round(identical_score,4), \"randscore (min):\", round(random_score,4), \"score:\", round(score,4)\n if log: similarity = 
-math.log(1-((float(score)-float(random_score))/(float(identical_score)-float(random_score))))\n else: similarity = ((float(score)-float(random_score))/(float(identical_score)-float(random_score)))\n similarities[i][j]= similarity\n similarities[j][i]= similarity\n print patI,patJ,similarity\n dfDists=pd.DataFrame(similarities,columns=pats,index=pats)\n dfCount=pd.DataFrame(intersectCount,columns=pats,index=pats)\n return dfDists, dfCount", "def _data_labels_distance(self, samples, tfidf_dict, distance_metric='cosine'):\n \n def distance_fn(x):\n return sklearn.metrics.pairwise.pairwise_distances(\n x, x[0], metric=distance_metric).ravel() * 100\n\n base_doc_vector = np.fromiter(tfidf_dict.values(),float)\n base_doc_keys = list(tfidf_dict.keys())\n vectors = [base_doc_vector]\n for sample in samples:\n sample_vector = np.zeros(len(base_doc_keys))\n for token in sample.split():\n token_index = base_doc_keys.index(token)\n sample_vector[token_index] = base_doc_vector[token_index]\n vectors.append(sample_vector)\n\n\n distances = distance_fn(sp.sparse.csr_matrix(vectors))\n return np.array(vectors), distances", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def test_sequence_dist_all_metrics(metric):\n unique_seqs = np.array([\"AAA\", \"ARA\", \"AFFFFFA\", \"FAFAFA\", \"FFF\"])\n seqs2 = np.array([\"RRR\", \"FAFA\", \"WWWWWWW\"])\n dist_mat = ir.ir_dist.sequence_dist(unique_seqs, metric=metric, cutoff=8, n_jobs=2)\n assert dist_mat.shape == (5, 5)\n\n dist_mat = ir.ir_dist.sequence_dist(\n unique_seqs, seqs2, metric=metric, cutoff=8, n_jobs=2\n )\n assert dist_mat.shape == (5, 3)" ]
[ "0.6867072", "0.6173414", "0.61204684", "0.59654254", "0.5629023", "0.5607086", "0.55902237", "0.55837727", "0.55610365", "0.5435831", "0.54259413", "0.5407338", "0.5377603", "0.5373048", "0.5354197", "0.5333928", "0.5265103", "0.52603585", "0.5210351", "0.5201209", "0.51830506", "0.51698697", "0.51546896", "0.514426", "0.51390195", "0.5131739", "0.51274806", "0.50870544", "0.505327", "0.50357234" ]
0.6814871
1
Computes the Levenshtein edit distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1): #print(indices.shape) #print(seqs_mat.shape) #print(seqs_L.shape) return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n \n q_L = seq_vec1.shape[0]\n s_L = seq_vec2.shape[0]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n dist = 0\n for i in range(q_L):\n dist += distance_matrix[seq_vec1[i], seq_vec2[i]]\n return dist\n\n ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]]) # substitution\n return ldmat[row, col]", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)", "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i 
in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def iterative_levenshtein(s, t):\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n #for r in range(rows):\n #print(dist[r])\n \n \n return dist[row][col]", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(self, seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = range(1, len(seq2) + 1) + [0]\n for x in xrange(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in xrange(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]", "def get_levenshtein_distance(a, b):\r\n n, m = len(a), len(b)\r\n if n > m:\r\n # Make sure n <= m, to use O(min(n,m)) space\r\n a, b = b, a\r\n n, m = m, n\r\n current_row = range(n+1) # Keep current and previous row, not entire matrix\r\n\r\n for i in range(1, m+1):\r\n previous_row, current_row = current_row, [i]+[0]*n\r\n for j in range(1, n+1):\r\n add, delete, change = previous_row[j]+1, current_row[j-1]+1, previous_row[j-1]\r\n if a[j-1] != b[i-1]:\r\n change += 1\r\n current_row[j] = min(add, delete, change)\r\n return current_row[n]", "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. 
This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw", "def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]", "def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]", "def compute_backpointers(s0, s1): #Tillverkar en array med backpointrs\r\n if s0 == None or s1 == None:\r\n raise Exception('Both s0 and s1 have to be set')\r\n rows = len(s0)+1 # antalet rader\r\n columns = len(s1)+1 # antalet kolumner\r\n\r\n ####### Tillverkar Levenshtein matrisen ########\r\n # Gör en tom matris med nollor\r\n distance = [[0 for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # Gör de yttre lagrerna i matrisen 0 -> len(str) vertikalt och horisontellt\r\n for i in range(1,rows):\r\n distance[i][0] = i\r\n for i in range(1,columns):\r\n distance[0][i] = i\r\n\r\n # Beräknar kostnaderna för varje plats inne i matrisen och sätter in dem\r\n # kollar om bokstaven på indexet i de två orden är samma i sådana fall kostar det 0\r\n # och skall ha samma värde som diagonalt innan, annars kostar det 1 från över eller underself.\r\n for column in range(1,columns):\r\n for row in range(1,rows): # kolla varje rad i vare column\r\n if s0[row-1] == s1[column -1]: # om det är samma bokstav kostar det 0\r\n c = 0\r\n else: # annars kostar det 2\r\n c = 2\r\n distance[row][column] = min(distance[row-1][column] + 1,distance[row][column-1] + 1,distance[row-1][column-1] + c)\r\n # raden över säger att det minsta värdet av över eller bredvid + 1 eller diagonalt innan plus (0 eller 2)\r\n # skall sättas in på platsen i matrisen.\r\n\r\n # det minsta avståndet är\r\n cost = distance[row][column]\r\n print(\"totalkostnaden är\")\r\n print(cost)\r\n\r\n\r\n ####### Tillverkar backptr-matrisen ########\r\n # Tillverkar en tom matris med [0,0] för till backptr-matrisen\r\n backptr = [[[0, 0] for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # går igenom platserna i Levenshtein matrisen bakirfrån\r\n for column in range(columns-1,0,-1):\r\n for row in 
range(rows-1,0,-1):\r\n # Om värdet till vänster är det minsta: peka vänster\r\n if distance[row][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row\r\n backptr[row][column][1] = column -1\r\n # Om värdet över är det minsta: peka upp\r\n if distance[row-1][column] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row -1\r\n backptr[row][column][1] = column\r\n # om värdet diagonalt är minst: peka på diagonalt\r\n if distance[row-1][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row-1\r\n backptr[row][column][1] = column -1\r\n\r\n # Gör yttervärdena i matrisen, (OBS behövs ej)\r\n for i in range(0,rows):\r\n j = i-1\r\n backptr[i][0][0] = j\r\n backptr[i][0][1] = 0\r\n for i in range(0,columns):\r\n j = i-1\r\n backptr[0][i][1] = j\r\n backptr[0][i][0] = 0\r\n\r\n return backptr", "def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5", "def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result", "def levenshtein_similarity(self, top, rows):\n if len(rows) > 1:\n return (\n [(1 - editdistance.eval(top, rows[i]) / max(len(top), len(rows[i]))) for i in\n range(0, len(rows))])\n else:\n return 1", "def damerau_levenshtein_distance(comp_sec):\n s1 = comp_sec['log_trace']\n s2 = comp_sec['sim_trace']\n p1 = comp_sec['proc_log_trace']\n p2 = comp_sec['proc_sim_trace']\n w1 = 
comp_sec['wait_log_trace']\n w2 = comp_sec['wait_sim_trace']\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in range(-1,lenstr1+1):\n d[(i,-1)] = i+1\n for j in range(-1,lenstr2+1):\n d[(-1,j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s1[i] == s2[j]:\n t1 = p1[i] + w1[i]\n if t1 > 0:\n b1 = (p1[i]/t1)\n b2 = (w1[i]/t1)\n cost = (b1*abs(p2[j]-p1[i])) + (b2*abs(w2[j]-w1[i]))\n else:\n cost = 0\n else:\n cost = 1\n d[(i,j)] = min(\n d[(i-1,j)] + 1, # deletion\n d[(i,j-1)] + 1, # insertion\n d[(i-1,j-1)] + cost, # substitution\n )\n if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:\n d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition\n return d[lenstr1-1,lenstr2-1]", "def edit_distance(left, right):\n similarities = np.zeros((len(left) + 1, len(right) + 1), dtype=np.int32)\n similarities[:, 0] = range(len(left) + 1)\n similarities[0, :] = range(len(right) + 1)\n\n for l in range(1, len(left) + 1):\n for r in range(1, len(right) + 1):\n sub_cost = 0 if left[l - 1] == right[r - 1] else 1\n similarities[l][r] = min(similarities[l - 1][r] + 1,\n similarities[l][r - 1] + 1,\n similarities[l - 1][r - 1] + sub_cost)\n return similarities[len(left), len(right)]", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n\n return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)", "def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]", "def leveinshtein_distance(source,target):\r\n\t#Step 1\r\n\ts_len=len(source)\r\n\tt_len=len(target)\r\n\tcost=0\r\n\tif(s_len==0):\r\n\t\treturn t_len\r\n\tif(t_len==0):\r\n\t\treturn s_len\r\n\tprint(\"Dimensions:\\n\\tN:%d\\n\\tM:%d\"%(s_len,t_len))\r\n\t#Step 2\r\n\tmatrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]\r\n\t#Initialize first row 0..s_len\r\n\tfor idx in range(0,s_len+1):\r\n\t\tmatrix[idx][0]=idx\r\n\t#Initialize the first column 0..t_len\r\n\tfor idx in range(0, t_len+1):\r\n\t\tmatrix[0][idx]=idx\r\n\tprint(\"===Original===\")\r\n\tprint_matrix(matrix,source,target)\r\n\t#Step 3\r\n\tfor i in range(1,s_len+1):\r\n\t\tch=source[i-1]\r\n\t\t#print(ch)\r\n\t\t#Step 4\r\n\t\tfor j in range(1,t_len+1):\r\n\t\t\t#print(\">%s\"%target[j-1])\r\n\t\t\t#Step 
5\r\n\t\t\tif ch==target[j-1]:\r\n\t\t\t\tcost=0\r\n\t\t\telse:\r\n\t\t\t\tcost=1\r\n\t\t\t#Step 6\r\n\t\t\t\r\n\t\t\t#print(\"(i,j)=>(%d,%d)\"%(i,j))\r\n\t\t\t#print(matrix[i][j])\r\n\t\t\tmatrix[i][j]=minimum(\r\n\t\t\t\tmatrix[i-1][j]+1,\r\n\t\t\t\tmatrix[i][j-1]+1,\r\n\t\t\t\tmatrix[i-1][j-1]+cost\r\n\t\t\t)\r\n\tprint(\"===Final Matrix===\")\r\n\tprint_matrix(matrix,source,target)\r\n\treturn matrix[s_len-1][t_len-1]", "def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]" ]
[ "0.7585912", "0.65193325", "0.6018155", "0.5973261", "0.5960617", "0.5927266", "0.5902716", "0.5879615", "0.5879615", "0.5809641", "0.57759446", "0.5760104", "0.57229745", "0.5714925", "0.5702276", "0.5691489", "0.5681923", "0.5659949", "0.5631824", "0.56016165", "0.55863863", "0.558498", "0.55749315", "0.5506777", "0.54895747", "0.5479255", "0.5467247", "0.544265", "0.5356773", "0.5319879" ]
0.7149528
1
Store the names and grades of school students.
def __init__(self): self.students = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def __init__(self):\n self.students = []\n self.grades = {}\n self.isSorted = True", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def __init__(self, first_name, last_name, address):\n\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n\n # Creates dictionary for each student with the label & info.\n\n self.info = {\n 'first name': self.first_name,\n 'last name': self.last_name,\n 'address': self.address,\n }", "def __init__(self):\n self.students = []\n self.grades = {}\n self.is_sorted = True", "def students_data():\n\n return [\n {'name': 'Alexey', 'rate': 2, 'course': 'Python'},\n {'name': 'Vali', 'rate': 5, 'course': 'Java'},\n {'name': 'Olga', 'rate': 4, 'course': 'Python'},\n {'name': 'Frank', 'rate': 5, 'course': 'Python'},\n {'name': 'Masha', 'rate': 3, 'course': 'Java'},\n {'name': 'Vasily', 'rate': 2, 'course': 'Java'},\n {'name': 'Daria', 'rate': 3, 'course': 'Python'},\n {'name': 'Nickname', 'rate': 4, 'course': 'Python'},\n {'name': 'Fort', 'rate': 3, 'course': 'Java'},\n {'name': 'Lama', 'rate': 4, 'course': 'Java'},\n {'name': 'Pop', 'rate': 2, 'course': 'Python'},\n {'name': 'Sort', 'rate': 3, 'course': 'Python'},\n {'name': 'Elya', 'rate': 5, 'course': 'Java'},\n {'name': 'Tolik', 'rate': 4, 'course': 'Python'},\n ]", "def __init__(self):\n self.students = [] # List of Student objects.\n self.grades = {} # Dictionary to map IDNumber -> list of grades.\n self.isSorted = True # True if self.students is sorted.", "def __init__(self, name, surname):\n\t\t\n\t\tself.grades = {}\n\t\tself.attendance = 0\n\t\t\n\t\tif not (isinstance(name, str) and isinstance(surname, str)):\n\t\t\tname, surname = \"None\", \"None\"\n\t\tself.name, self.surname = name, surname", "def __init__(self):\n self.students=[]\n self.grades={}\n self.isSorted=True", "def __init__(self): \r\n self.students = [] #list of students\r\n self.grades = {} #id Num -> list of grades\r\n self.isSorted = True", "def make_gradebook(roster, grades, sub_info):\n gradebook = []\n for student in roster.keys():\n s = {}\n # fill student file with evaluation grades\n for day, score in zip(sub_info.keys(), grades):\n s[str(day)] = score[student]\n s['total'] = sum(s.values())\n s['username'] = student\n gradebook.append(s)\n return gradebook", "def update_course_info(self, grades_file_info):\n grades_file = os.path.join(self.path, \"grades.txt\")\n sep, header = grades_file_info\n try:\n for info in file_reading_gen(grades_file, 4, sep, header):\n # StudentID | Course | Grade | InstructorID\n student_id = info[0]\n course_code = info[1]\n grade = info[2]\n instructor_id = info[3]\n\n if student_id not in self.students:\n raise KeyError(\"Student with student id {} does not exists in students.txt\".format(student_id))\n if instructor_id not in self.instructors:\n raise KeyError(\"Instructor with instructor id {} does not exists in 
instructors.txt\".format(instructor_id))\n\n student = self.students[student_id]\n instructor = self.instructors[instructor_id]\n\n student.courses_completed.add(course_code)\n student.grades[course_code] = grade\n\n instructor.courses_taught.add(course_code)\n instructor.student_count[course_code] += 1\n except ValueError:\n raise ValueError(\"Invalid data in grades.txt\")\n except FileNotFoundError as e:\n print('Missing grades.txt.\\n' + str(e))", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def add_grades(self, subject_name, grade_list, attendance=True): \n\t\n\t\tif (isinstance(subject_name, str) and isinstance(grade_list, list)):\n\t\t\tfor grade in grade_list:\n\t\t\t\tself.grades.setdefault(subject_name, []).append(grade)\n\t\t\tself.attendance += 1 if attendance else 0", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def addStudent(self, student):\n if student in self.students:\n raise ValueError(\"Duplicate Student\")\n self.students.append(student)\n self.grades[student.getIDNumber()] = []\n self.isSorted = False", "def add_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n student_tuple = student_info._make(no_space.split(\",\"))\r\n StudentRoster.append(student_tuple)", "def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)", "def _create_students(self):\n def mktime(str_date):\n return time.mktime(time.strptime(\n str_date, CountSkillCompletion.DATE_FORMAT))\n self.day1 = '2015-01-01'\n self.day2 = '2015-01-02'\n self.day3 = '2015-01-03'\n self.day4 = '2015-01-04'\n c = SkillCompletionTracker.COMPLETED\n p = SkillCompletionTracker.IN_PROGRESS\n # progress string for students\n students_progress = [\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},\n self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},\n self.skill2.id : {p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day1)}},\n {} # No progress\n ]\n for index, progress in enumerate(students_progress):\n student = models.Student(user_id=str(index))\n student.put()\n comp = models.StudentPropertyEntity.create(\n student=student,\n property_name=SkillCompletionTracker.PROPERTY_KEY)\n comp.value = transforms.dumps(progress)\n comp.put()", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def save_grades(self):\r\n try:\r\n try:\r\n self.grades.append(int(app.entry1.get()))\r\n self.grades.append(int(app.entry2.get()))\r\n self.grades.append(int(app.entry3.get()))\r\n self.grades.append(int(app.entry4.get()))\r\n self.grades.append(int(app.entry5.get()))\r\n except:\r\n app.info.configure(text='INFO: Warning, The Type of the Grade is incorrect.')\r\n # If the user pressed on 'Save Grades' with an entry value other than 'int', 'Info' Label shows the message:\r\n # \"INFO: Warning, The Type of the Grade is incorrect.\"\r\n number=5\r\n for index in range(len(self.grades)):\r\n open_data.student[open_data.name][number] = 
self.grades[index]\r\n number += 1\r\n self.grades=[]\r\n open_data.show_data()\r\n except AttributeError:\r\n if len(app.tree.get_children()) == 0:\r\n app.info.configure(text=\"INFO: Please Load the Files First.\", font=('', '7'))\r\n # If the user pressed on the treeview before loading the file, 'Info' Label shows the message:\r\n # 'INFO: Please Load The Files First.'\r\n except:\r\n app.info.configure(text=\"INFO: Warning, The Type of the Grade is incorrect.\", font=('', '7'))\r\n # If the user enters invalid type, it will give that warning.\r", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def create_local_school():\n students = Student.create_by_csv(\"data/students.csv\")\n mentors = Mentor.create_by_csv(\"data/mentors.csv\")\n return students, mentors", "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def add_student_data(connection,fname,lname,class_n,marks):\r\n with connection:\r\n connection.execute(INSERT_STUDENT,(fname,lname,class_n,marks))", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def get_grade_by_student(first_name):\n\n QUERY = \"\"\"\n SELECT g.project_title, g.grade \n FROM Students AS s JOIN Grades AS g \n ON s.github = g.student_github\n WHERE s.first_name = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (first_name,))\n row = db_cursor.fetchall()\n \n if row != []:\n for project in row:\n print 'Grade for %s: %s' %(project[0], project[1])\n else:\n print 'Please try again and enter a FIRST NAME'", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html" ]
[ "0.70357305", "0.61918634", "0.6180321", "0.61655444", "0.61187875", "0.6078942", "0.6064046", "0.60455054", "0.60356414", "0.60095865", "0.59675676", "0.5928267", "0.5887183", "0.5879371", "0.5865206", "0.58481586", "0.58402777", "0.5822257", "0.5813019", "0.5812019", "0.58046603", "0.57986885", "0.5746263", "0.57218164", "0.57161504", "0.5708563", "0.5705498", "0.56950647", "0.56697726", "0.56639487" ]
0.6250706
1
Add a student to a grade in the roster.
def add_student(self, name: str, grade: int) -> None: school_grade = self.students.setdefault(grade, []) school_grade.append(name) school_grade.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def addStudent(self, student):\n if student in self.students:\n raise ValueError(\"Duplicate Student\")\n self.students.append(student)\n self.grades[student.getIDNumber()] = []\n self.isSorted = False", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def AddGrade(self, student, discipline, grade_value):\n if not self.__data['s'].HasKey(student.ID):\n raise NonExistentItemIDError(\"Student does not exist.\")\n if not self.__data['d'].HasKey(discipline.ID):\n raise NonExistentItemIDError(\"Discipline does not exist.\")\n self.__data['g'].AddItems([Grade(self.__data['g'].GetSafeKey(), student.ID, discipline.ID, grade_value)])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def AddStudent(self, event):\n pass", "def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)", "def AddStudent(self, student_name):\n self.__data['s'].AddItems([Student(self.__data['s'].GetSafeKey(), student_name)])\n self.__undo_list.append(['s'])\n self.__redo_list.clear()", "def add_student(body): # noqa: E501\n if connexion.request.is_json:\n body = Student.from_dict(connexion.request.get_json()) # noqa: E501\n return student_service.add_student(student=body)", "def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))", "def add_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n student_tuple = student_info._make(no_space.split(\",\"))\r\n StudentRoster.append(student_tuple)", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! 
\\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def addStudent():\n name = input(\"Name: \")\n number = input(\"Number: \")\n gpa = input(\"GPA: \")\n field = input(\"Field: \")\n student = Student(name, number, gpa, field)\n if t.insert(number, student):\n ht.insert(student)\n print(name, \"added successfully.\")\n else:\n print(\"student number is not valid.\")", "def test_add_student():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def add_student(body): # noqa: E501\n if connexion.request.is_json:\n student = Student.from_dict(connexion.request.get_json()) # noqa: E501\n # print(student)\n result = student_service.add_student(student)\n return result\n\n return 'input no bueono', 400", "def assign_grade(github, title, grade):\n QUERY = \"\"\"\n INSERT INTO Grades VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n\n print \"Successfully graded %s with a %s on %s\" % (github, grade, title)", "def add_student():\n if request.method == 'POST':\n db.add_student(request.form)\n return redirect('/registry')\n else:\n return render_template('add.html')", "def insert_grade(grade, form, rc):\n dbname = form[\"dbname\"]\n collname = \"grades\"\n try:\n coll = rc.client[dbname][collname]\n except (KeyError, AttributeError):\n abort(404)\n try:\n added = rc.client.insert_one(dbname, collname, grade)\n except Exception:\n traceback.print_exc()\n raise", "def _save_grade(self):\r\n student = self._student('POST', key='grader_id')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n # Update the number of essays the student has graded\r\n student.grade_peer_essay()\r\n return self._success_response({})", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "def add_student():\n # import pdb; pdb.set_trace()\n if request.method == \"POST\":\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n\n html = render_template(\"added_student_confirmation.html\",\n first=first,\n last=last,\n github=github)\n\n return html", "def assign_grade(github, title, grade):\n QUERY = \"\"\"INSERT INTO Grades VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n print \"Success! 
%s received a grade of %s on the %s project!\" % (github, grade, title)", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self", "def enroll_student(self, student_email):\n # check if course exists\n if not self.is_course_exists():\n print(\"The given course not found\")\n return\n\n if self.is_student_enrolled(student_email):\n print(\"The course is not exists or/ and student {} is already enrolled\".format(student_email))\n return\n else:\n db = self._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n db[\"courses\"][crs_i][\"students\"].append(student_email)\n break\n self._file.write_db(db)\n print(\"The new student is enrolled to course: {}\".format(self._course_name))", "def _add_grade_to_row(self, component, score):\r\n component_index = self.components.setdefault(component, len(self.components))\r\n self._current_row[component_index] = score", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def add_row(self, student_id):\r\n self._current_row = {}\r\n yield self._add_grade_to_row\r\n self.grades[student_id] = self._current_row", "def copy_and_add_student(self, new_student, happiness, stress):\n new_room = Room(self.rm_id)\n new_room.students = frozenset(list(self.students) + [new_student])\n new_room.stress = self.stress + stress\n new_room.happiness = self.happiness + happiness\n return new_room" ]
[ "0.812951", "0.7889351", "0.7747675", "0.75414544", "0.7533531", "0.74755746", "0.74255633", "0.7281956", "0.7157385", "0.70832974", "0.70647204", "0.7050774", "0.7042668", "0.70276797", "0.68648064", "0.67618066", "0.6695062", "0.661738", "0.6594988", "0.65615714", "0.6531732", "0.6485175", "0.6464271", "0.6387548", "0.63413024", "0.6317869", "0.62850213", "0.6280628", "0.6225494", "0.6219798" ]
0.7927675
1
Computing initial values for position and velocity in GCRS system

This is for later use in orbit integration, from tables in the prediction files. Use a lagrange polynomial in order to interpolate in the tables.
def calculate_initial_values(eph, rundate):
    data = sorted(eph["positions"].items())
    pos_itrs = np.zeros((len(data), 3))
    mjd1, mjd2 = zip(*[t for t, d in data])
    rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc"))
    tbl = time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc")
    for i in range(0, len(data)):
        pos_itrs[i] = data[i][1]["pos"]
    diffsec = np.array([(t - rundate).total_seconds() for t in tbl.utc.datetime])
    # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will
    # be done
    pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2)
    log.info("Interpolating data from prediction file in order to get initial pos/vel")
    pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative(
        diffsec, pos_gcrs, np.array([0.0]), kind="lagrange", window=10, bounds_error=False
    )
    eph["initial_pos"] = pos_gcrs_ip[0]
    eph["initial_vel"] = vel_gcrs_ip[0]
    return eph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FIXME\n max_velocity = (self.upper_bound - self.lower_bound) / magic_constant\n shape = (len(self.positions), len(self.lower_bound))\n self._pso_data.velocities = np.random.uniform(low=-max_velocity, high=max_velocity, size=shape)", "def _initialize_spline(self, initial_state, final_state):\r\n x_f = final_state[0]\r\n y_f = final_state[1]\r\n theta_f = final_state[2] # rad\r\n kappa_f = final_state[3] # rad\r\n kappa_0 = initial_state[3] # rad\r\n\r\n \r\n d = np.sqrt(x_f**2 + y_f**2)\r\n theta_delta = np.abs(theta_f)\r\n s = d*(( (theta_delta**2) / 5) +1) + 2*theta_delta/5\r\n \r\n # Initialization method from Nagy and Kelly, 2001 \r\n # a = 6*theta_f/(s**2) - 2*kappa_0/s + 4*kappa_f/s\r\n # c = 0\r\n # b = 3*(kappa_0 + kappa_f)/(s**2) + 6*theta_f/(s**3)\r\n\r\n # Initilalization method from Thomas M. Howard, 2009\r\n a = 0.0\r\n b = 0.0\r\n c = kappa_f\r\n \r\n return a, b, c, s", "def initialize(self):\n\n for timestep in self.x:\n self.y_previous.append(self.equation(timestep))\n self.y_current.append(self.equation(timestep))\n\n self.y_previous[0] = 0\n self.y_current[0] = 0\n self.y_previous[99] = 0\n self.y_current[99] = 0", "def initialize():\n\n global z_from_t_interp\n\n # Logarithmic spacing\n log_z_set = np.linspace(0.0, 3.0, 300)\n z_set = 10**(log_z_set) - 1.0\n\n t_set = np.zeros(len(z_set))\n for i, z in enumerate(z_set):\n t_set[i] = calc_lookback_time(z) / 1.0e6 # in Myr\n\n z_from_t_interp = interp1d(t_set, z_set, bounds_error=False, fill_value=100.0)", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def get_initial_params(self, x, y, yerr):\n estimated_max = max(y)\n estimated_min = min(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(min(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_max, estimated_min])\n return p0", "def initialize(H, Hmin, HZ, HminZ, solutes, restart_folder,\n field_to_subspace, XYZ,\n inlet_velocity,\n enable_NS, enable_PF, enable_EC,\n **namespace):\n w_init_field = dict()\n # if not restart_folder:\n # if enable_NS:\n # try:\n # subspace = field_to_subspace[\"u\"].collapse()\n # except:\n # subspace = field_to_subspace[\"u\"]\n # u_init = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ)\n # w_init_field[\"u\"] = df.interpolate(u_init, subspace)\n\n # Ensure all processes have completed (Might be redundant)\n mpi_barrier()\n return w_init_field", "def __init__(self, timestep=1.0 * simtk.unit.femtoseconds):\n\n super(VelocityVerletIntegrator, self).__init__(timestep)\n\n self.addPerDofVariable(\"x1\", 0)\n\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def main():\n \n def get_x_input():\n \"\"\"\n This gets the initial x position and velocity values\n Param:none\n Return:Tuple with x pos and vel\n \"\"\"\n # Ask for and validate user input for x pos and vel\n while True:\n try:\n posx = float(input(\"Please enter the initial x position in m: \"))\n except ValueError:\n 
print(\"Invalid Input\")\n continue\n else:\n break\n\n while True:\n try:\n velx = float(input(\"Please enter the initial x velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n #return tuple\n xinput = (posx, velx)\n return xinput\n\n def get_y_input():\n \"\"\"\n This gets the initial y position and velocity values\n Param:none\n Return:Tuple with y pos and vel\n \"\"\" \n # Ask for and validate user input for y pos and vel\n while True:\n try:\n posy = float(input(\"Please enter the initial y position in m: \"))\n\n #start at ground\n if posy < 0:\n print(\"Please enter a positive value.\")\n continue\n\n except ValueError:\n print(\"Invalid input\")\n continue\n else:\n break\n\n while True:\n try:\n vely = float(input(\"Please enter the initial y velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n # Return tuple\n yinput = (posy, vely)\n return yinput\n\n #Inital position and velocity of user input x and y\n posx0, velx0 = get_x_input()\n posy0, vely0 = get_y_input()\n \n #acceleration y acceleration is gravity\n accelx = 0.0\n GRAVITY = -9.8 \n \n #Initial time of 0s, time intervals of .01 s\n deltat = .01\n t = 0.0\n \n #lists of all x and y positions in the motion \n x = [posx0]\n y = [posy0]\n \n #limit of time intervals to calculate\n intervals = 4000\n\n for i in range(0, intervals):\n #increment time, add xy positions at that time\n t = t + deltat\n x.append(position(posx0, velx0, t, accelx))\n y.append(position(posy0, vely0, t, GRAVITY))\n \n #if the projectile has hit the ground, break\n if y[i+1] <= 0:\n break\n\n plot_motion(x, y)", "def initialize_local_frame(self):\n self.local_offset_g = 0.0\n\n for i in range(30):\n rospy.sleep(0.1)\n\n q0, q1, q2, q3 = (\n self.current_pose_g.pose.pose.orientation.w,\n self.current_pose_g.pose.pose.orientation.x,\n self.current_pose_g.pose.pose.orientation.y,\n self.current_pose_g.pose.pose.orientation.z,\n )\n\n psi = atan2((2 * (q0 * q3 + q1 * q2)),\n (1 - 2 * (pow(q2, 2) + pow(q3, 2))))\n\n self.local_offset_g += degrees(psi)\n self.local_offset_pose_g.x += self.current_pose_g.pose.pose.position.x\n self.local_offset_pose_g.y += self.current_pose_g.pose.pose.position.y\n self.local_offset_pose_g.z += self.current_pose_g.pose.pose.position.z\n\n self.local_offset_pose_g.x /= 30.0\n self.local_offset_pose_g.y /= 30.0\n self.local_offset_pose_g.z /= 30.0\n self.local_offset_g /= 30.0\n\n rospy.loginfo(CBLUE2 + \"Coordinate offset set\" + CEND)\n rospy.loginfo(\n CGREEN2 + \"The X-Axis is facing: {}\".format(self.local_offset_g) + CEND)", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def recalibrate_start(self):\n self.epoch += 1\n self.initialize()\n self.recalibration_i = 0\n\n if self.vr_from_epoch is not None and self.epoch >= self.vr_from_epoch:\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['gavg'].zero_()\n param_state['m2'].zero_()\n\n # xk is changed to the running_x\n p.data.zero_().add_(param_state['running_x'])\n param_state['tilde_x'] = p.data.clone()", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n 
beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. 
div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + 
v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def __call__(self, t_):\n X, Y, t, _n = self.X, self.Y, self.t, self._n\n x, y = 0, 0 # initial x and y return values\n for i in _n:\n p_i = 1 # initial lagrange 
polynomial value\n for j in _n:\n # if i != j: update lagrange polynomial\n if i != j: p_i *= (t_ - t[j]) / (t[i] - t[j])\n # mult ith control point by ith lagrange polynomial\n # (ith control point maps to ith time point)\n x += X[i] * p_i\n y += Y[i] * p_i\n return x, y", "def calc_refl(velocity, shotloc_x, shotloc_z, layer_idxs):\n solver_dg = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_dg.vv.min_coords = velocity.min_coords\n solver_dg.vv.node_intervals = velocity.node_intervals\n solver_dg.vv.npts = velocity.npts\n solver_dg.vv.values = velocity.values\n\n #shotloc = 2.56 # km\n src_idx = (int((shotloc_x - velocity.min_coords[0])/velocity.node_intervals[0]), int(shotloc_z/velocity.node_intervals[1]), 0)\n solver_dg.tt.values[src_idx] = 0\n solver_dg.unknown[src_idx] = False\n solver_dg.trial.push(*src_idx)\n solver_dg.solve()\n\n solver_ug = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_ug.vv.min_coords = solver_dg.vv.min_coords\n solver_ug.vv.node_intervals = solver_dg.vv.node_intervals\n solver_ug.vv.npts = solver_dg.vv.npts\n solver_ug.vv.values = solver_dg.vv.values\n\n for ix in range(solver_ug.tt.npts[0]):\n #idx = (ix, solver_ug.tt.npts[1]-1, 0)\n idx = (ix, layer_idxs[ix], 0)\n solver_ug.tt.values[idx] = solver_dg.tt.values[idx]\n #print(idx, solver_dg.tt.values[idx])\n solver_ug.unknown[idx] = False\n solver_ug.trial.push(*idx)\n solver_ug.solve()\n \n return solver_ug.tt.values[:,0,0]", "def LAT(self):\n # The maximum update amount for these element\n LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -\n self.LateralFraction_LIMITS[0]) / (\n 2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA\n self.LateralFraction = np.clip(self.LateralFraction,\n self.LateralFraction_LIMITS[0],\n self.LateralFraction_LIMITS[1])", "def initial_velocity(self) -> float:\n return self._initial_velocity", "def initialize(self, state_space, state_positions, **__):\n # for organization purposes\n interval = self._initializer['interval']\n random_dist = self._initializer['random_init']\n random_params = self._initializer['random_params']\n self._initial_states.update(self._default_initializer['states'])\n if self._initializer['states'] is not None:\n self._initial_states.update(self._initializer['states'])\n\n # different limits for InductionMotor\n if any(state in self._initial_states for state in ['psi_ralpha', 'psi_rbeta']):\n # caution: _initial_limits sometimes contains singleton ndarrays, they must be\n # extracted with .item()\n nominal_values_ =\\\n [self._initial_limits[state].item() if isinstance(self._initial_limits[state], np.ndarray)\n else self._initial_limits[state] for state in self._initial_states]\n upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)\n # state space for Induction Envs based on documentation\n # ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']\n # hardcoded for induction motors currently given in the toolbox\n state_space_low = np.array([-1, -1, -1, -1, -1])\n lower_bound = upper_bound * state_space_low\n else:\n if isinstance(self._nominal_values, dict):\n nominal_values_ = [self._nominal_values[state]\n for state in self._initial_states.keys()]\n nominal_values_ = np.asarray(nominal_values_)\n else:\n nominal_values_ = np.asarray(self._nominal_values)\n\n 
state_space_idx = [\n state_positions[state] for state in self._initial_states.keys()\n ]\n\n upper_bound = np.asarray(nominal_values_, dtype=float)\n lower_bound = upper_bound * \\\n np.asarray(state_space.low, dtype=float)[state_space_idx]\n # clip nominal boundaries to user defined\n if interval is not None:\n lower_bound = np.clip(\n lower_bound,\n a_min=np.asarray(interval, dtype=float).T[0],\n a_max=None\n )\n upper_bound = np.clip(\n upper_bound,\n a_min=None,\n a_max=np.asarray(interval, dtype=float).T[1]\n )\n # random initialization for each motor state (current, epsilon)\n if random_dist is not None:\n if random_dist == 'uniform':\n initial_value = (upper_bound - lower_bound) \\\n * self._random_generator.uniform(size=len(self._initial_states.keys())) \\\n + lower_bound\n # writing initial values in initial_states dict\n random_states = {\n state: initial_value[idx] for idx, state in enumerate(self._initial_states.keys())\n }\n self._initial_states.update(random_states)\n\n elif random_dist in ['normal', 'gaussian']:\n # specific input or middle of interval\n mue = random_params[0] or (\n upper_bound - lower_bound) / 2 + lower_bound\n sigma = random_params[1] or 1\n a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma\n initial_value = truncnorm.rvs(\n a, b, loc=mue, scale=sigma, size=(\n len(self._initial_states.keys())),\n random_state=self.seed_sequence.pool[0]\n )\n # writing initial values in initial_states dict\n random_states = {\n state: initial_value[idx] for idx, state in enumerate(self._initial_states.keys())\n }\n self._initial_states.update(random_states)\n\n else:\n raise NotImplementedError\n # constant initialization for each motor state (current, epsilon)\n elif self._initial_states is not None:\n initial_value = np.atleast_1d(list(self._initial_states.values()))\n # check init_value meets interval boundaries\n if ((lower_bound <= initial_value).all()\n and (initial_value <= upper_bound).all()):\n initial_states_ = \\\n {state: initial_value[idx]\n for idx, state in enumerate(self._initial_states.keys())}\n self._initial_states.update(initial_states_)\n else:\n raise Exception(\n 'Initialization value has to be within nominal boundaries')\n else:\n raise Exception('No matching Initialization Case')", "def initialize_constants(self):\n # maximum pheromone value\n self.PH_MAX = np.float_( len(self.variables) / (1.0 - self.PH_REDUCE_FACTOR))\n # minimum pheromone value\n self.PH_MIN = np.float_(self.PH_MAX / (2*len(self.variables)) )", "def __init__(self):\n super().__init__()\n self.location = 0.0\n self.scale = 1.0\n self.type = 'Laplace'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def _compute_solar_torque(self):\n pass", "def __init__(\n self,\n velocity_north_m_s,\n velocity_east_m_s,\n velocity_down_m_s):\n self.velocity_north_m_s = velocity_north_m_s\n self.velocity_east_m_s = velocity_east_m_s\n self.velocity_down_m_s = velocity_down_m_s", "def get_start_velocity(self):\n # uniform circular motion have a start velocity of omega\n # TODO generate from start position and rotation direction\n return np.array([0, self.wz, 0])", "def setup_stellar_aberration(self,observer_velocity_xyz):\n self.v_for_stellar_aberr = sp.vscl(recip_clight,observer_velocity_xyz)", "def initialise(self):\n for i in range(self.nx):\n self.T[:, i] = (\n self.t_sun\n + self.mu\n * self.m_u\n * self.nabla\n * self.g\n * (self.y - self.y_max)\n / 
self.kb\n )\n self.P = self.p_sun * (self.T / self.t_sun) ** (1 / self.nabla)\n\n if self.Gaussian_perturbation:\n x_mean = 6e6\n y_mean = 2e6\n sigma = 8e5\n xx, yy = np.meshgrid(self.x, self.y)\n gaussian = self.t_sun * np.exp(\n -((xx - x_mean) ** 2 + (yy - y_mean) ** 2) / (2 * sigma ** 2)\n )\n self.T[:, :] = self.T[:, :] + gaussian\n\n self.rho[:, :] = self.P * self.mu * self.m_u / (self.kb * self.T[:, :])\n self.e[:, :] = self.P[:, :] / (self.Y - 1)", "def linear_evolve(self,nt=1):\n for l in range(nt):\n y_temp = np.empty(self.y.shape[0])\n for i in range(self.y.shape[0]):\n \n # idx left to the departure point\n j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n # idx right to the departure point\n k = j+1\n print i, j, k\n # linear interpolation\n alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n # copy array to current time\n self.y = np.copy(y_temp)\n stop\n #return current varibale\n return self.y", "def initial_values(self):\n y = self._y\n trend = self.trend\n seasonal = self.seasonal\n seasoning = self.seasoning\n trending = self.trending\n m = self.seasonal_periods\n l0 = self._l0\n b0 = self._b0\n if seasoning:\n l0 = y[np.arange(self.nobs) % m == 0].mean() if l0 is None else l0\n if b0 is None and trending:\n lead, lag = y[m:m + m], y[:m]\n if trend == 'mul':\n b0 = np.exp((np.log(lead.mean()) - np.log(lag.mean())) / m)\n else:\n b0 = ((lead - lag) / m).mean()\n s0 = list(y[:m] / l0) if seasonal == 'mul' else list(y[:m] - l0)\n elif trending:\n l0 = y[0] if l0 is None else l0\n if b0 is None:\n b0 = y[1] / y[0] if trend == 'mul' else y[1] - y[0]\n s0 = []\n else:\n if l0 is None:\n l0 = y[0]\n b0 = None\n s0 = []\n\n return l0, b0, s0", "def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in 
radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle", "def init_env_variables(self):\n self.total_distance_moved = 0.0\n self.current_y_distance = self.get_y_dir_distance_from_start_point(self.start_point)\n self.cart_current_speed = rospy.get_param('/cart_pole_3d/init_cart_vel')", "def get_age_grad(self,renew=False):\n\t\ttry:\n\t\t\tdriv_lat = self['deriv_lat'].value\n\t\t\tdriv_lon = self['deriv_lon'].value\n\t\t\tdriv_msk = self['deriv_msk'].value\n\t\texcept:\n\t\t\tself._cal_age_grad()\n\t\tderiv_lat = self['deriv_lat'].value\n\t\tderiv_lon = self['deriv_lon'].value\n\t\tderiv_msk = self['deriv_msk'].value\n\t\tage_lon_Vec = self['age_lon_Vec'].value\n\t\tage_lat_Vec = self['age_lat_Vec'].value\n\t\txx, yy = np.meshgrid(age_lon_Vec, age_lat_Vec) # xx for longitude, yy for latitude\n\t\txx = xx.reshape(xx.size)\n\t\tyy = yy.reshape(yy.size)\n\t\tf_deriv_lat = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lat.reshape(deriv_lat.size),rescale=False)\n\t\tf_deriv_lon = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lon.reshape(deriv_lon.size),rescale=False)\n\t\tf_deriv_msk = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_msk.reshape(deriv_msk.size),rescale=False)\n\t\tfor period in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( period )]\n\t\t\tlons_orig = group['lonArr'].value\n\t\t\tlons = lons_orig.reshape(lons_orig.size)\n\t\t\tlats = group['latArr'].value.reshape(lons_orig.size)\n\t\t\tderiv_lat_Arr = f_deriv_lat(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_lon_Arr = f_deriv_lon(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_msk_Arr = f_deriv_msk(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tif renew:\n\t\t\t\tdel group['age_deriv_lat_Arr']\n\t\t\t\tdel group['age_deriv_lon_Arr']\n\t\t\t\tdel group['age_deriv_msk_Arr']\n\t\t\tgroup.create_dataset(name='age_deriv_lat_Arr', data=deriv_lat_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_lon_Arr', data=deriv_lon_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_msk_Arr', data=deriv_msk_Arr)\n\t\tpass" ]
[ "0.6120319", "0.6116722", "0.5932199", "0.5788035", "0.57826406", "0.57638705", "0.5757198", "0.5753652", "0.57429856", "0.57152313", "0.57009196", "0.5668376", "0.5657558", "0.5629421", "0.5624367", "0.5592595", "0.5591798", "0.5582761", "0.5570598", "0.55646664", "0.5564146", "0.55511785", "0.5525184", "0.55204207", "0.55135536", "0.5510296", "0.550398", "0.5463102", "0.5457339", "0.5453269" ]
0.6997615
0
Do the initialization and setup for building a postage stamp.

In the base class, we check for and parse the appropriate size and position values in config (aka base['stamp'] or base['image']). Values given in base['stamp'] take precedence if these are given in both places (which would be confusing, so probably shouldn't do that, but there might be a use case where it would make sense).

config      The configuration dict for the stamp field.
base        The base configuration dict.
xsize       The xsize of the image to build (if known).
ysize       The ysize of the image to build (if known).
ignore      A list of parameters that are allowed to be in config that we can ignore here.
            i.e. it won't be an error if these parameters are present.
logger      If given, a logger object to log progress.

xsize, ysize, image_pos, world_pos
def setup(self, config, base, xsize, ysize, ignore, logger):
    # .. Do any custom setup you need to do.
    # Probably want to call the base class setup function to do the normal determination
    # of the size and position values.

    # Extra processing of 'bandpass' argument
    # Most needed type-checking is done in galsim.bandpass
    self._req_bp_fields = ['throughput', 'wave_type']
    self._opt_bp_fields = ['red_limit', 'blue_limit', 'zeropoint']
    try:
        bp = config['bandpass']
        for req in self._req_bp_fields:
            if req not in bp.keys():
                raise ValueError('Must pass field {} for a bandpass object!'.format(req))
        # for opt in self._opt_bp_fields:
        #     if opt not in bp.keys():
        #         config['bandpass'][opt] = None
        for key in bp.keys():
            if key not in (self._req_bp_fields+self._opt_bp_fields):
                raise ValueError('Field {} is not a valid entry for a bandpass!'.format(key))
    except KeyError:
        raise KeyError('`bandpass` is a required field for a COSMOSChromatic stamp!')

    extra_ignore = ignore + ['bandpass']
    return super(self.__class__, self).setup(config, base, xsize, ysize, extra_ignore, logger)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, config, base, file_num, logger):\n # This is a copy of the base class code\n seed = galsim.config.SetupConfigRNG(base, logger=logger)\n logger.debug('file %d: seed = %d',file_num,seed)\n\n if 'det_num' not in config:\n config['det_num'] = { 'type': 'Sequence', 'nitems': 189 }\n\n # Figure out the detector name for the file name.\n detnum = galsim.config.ParseValue(config, 'det_num', base, int)[0]\n if 'camera' in config:\n camera_name = galsim.config.ParseValue(config, 'camera', base, str)[0]\n else:\n camera_name = 'LsstCam'\n camera = get_camera(camera_name)\n if 'only_dets' in config:\n only_dets = config['only_dets']\n det_name = only_dets[detnum]\n else:\n det_name = camera[detnum].getName()\n base['det_name'] = det_name\n if 'eval_variables' not in base:\n base['eval_variables'] = {}\n base['eval_variables']['sdet_name'] = det_name\n\n # Get detector size in pixels.\n det_bbox = camera[det_name].getBBox()\n base['xsize'] = det_bbox.width\n base['ysize'] = det_bbox.height\n\n if 'exptime' in config:\n base['exptime'] = galsim.config.ParseValue(\n config, 'exptime', base, float\n )[0]\n else:\n base['exptime'] = 30.0", "def __init__(self,\r\n default_path = None,\r\n default_level = None,\r\n logging_dir = None,\r\n log_file = None,\r\n log_file_dir = None,\r\n log_conf_full = None\r\n ):\r\n self.logger_is_set = False\r\n\r\n '''\r\n Get ready to setup everything.\r\n TO DO: read from structure is badly needed. \r\n '''\r\n self.default_path = default_path\r\n self.default_level = default_level\r\n self.logging_dir = logging_dir\r\n self.log_file = log_file\r\n self.log_file_dir = log_file_dir\r\n self.log_conf_full = log_conf_full\r\n\r\n\r\n\r\n self.setup_logging(self.default_path,\r\n self.default_level,\r\n self.logging_dir,\r\n self.log_file,\r\n self.log_file_dir,\r\n self.log_conf_full\r\n )", "def __init__(self, grape_config):\n super()\n self.grape_config = grape_config\n # inherit file name and data path from grape config\n self.file_name = grape_config[\"file_name\"]\n self.data_path = grape_config[\"data_path\"]\n log_file = \"{}.out\".format(self.file_name)\n self.log_path = os.path.join(self.data_path, log_file)", "def __init__(self):\n self.config = configs.Configuration()\n self.log = logger.CustomLogger(__name__).get_logger()\n self.output_dir = self.config.getConfigValue('OUTPUT_DIR')\n self.s3_directory = self.config.getConfigValue('S3_FILE_PATH_TRANSFORM')", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(gps_dvl_ins_stamped, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.sample_count is None:\n self.sample_count = 0\n if self.ekf_roll is None:\n self.ekf_roll = 0.\n if self.ekf_pitch is None:\n self.ekf_pitch = 0.\n if self.ekf_yaw is None:\n self.ekf_yaw = 0.\n if self.ekf_lat is None:\n self.ekf_lat = 0.\n if self.ekf_lon is None:\n self.ekf_lon = 0.\n if self.ekf_alt is None:\n self.ekf_alt = 0.\n if self.ekf_vN is None:\n self.ekf_vN = 0.\n if self.ekf_vE is None:\n self.ekf_vE = 0.\n if self.ekf_vD is None:\n self.ekf_vD = 0.\n if self.ekf_vX is None:\n self.ekf_vX = 0.\n if self.ekf_vY is None:\n self.ekf_vY = 0.\n if self.ekf_vZ is None:\n self.ekf_vZ = 0.\n if self.rad_gyro_X is None:\n self.rad_gyro_X = 0.\n if self.rad_gyro_Y is None:\n self.rad_gyro_Y = 0.\n if self.rad_gyro_Z is None:\n self.rad_gyro_Z = 0.\n if self.angular_acc_X is None:\n self.angular_acc_X = 0.\n if 
self.angular_acc_Y is None:\n self.angular_acc_Y = 0.\n if self.angular_acc_Z is None:\n self.angular_acc_Z = 0.\n if self.alt_DVL is None:\n self.alt_DVL = 0\n if self.dvl_error_code is None:\n self.dvl_error_code = b''\n if self.flag_to_check is None:\n self.flag_to_check = 0\n if self.imu_deg_gyro_X is None:\n self.imu_deg_gyro_X = 0.\n if self.imu_deg_gyro_Y is None:\n self.imu_deg_gyro_Y = 0.\n if self.imu_deg_gyro_Z is None:\n self.imu_deg_gyro_Z = 0.\n if self.imu_mag_X is None:\n self.imu_mag_X = 0.\n if self.imu_mag_Y is None:\n self.imu_mag_Y = 0.\n if self.imu_mag_Z is None:\n self.imu_mag_Z = 0.\n if self.imu_acc_X is None:\n self.imu_acc_X = 0.\n if self.imu_acc_Y is None:\n self.imu_acc_Y = 0.\n if self.imu_acc_Z is None:\n self.imu_acc_Z = 0.\n if self.gps_lat is None:\n self.gps_lat = 0\n if self.gps_lon is None:\n self.gps_lon = 0\n if self.gps_alt is None:\n self.gps_alt = 0.\n if self.gps_vN is None:\n self.gps_vN = 0.\n if self.gps_vE is None:\n self.gps_vE = 0.\n if self.gps_vD is None:\n self.gps_vD = 0.\n if self.dvl_vX is None:\n self.dvl_vX = 0.\n if self.dvl_vY is None:\n self.dvl_vY = 0.\n if self.dvl_vZ is None:\n self.dvl_vZ = 0.\n else:\n self.header = std_msgs.msg.Header()\n self.sample_count = 0\n self.ekf_roll = 0.\n self.ekf_pitch = 0.\n self.ekf_yaw = 0.\n self.ekf_lat = 0.\n self.ekf_lon = 0.\n self.ekf_alt = 0.\n self.ekf_vN = 0.\n self.ekf_vE = 0.\n self.ekf_vD = 0.\n self.ekf_vX = 0.\n self.ekf_vY = 0.\n self.ekf_vZ = 0.\n self.rad_gyro_X = 0.\n self.rad_gyro_Y = 0.\n self.rad_gyro_Z = 0.\n self.angular_acc_X = 0.\n self.angular_acc_Y = 0.\n self.angular_acc_Z = 0.\n self.alt_DVL = 0\n self.dvl_error_code = b''\n self.flag_to_check = 0\n self.imu_deg_gyro_X = 0.\n self.imu_deg_gyro_Y = 0.\n self.imu_deg_gyro_Z = 0.\n self.imu_mag_X = 0.\n self.imu_mag_Y = 0.\n self.imu_mag_Z = 0.\n self.imu_acc_X = 0.\n self.imu_acc_Y = 0.\n self.imu_acc_Z = 0.\n self.gps_lat = 0\n self.gps_lon = 0\n self.gps_alt = 0.\n self.gps_vN = 0.\n self.gps_vE = 0.\n self.gps_vD = 0.\n self.dvl_vX = 0.\n self.dvl_vY = 0.\n self.dvl_vZ = 0.", "def __init__(self, images=[], logfile='inspect_raw.info', load_log=True, \n master=None):\n if len(images) == 0:\n print('No images specified')\n return False\n \n if not os.path.exists(images[0]):\n print('First image not found (%s), is path correct?' 
%(images[0]))\n return False\n \n ##### Add .fits to filename and make backup if necessary\n self.logfile = logfile\n if not self.logfile.lower().endswith('.fits'):\n self.logfile += '.fits'\n \n if os.path.exists(self.logfile):\n bk = glob.glob(self.logfile+'.backup*')\n if len(bk) > 0:\n bkup_file = self.logfile + '.backup.%03d' %(len(bk))\n else:\n bkup_file = self.logfile + '.backup'\n \n shutil.copy(self.logfile, bkup_file)\n print('Made copy of %s -> %s' %(self.logfile, bkup_file))\n \n ####### Initialize parameters\n self.params = {} \n self.images = images\n \n self.marked_reads = None\n self.NREAD = 14\n \n ### Polygons for reads\n x0 = y0 = 12\n px = py = 6\n dx = dy = 241\n xi = np.array([0,1,1,0])\n yi = np.array([0,0,1,1])\n \n c = 0\n self.read_polygons = []\n for j in range(4):\n for i in range(4):\n c += 1\n if c > self.NREAD:\n break\n else:\n polyx = x0+i*(px+dx)+xi*dx\n polyy = y0+j*(py+dy)+yi*dy\n poly = np.array([polyx, polyy]).T\n self.read_polygons.append(mplPath.Path(poly))\n \n if os.path.exists(self.logfile) & load_log:\n self.read_fits()\n \n self.N = len(self.images)\n\n for key in ['satellite', 'earth', 'other', 'kill', 'seen']:\n if key not in self.params.keys():\n self.params[key] = np.zeros(self.N, dtype=np.int)\n \n if self.marked_reads is None:\n self.marked_reads = np.zeros((self.N, self.NREAD), dtype=int)\n \n if 'comment' not in self.params.keys():\n self.params['comment'] = ['---' for i in range(self.N)]\n \n self.i = 0\n self.master = master\n self.setup_gui()", "def __init__(self, config, logger):\n self.config = config\n self.logger = logger", "def _configure_logger(self):\n\n # NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently.\n level = getattr(logging, self.args.log_level.upper())\n\n if self._GLOBAL_LOG_CONFIGURED:\n return\n\n # TODO different processors for different basescripts ?\n # TODO dynamically inject processors ?\n\n # since the hooks need to run through structlog, need to wrap them like processors\n def wrap_hook(fn):\n @wraps(fn)\n def processor(logger, method_name, event_dict):\n fn(event_dict)\n return event_dict\n\n return processor\n\n processors = self.define_log_processors()\n processors.extend(\n [ wrap_hook(h) for h in self.define_log_pre_format_hooks() ]\n )\n\n log_renderer = self.define_log_renderer()\n stderr_required = (not self.args.quiet)\n pretty_to_stderr = (\n stderr_required\n and (\n self.args.log_format == \"pretty\"\n or (self.args.log_format is None and sys.stderr.isatty())\n )\n )\n\n should_inject_pretty_renderer = (\n pretty_to_stderr\n and not isinstance(log_renderer, structlog.dev.ConsoleRenderer)\n )\n if should_inject_pretty_renderer:\n stderr_required = False\n processors.append(StderrConsoleRenderer())\n\n processors.append(log_renderer)\n processors.extend(\n [ wrap_hook(h) for h in self.define_log_post_format_hooks() ]\n )\n\n streams = []\n # we need to use a stream if we are writing to both file and stderr, and both are json\n if stderr_required:\n streams.append(sys.stderr)\n\n if self.args.log_file is not None:\n # TODO handle creating a directory for this log file ?\n # TODO set mode and encoding appropriately\n streams.append(open(self.args.log_file, 'a'))\n\n assert len(streams) != 0, \"cannot configure logger for 0 streams\"\n\n stream = streams[0] if len(streams) == 1 else Stream(*streams)\n atexit.register(stream.close)\n\n # a global level struct log config unless otherwise specified.\n structlog.configure(\n processors=processors,\n context_class=dict,\n 
logger_factory=LevelLoggerFactory(stream, level=level),\n wrapper_class=BoundLevelLogger,\n cache_logger_on_first_use=True,\n )\n\n # TODO take care of removing other handlers\n stdlib_root_log = logging.getLogger()\n stdlib_root_log.addHandler(StdlibStructlogHandler())\n stdlib_root_log.setLevel(level)\n\n self._GLOBAL_LOG_CONFIGURED = True", "def __init__(self, config):\n\n self.locations_hltv_starting_ = config[sC.BUCKET_LOCATIONS][sC.HLTV_STARTING]\n self.score_starting_ = config[sC.BUCKET_LOCATIONS][sC.SCORE_STARTING]\n self.logs_starting_ = config[sC.BUCKET_LOCATIONS][sC.LOGS_STARTING]\n self.temp = config[sC.FOLDER_LOCATIONS][sC.TEMP_APP_ENGINE_FOLDER]\n self.results_ = config[sC.FOLDER_LOCATIONS][sC.CONFIGS_RESULTS]\n self.amxmodx_logs_ = config[sC.FOLDER_LOCATIONS][sC.ADDONS_AMXMODX_LOGS]\n self.cstrike_logs_ = config[sC.FOLDER_LOCATIONS][sC.CSTRIKE_LOGS]\n self.hltv_demos_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.HLTV_DEMOS_FUNC]\n self.ftp_logs_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.FTP_LOGS_FUNC]\n\n print('{} - Initialized'.format(__name__))", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(GraspConfig, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n else:\n self.position = geometry_msgs.msg.Point()\n self.approach = geometry_msgs.msg.Vector3()\n self.binormal = geometry_msgs.msg.Vector3()\n self.axis = geometry_msgs.msg.Vector3()\n self.width = std_msgs.msg.Float32()\n self.score = std_msgs.msg.Float32()\n self.sample = geometry_msgs.msg.Point()", "def __init__(self, config):\n logging.info(\"Creating footprint\")\n # self.infra = yaml.load(config)\n self.infra = config\n self.footprint_name = self.infra.get(\"footprint\", \"ehw\")\n self.images = self.infra.get(\"images\")\n self.old_images = self.infra.get(\"old_images\", [])\n self.container_name = \"%s-metadata\" % self.footprint_name\n \n self.admin_password = self.infra.get('admin-password')\n self.savefile = self.infra.get(\"footprint\", \"outfile\") + \"-save.yaml\"\n if os.path.exists(self.savefile):\n self.saved_images = yaml.load(open(self.savefile))\n self.footprint_status=self.infra.get(\"footprint_status\", None)\n logging.debug(\"Loaded saved images: %s\" % self.saved_images)\n # sys.exit(0) ", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(LineTrackerGoalTimed, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.x is None:\n self.x = 0.\n if self.y is None:\n self.y = 0.\n if self.z is None:\n self.z = 0.\n if self.yaw is None:\n self.yaw = 0.\n if self.v_des is None:\n self.v_des = 0.\n if self.a_des is None:\n self.a_des = 0.\n if self.t_start is None:\n self.t_start = genpy.Time()\n if self.duration is None:\n self.duration = genpy.Duration()\n if self.relative is None:\n self.relative = False\n else:\n self.x = 0.\n self.y = 0.\n self.z = 0.\n self.yaw = 0.\n self.v_des = 0.\n self.a_des = 0.\n self.t_start = genpy.Time()\n self.duration = genpy.Duration()\n self.relative = 
False", "def __init__ (self, config, logger):\n self.logger = logger\n self.logger.add('loading AREA')\n config['data_type'] = np.float32\n self.area = AreaGrid(config,logger = self.logger)\n self.area.config['dataset_name'] = 'Area Data'\n self.area.config['description'] = \\\n \"\"\"Area Data contains fractional cohort data for each year the ATM\n was run. \n \"\"\"\n self.logger.add('performing post AREA setup')\n self.shape = self.area.config['grid_shape']\n self.aoi = self.area.area_of_interest()\n config['shape'] = self.shape\n config['grid_shape'] = self.area.config['grid_shape']\n config['AOI mask'] = self.aoi\n config['cohort list'] = self.area.get_cohort_list()\n self.logger.add('loading ALD')\n self.ald = ALDGrid(config,logger = self.logger)\n self.ald.config['dataset_name'] = 'ALD Data'\n self.ald.config['description'] = \\\n \"\"\"ALD Data contains ALD, and Protective Layer data for each year \n the ATM was run.\n \"\"\"\n self.logger.add('loading POI')\n self.poi = POIGrid(config,logger = self.logger)\n self.poi.config['dataset_name'] = 'POI Data'\n self.poi.config['description'] = \\\n \"\"\"POI Data contains Poi data for each year the ATM was run. \n \"\"\"\n self.logger.add('loading ICE')\n self.ice = IceGrid(config,logger = self.logger)\n self.ice.config['dataset_name'] = 'Ice Data'\n self.ice.config['description'] = \\\n \"\"\"\n Ice Data contains the ice content grid for the ATM model run\n \"\"\"\n self.logger.add('loading LAKE POND')\n self.lake_pond = LakePondGrid(config,logger = self.logger)\n self.lake_pond.config['dataset_name'] = 'Lake Pond Data'\n self.lake_pond.config['description'] = \\\n \"\"\"Lake-Pond Data contains Lake and Pond depth and count data for \n each year the ATM was run. \n \"\"\"\n self.logger.add('loading CLIMATE EVENT')\n self.climate_event = ClimateEventGrid(config,logger = self.logger)\n self.climate_event.config['dataset_name'] = 'Climate Event Data'\n self.climate_event.config['description'] = \\\n \"\"\"Climate Event Data contains climate event data for each \n year the ATM was run. \n \"\"\"\n ## TODO:redo masks here\n # for lpt in config['pond types'] + config['lake types']:\n # #~ print lpt\n # mask = self.area[lpt][0] > 0 # all cells in first ts > 0\n # self.lake_pond.apply_mask(lpt, mask)\n self.logger.add('loading DRAINGAGE')\n self.drainage = DrainageGrid(config,logger = self.logger)\n self.drainage.config['dataset_name'] = 'Drainage Data'\n self.drainage.config['description'] = \"\"\"\n Drainage contains the drainage grid for the ATM model run\n \"\"\"\n \n self.logger.add('loading DEGREE DAY')\n self.degreedays = DegreeDayGrids(\n os.path.join(\n config['Input_dir'], config['Met_Control']['FDD_file']),\n os.path.join(\n config['Input_dir'], config['Met_Control']['TDD_file'])\n )\n \n ## what does this do?\n self.ald.setup_ald_constants(\n self.degreedays.thawing[config['start year']]\n )", "def __init__(self, config):\n\n # controls for scope logging\n self.vars = None\n self.log = {}\n self.conf = config\n pe.set_default_val(self.conf, 'clip_by_norm', 0.3)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(tipCoords, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.thumb is None:\n self.thumb = [0.] * 3\n if self.index is None:\n self.index = [0.] * 3\n if self.middle is None:\n self.middle = [0.] * 3\n if self.ring is None:\n self.ring = [0.] * 3\n if self.little is None:\n self.little = [0.] * 3\n else:\n self.thumb = [0.] * 3\n self.index = [0.] 
* 3\n self.middle = [0.] * 3\n self.ring = [0.] * 3\n self.little = [0.] * 3", "def __init__(self, config_file):\n # Parse configuration file\n self.config_file = config_file\n self.config = ConfigParser.SafeConfigParser()\n self.config.read(self.config_file)\n\n self.source_do_purge = self.config.getboolean(\"source\", \"do_purge\")\n self.source_requests_dir = self.config.get(\"source\", \"requests_dir\")\n\n self.add_console_handler = self.config.getboolean(\"source\", \"add_console_handler\")\n self.add_file_handler = self.config.getboolean(\"source\", \"add_file_handler\")\n self.log_file_name = self.config.get(\"source\", \"log_file_name\")\n log_level = self.config.get(\"source\", \"log_level\")\n if log_level == 'DEBUG':\n self.log_level = logging.DEBUG\n elif log_level == 'INFO':\n self.log_level = logging.INFO\n elif log_level == 'WARNING':\n self.log_level = logging.WARNING\n elif log_level == 'ERROR':\n self.log_level = logging.ERROR\n elif log_level == 'CRITICAL':\n self.log_level = logging.CRITICAL\n\n self.author_config_file = self.config.get(\"author\", \"config_file\")\n self.author_do_purge = self.config.getboolean(\"author\", \"do_purge\")\n self.author_requests_dir = self.config.get(\"author\", \"requests_dir\")\n\n self.flickr_content_dir = self.config.get(\"flickr\", \"content_dir\")\n self.tumblr_content_dir = self.config.get(\"tumblr\", \"content_dir\")\n self.twitter_content_dir = self.config.get(\"twitter\", \"content_dir\")\n\n self.tumblr_min_total_tags = self.config.getint(\"tumblr\", \"min_total_tags\")\n self.tumblr_min_total_blogs = self.config.getint(\"tumblr\", \"min_total_blogs\")\n\n # Create a logger\n root = logging.getLogger()\n root.setLevel(self.log_level)\n formatter = logging.Formatter(\n \"%(asctime)s %(name)s %(levelname)s: %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n for handler in root.handlers:\n root.removeHandler(handler)\n if self.add_console_handler:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n root.addHandler(console_handler)\n if self.add_file_handler:\n file_handler = logging.handlers.RotatingFileHandler(\n self.log_file_name, maxBytes=1000000, backupCount=5, encoding='utf-8')\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)\n\n self.logger = logging.getLogger(u\"BluPenSource\")", "def __init__(self, config_directory, scale_override=None):\n self._config_directory = config_directory\n\n self._config_detector = DetectorConfig(config_directory)\n self._config_align = AlignConfig(config_directory, scale_override=scale_override)\n self._config_crystal = CrystalMatchConfig(config_directory)", "def __init__(self):\r\n config = ConfigProvider().getProcessingConfig()\r\n self.xGround = config.get(\"xGround\")\r\n self.yGround = config.get(\"yGround\")", "def __init__(self, config, fname, dt, preload=True): \n \n self.config = config\n self.fname = fname\n self.dt = dt\n self.xvar = config.get('profiles', 'xvar')\n self.yvar = config.get('profiles', 'yvar')\n self.zvar = config.get('profiles', 'zvar')\n self.zbounds = np.array(self.config.get('grid', 'zbounds').split(','), dtype=np.float64)\n self.pvar = config.get('profiles', 'pvar')\n self.pnvar = config.get('profiles', 'pnvar')\n self.irvar = config.get('profiles', 'irvar')\n self.psvar = config.get('profiles', 'psvar')\n self.psalqcvar = config.get('profiles', 'psalqcvar')\n self.qcvar = config.get('profiles', 'qcvar')\n self.posqcvar = config.get('profiles', 'posqcvar')\n self.datavar = config.get('profiles', 'datavar')\n 
self.fixedgap = config.get('profiles', 'fixedgap')\n \n if preload: \n self.load_data()\n self.load_x()\n self.load_y()\n self.load_z()\n self.load_p()\n self.load_pn()\n self.load_ir()\n self.load_ps()\n self.load_psalqc()\n self.load_qc()\n self.load_posqc()", "def __init__(\n self, path=\"logger.yml\", default_level=logging.INFO, env_key=\"LOG_CFG\"\n ):\n\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(os.path.normpath(path)):\n with open(path, \"rt\") as f:\n config = yaml.safe_load(f.read())\n to_log = \"\"\n # If directory is non existent create it\n # Todo: Here a dir will be made after installation, so if this prohibited go to the other dir\n if \"file\" in config[\"handlers\"]:\n pathtologfile = os.path.normpath(config[\"handlers\"][\"file\"][\"filename\"]).split(os.sep)\n if not os.path.isdir(\n os.path.join(os.getcwd(), *pathtologfile[:-1])\n ):\n os.mkdir(os.path.join(os.getcwd(), *pathtologfile[:-1]))\n else:\n to_log = (\n \"Logging to file failed, since no file handler was defined!\"\n )\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n self.log_LEVELS = {\n \"NOTSET\": 0,\n \"DEBUG\": 10,\n \"INFO\": 20,\n \"WARNING\": 30,\n \"ERROR\": 40,\n \"CRITICAL\": 50,\n }\n\n self.welcome_string = (\n \"\\n\"\n \" __ ______ ______ ______ __ __ ______ \\n\" \n \" /\\ \\ /\\ __ \\ /\\ ___\\ /\\ ___\\ /\\ \\ /\\ \\ /\\ ___\\ \\n\" \n \" \\ \\ \\____ \\ \\ \\/\\ \\ \\ \\ \\__ \\ \\ \\ __\\ \\ \\ \\ \\ \\ \\____ \\ \\ __\\ \\n\" \n \" \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\n\" \n \" \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_____/ \\/_____/\\n\\n\\n\"\n )\n\n snoopy = (\"\\n\\n\\n XXXX\\n\"\n \" X XX\\n\"\n \" X *** X XXXXX\\n\"\n \" X ***** X XXX XX\\n\"\n \" XXXX ******* XXX XXXX XX\\n\"\n \" XX X ****** XXXXXXXXX XX XXX\\n\"\n \" XX X **** X X** X\\n\"\n\" X XX XX X X***X\\n\"\n\" X //XXXX X XXXX\\n\"\n\" X // X XX\\n\"\n\"X // X XXXXXXXXXXXXXXXXXX/ \\n\"\n\"X XXX// X X\\n\"\n\"X X X X X\\n\"\n\"X X X X X\\n\"\n\" X X X X X XX\\n\"\n\" X X X X X XXX XX\\n\"\n\" X XXX X X X X X X\\n\"\n\" X X X XX X XXXX\\n\"\n\" X X XXXXXXXX/ XX XX X\\n\"\n\" XX XX X X X XX\\n\"\n\" XX XXXX XXXXXX/ X XXXX\\n\"\n\" XXX XX*** X X\\n\"\n\" XXXXXXXXXXXXX * * X X\\n\"\n\" *---* X X X\\n\"\n\" *-* * XXX X X\\n\"\n\" *- * XXX X\\n\"\n\" *- *X XXX\\n\"\n\" *- *X X XXX\\n\"\n\" *- *X X XX\\n\"\n\" *- *XX X X\\n\"\n\" * *X* X X X\\n\"\n\" * *X * X X X\\n\"\n\" * * X** X XXXX X\\n\"\n\" * * X** XX X X\\n\"\n\" * ** X** X XX X\\n\"\n\" * ** X* XXX X X\\n\"\n\" * ** XX XXXX XXX\\n\"\n\" * * * XXXX X X\\n\"\n\" * * * X X X\\n\"\n\" >>>>>>>******* * * X X XXXXXXXX/ \\n\"\n\" * * * /XXXXX XXXXXXXX/ <\\n\"\n\" >>>>>********** * X < / <\\n\"\n\" >>>>* * X / / <XXXXX\\n\"\n\">>>>>>>>>********** XXXXXXXXXXXXXXXXXXXXXX\\n\")\n\n # Create a logger Object\n self.LOG = logging.getLogger(\"Logfile\")\n # Print welcome message\n self.LOG.info(self.welcome_string)\n self.LOG.debug(snoopy)\n if to_log:\n self.LOG.info(to_log)", "def __init__(self, exp_params, stamp_unique=True):\n self._main_thread = True\n self.params = copy.deepcopy(exp_params)\n self.params['class'] = self.__class__.__name__\n self._check_required_params()\n self.__check_exist_path()\n self.__create_folder(stamp_unique)\n set_experiment_logger(self.params['path_exp'], FILE_LOGS)\n # set stream logging to info level\n for lh in logging.getLogger().handlers:\n if isinstance(lh, logging.StreamHandler) and \\\n not isinstance(lh, 
logging.FileHandler):\n lh.setLevel(logging.INFO)\n logging.info('initialise experiment...')\n logging.info(string_dict(self.params, 'PARAMETERS:'))\n logging.info('COMPUTER: %r', computer_info())", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def init(self):\n self.filename, file_extension = os.path.splitext(os.path.basename(__file__))\n\n # parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--configdir\", help=\"your config.ini directory\", type=str)\n parser.add_argument(\"--logdir\", help=\"your log directory\", type=str)\n args = parser.parse_args()\n\n # determine config directory\n if args.configdir:\n config_file = os.path.join(args.configdir, 'config.ini')\n else:\n config_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../config', 'config.ini')\n\n if args.logdir:\n log_file = os.path.join(args.logdir, '%s.log' % self.filename)\n else:\n log_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../logs', '%s.log' % self.filename)\n\n # load config\n self.config = configparser.ConfigParser()\n self.config.read(config_file)\n\n # init logger\n logbook.set_datetime_format(\"local\")\n self.logger = logbook.Logger(name=self.filename)\n format_string = '%s %s' % ('[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] {record.level_name}',\n '{record.module}:{record.lineno}: {record.message}')\n if self.config.has_option('handler_stream_handler', 'verbose'):\n log_handler = logbook.StreamHandler(sys.stdout, level=self.config.get('Logger', 'level'), bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n else:\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n\n # init database\n self.db = AdhocDatabaseHandler.instantiate_from_configparser(self.config, self.logger)", "def __init__(self, args, logger: MainLogger, log_start_t=0):\n\n super().__init__(args, logger)\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n # Find id of the first policy team - Only supported for one policy team in the build plan\n teams = args.env_args[\"match_build_plan\"]\n self.policy_team_id = get_policy_team_id(teams)\n if self.args.headless_controls:\n controls = HeadlessControls(env=self.env)\n controls.daemon = True\n controls.start()\n\n self.episode_limit = self.env.episode_limit\n self.t = 0 # current time step within the episode\n self.log_start_t = log_start_t # timestep to start logging from\n self.t_env = 0 # total time steps for this runner in the provided environment across multiple episodes\n self.phi: FeatureFunction = feature_func_REGISTRY[self.args.sfs] if self.args.sfs else None\n self.home_batch = None\n self.home_mac = None\n self.new_batch_fn = None", "def __init__(self, pyconfig):\n self.pylot_cfg = pyconfig\n self.logfile = pyconfig.dir_logs + 'Pylot.log'", "def 
__init__(self, samples, obs, nPlanets=0, nOffsets=0, nImportSamps=10000, scale=1.0, pRatio=1., slope=False):\n\n self.samples = samples\n self.nPlanets = nPlanets\n self.nOffsets = nOffsets\n self.nImportSamps = nImportSamps\n self.scale = scale\n self.pRatio = pRatio\n self.slope = slope\n\n param_keys, param_IS_keys = create.dict_keys(self.nPlanets, self.nOffsets, slope=self.slope)\n print(param_keys)\n print(param_IS_keys)\n postSamp, nPostSamples = create.posterior_samples_from_emcee(self.samples, param_keys)\n\n postSamp_pKhkl = compute.pKewM_to_importSamp_parameterization(postSamp, param_IS_keys, self.nPlanets)\n\n self.mediansG, self.covMatrixG, self.choleskyDecomp, self.logDetSigmaG = compute.matrix_info(postSamp_pKhkl)\n\n nParams = len(param_IS_keys)\n random_values = [ truncnorm.rvs(-self.scale, self.scale, size=nParams) for i in range(self.nImportSamps) ]\n\n samples = [ [] for i in range(self.nImportSamps) ]\n g_samples = [ [] for i in range(self.nImportSamps) ]\n loggs = [ 0. for i in range(self.nImportSamps) ]\n\n print(\"## Drawing importance samples...\")\n\n for x in range(self.nImportSamps):\n dispersion = np.dot( self.choleskyDecomp, np.transpose(random_values[x]) )\n samples[x] = self.mediansG + dispersion\n g_samples[x] = list(samples[x])\n\n diff = np.subtract(samples[x],self.mediansG)\n\n logg = -0.5 * (nParams*np.log(2.*np.pi) + self.logDetSigmaG + \\\n np.dot( np.transpose(diff), \\\n np.linalg.solve(self.covMatrixG, np.subtract(samples[x],self.mediansG) ) ) ) - \\\n nParams*np.log(erf(self.scale/np.sqrt(2.)))\n loggs[x] = logg\n\n print(\"## Done drawing importance samples!\")\n print(\"\")\n \n g_samples_T = np.transpose(g_samples)\n importSamp_dict = OrderedDict()\n\n for i, item in enumerate(g_samples_T):\n importSamp_dict[param_IS_keys[i]] = item\n\n importSamp_pKhkl_dict = compute.importSamp_parameterization_to_pKewM(importSamp_dict, param_keys, self.nPlanets, self.pRatio)\n importSamp_pKewM = np.transpose([ vals for key, vals in importSamp_pKhkl_dict.items() ])\n\n print(\"## Evaluating lnpost at importance samples...\")\n\n logPosteriors = np.array([ np.nan for i in range(self.nImportSamps) ])\n for i in range(nImportSamps):\n logPosteriors[i] = lnpost(importSamp_pKewM[i], obs, self.nPlanets, slope=self.slope)\n\n print(\"## Done evaluating lnpost!\")\n print(\"\")\n\n\n logSum = -(9.**99.)\n\n for i in range(self.nImportSamps): \n diff = logPosteriors[i] - loggs[i]\n\n logSum = np.logaddexp(logSum, diff)\n if i%1000==0:\n print(str(i+1) + \" \" + str(logSum - np.log(i+1)))\n \n self.logAvg = logSum - np.log(self.nImportSamps)\n self.f_MCMC = 0.\n\n print(\"\")\n print(\"## logAvg: \" + str(self.logAvg))\n\n postSamp_wo_keys = []\n for key in postSamp_pKhkl:\n postSamp_wo_keys.append(postSamp_pKhkl[key])\n \n postSamp_wo_keys = np.transpose(np.array(postSamp_wo_keys))\n diff = postSamp_wo_keys-self.mediansG\n\n for j in range(nPostSamples):\n\n z = np.linalg.solve(self.choleskyDecomp, diff[j])\n\n if all([abs(k)<=scale for k in z]):\n self.f_MCMC += 1.\n else:\n self.f_MCMC += 0.\n \n self.f_MCMC = self.f_MCMC/nPostSamples\n self.logFML = self.logAvg - np.log(self.f_MCMC)\n\n print(\"## f_MCMC: \" + str(self.f_MCMC))\n print(\"## logFML: \" + str(self.logFML))\n\n print(\"## FML computed!\")\n print(\"## Done!\")", "def __init__(self, start_time, forcing_step_seconds, forcing_num_steps,\n xname, yname, xsize, ysize, input_directory, output_directory,\n work_directory, template_directory, input_file_pattern,\n archive_directory, strict=True):\n 
self.start_time = start_time\n self.forcing_num_steps = forcing_num_steps\n self.forcing_step_seconds = forcing_step_seconds\n self.xname = xname\n self.yname = yname\n # Grid must have positive sizes in x and y directions.\n if strict and (xsize <= 0 or ysize <= 0):\n msg = 'Grid sizes must be >= 1, got xsize={} and ysize={}.'\n raise ConfigurationError(msg.format(xsize, ysize))\n self.xsize = xsize\n self.ysize = ysize\n # The input and template directories are required to exist, the output\n # and work directories may be able to be created when required.\n if strict and not os.path.exists(input_directory):\n msg = 'The input directory \"{}\" does not exist.'\n raise ConfigurationError(msg.format(input_directory))\n self.input_directory = input_directory\n if strict and not os.path.exists(template_directory):\n msg = 'The template directory \"{}\" does not exist.'\n raise ConfigurationError(msg.format(template_directory))\n self.template_directory = template_directory\n self.work_directory = work_directory\n self.output_directory = output_directory\n self.input_file_pattern = input_file_pattern\n self.archive_directory = archive_directory", "def setup(self, cfg):\n super().setup(cfg)\n\n \"\"\"\n TODO override the date format to ISOsomething standard...\n \"\"\"\n #general_fmt = r\"%(asctime)s [%(process)3d] [%(levelname)-7s] %(message)s\"\n #Gunicorn 'access' somehow has a very different requestion context. So the ip getting is left out, it is inserted by access below\n general_formatter = RequestFormatter(\n '[%(asctime)s] [%(base_hostname)s:%(hostname)s:%(process)3d] [%(levelname)-7s] %(message)s'\n )\n #print(self.cfg.access_log_format)\n #self.cfg.access_log_format = general_fmt\n\n # Override Gunicorn's `error_log` configuration.\n self._set_handler( self.error_log, cfg.errorlog, general_formatter )\n\n #Push the general format at our the access formatter, which will publish specialised messages\n self._set_handler( self.access_log, cfg.accesslog, general_formatter )", "def __init__(self, name, config):\n self.name = name\n self.config = config\n self.logger = logging.getLogger(name)\n if 'type' not in config:\n self.config['type'] = DEFAULT_BACKUP_TYPE\n elif config['type'] not in SUPPORTED_BACKUP_TYPES:\n self.logger.error('Unknown dump type: %s', config['type'])\n sys.exit(-1)\n if 'retention' not in config:\n self.config['retention'] = DEFAULT_RETENTION_DAYS\n else:\n self.config['retention'] = int(config['retention'])", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)" ]
[ "0.5815659", "0.5406954", "0.53990644", "0.53855544", "0.5383662", "0.5338992", "0.5320965", "0.5303831", "0.5302118", "0.5297782", "0.52322584", "0.51910156", "0.51902056", "0.5185881", "0.5184995", "0.5149911", "0.5142809", "0.51350987", "0.51125443", "0.5085775", "0.5084318", "0.5083843", "0.50741166", "0.50733346", "0.5055613", "0.5020785", "0.501958", "0.5002097", "0.5001654", "0.5001007" ]
0.64838976
0
Before drawing the profile, see whether this object can be trivially skipped. The base method checks if the object is completely off the main image, so the intersection bounds will be undefined. In this case, don't bother drawing the postage stamp for this object. Parameters: prof (the profile to draw), image (the image onto which to draw the profile, which may be None), method (the method to use in drawImage), offset (the offset to apply when drawing), config (the configuration dict for the stamp field), base (the base configuration dict), logger (if given, a logger object to log progress). Returns: whether to skip drawing this object.
def updateSkip(self, prof, image, method, offset, config, base, logger): # NOTE: There are currently unresolved issues with the image size checking of chromatic # objects. For now, we ignore any possible speed increases and skip the check. # if isinstance(prof, galsim.ChromaticObject): # return False if prof is not None and base.get('current_image',None) is not None: if image is None: prof = base['wcs'].toImage(prof, image_pos=base['image_pos']) # NOTE: Old version: # N = prof.getGoodImageSize(1.) if isinstance(prof, galsim.GSObject): N = prof.getGoodImageSize(1.) elif isinstance(prof, galsim.ChromaticObject): # TODO: Finish implementation # return False pudb.set_trace() # Find the suggested image size for each object given the choice of scale, and use the # maximum just to be safe. print '\nprof.original = {}'.format(prof.original) print '\nprof.original.obj_list = {}'.format(prof.original.obj_list) # print '\nprof.objlist = {}'.format(prof.original.obj_list) obj_list = prof.original.obj_list possible_im_sizes = [] for obj in obj_list: print '\n obj : {}'.format(obj) possible_im_sizes.append([ ob.getGoodImageSize(1.) for ob in obj]) print 'possible_im_sizes : {}'.format(possible_im_sizes) N = np.max(possible_im_sizes) N += 2 + int(np.abs(offset.x) + np.abs(offset.y)) bounds = galsim._BoundsI(1,N,1,N) else: bounds = image.bounds # Set the origin appropriately stamp_center = base['stamp_center'] if stamp_center: bounds = bounds.shift(stamp_center - bounds.center) else: bounds = bounds.shift(base.get('image_origin',galsim.PositionI(1,1)) - galsim.PositionI(bounds.xmin, bounds.ymin)) overlap = bounds & base['current_image'].bounds if not overlap.isDefined(): logger.info('obj %d: skip drawing object because its image will be entirely off ' 'the main image.', base['obj_num']) return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, prof, image, method, offset, config, base, logger, **kwargs):\n # ... draw prof onto the given image (making a new Image if necessary)\n if prof is None:\n return image\n else:\n logger = galsim.config.LoggerWrapper(logger)\n # Setup the kwargs to pass to drawImage\n # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.)\n kwargs['image'] = image\n kwargs['offset'] = offset\n kwargs['method'] = method\n if 'wmult' in config and 'wmult' not in kwargs: # pragma: no cover\n kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0]\n if 'wcs' not in kwargs and 'scale' not in kwargs:\n kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos'])\n if method == 'phot' and 'rng' not in kwargs:\n kwargs['rng'] = galsim.config.GetRNG(config, base, logger, \"method='phot'\")\n\n # Check validity of extra phot options:\n max_extra_noise = None\n if 'n_photons' in config and 'n_photons' not in kwargs:\n if method != 'phot':\n raise AttributeError('n_photons is invalid with method != phot')\n if 'max_extra_noise' in config:\n logger.warning(\n \"Both 'max_extra_noise' and 'n_photons' are set in config dict, \"+\n \"ignoring 'max_extra_noise'.\")\n kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0]\n elif 'max_extra_noise' in config:\n max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0]\n if method != 'phot' and max_extra_noise is not None:\n raise AttributeError('max_extra_noise is invalid with method != phot')\n\n if 'poisson_flux' in config and 'poisson_flux' not in kwargs:\n if method != 'phot':\n raise AttributeError('poisson_flux is invalid with method != phot')\n kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0]\n\n if max_extra_noise is not None and 'max_extra_noise' not in kwargs:\n if max_extra_noise < 0.:\n raise ValueError(\"image.max_extra_noise cannot be negative\")\n if 'image' in base and 'noise' in base['image']:\n noise_var = galsim.config.CalculateNoiseVariance(base)\n else:\n raise AttributeError(\"Need to specify noise level when using max_extra_noise\")\n if noise_var < 0.:\n raise ValueError(\"noise_var calculated to be < 0.\")\n max_extra_noise *= noise_var\n kwargs['max_extra_noise'] = max_extra_noise\n\n if logger.isEnabledFor(logging.DEBUG):\n # Don't output the full image array. 
Use str(image) for that kwarg.\n alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k])\n for k in kwargs])\n logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs)\n logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof)\n try:\n # NOTE: Old version:\n # image = prof.drawImage(**kwargs)\n if isinstance(prof, galsim.GSObject):\n image = prof.drawImage(**kwargs)\n elif isinstance(prof, galsim.ChromaticObject):\n bp = {}\n for key in (self._req_bp_fields+self._opt_bp_fields):\n try:\n bp[key] = config['bandpass'][key]\n except KeyError:\n bp[key] = None\n\n bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'], red_limit=bp['red_limit'],\n wave_type=bp['wave_type'], throughput=bp['throughput'],\n zeropoint=bp['zeropoint'])\n\n image = prof.drawImage(bandpass=bandpass, **kwargs)\n\n except Exception as e: # pragma: no cover\n logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof)\n raise\n return image", "def testDiagonalProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n # Trigger tool button for diagonal profile mode\n self.toolBar.lineAction.trigger()\n\n # draw profile line\n widget.setFocus(qt.Qt.OtherFocusReason)\n self.mouseMove(widget, pos=pos1)\n self.qWait(100)\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.qWait(100)\n self.mouseMove(widget, pos=pos2)\n self.qWait(100)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n self.qWait(100)\n\n manager = self.toolBar.getProfileManager()\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n roi = manager.getCurrentRoi()\n self.assertIsNotNone(roi)\n roi.setProfileLineWidth(3)\n roi.setProfileMethod(method)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n curveItem = roi.getProfileWindow().getCurrentPlotWidget().getAllCurves()[0]\n if method == 'sum':\n self.assertTrue(curveItem.getData()[1].max() > 10000)\n elif method == 'mean':\n self.assertTrue(curveItem.getData()[1].max() < 10000)\n\n # Remove the ROI so the profile window is also removed\n roiManager = manager.getRoiManager()\n roiManager.removeRoi(roi)\n self.qWait(100)", "def test_profiler(self):\n\n a = np.arange(16, dtype=np.float32)\n b = np.arange(16, dtype=np.float32)\n p = profiler.Profile()\n try:\n p.enable()\n dot(a, b)\n p.disable()\n stats = pstats.Stats(p).strip_dirs()\n self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)\n finally:\n # make sure the profiler is deactivated when this test is done so as not to\n # pollute any other tests\n p.disable()\n del p", "def merge_profile(prof1, prof2):\r\n new_t = []\r\n new_l = []\r\n new_sub_profile = []\r\n #merge common(same object) opt\r\n for l in set(prof1[0]).intersection(set(prof2[0])):\r\n idx1 = prof1[0].index(l)\r\n idx2 = prof2[0].index(l)\r\n new_t.append(prof1[1][idx1] +\r\n prof2[1][idx2])\r\n new_l.append(l)\r\n if hasattr(l, 'merge_profile'):\r\n assert len(prof1[6][idx1]) == len(prof2[6][idx2])\r\n new_sub_profile.append(l.merge_profile(prof1[6][idx1],\r\n prof2[6][idx2]))\r\n else:\r\n new_sub_profile.append(None)\r\n\r\n # merge not common opt\r\n from theano.compat.six import 
StringIO\r\n for l in set(prof1[0]).symmetric_difference(set(prof2[0])):\r\n #The set trick above only work for the same object optimization\r\n #It don't work for equivalent optimization.\r\n #So we try to merge equivalent optimization here.\r\n new_l_names = [o.name for o in new_l]\r\n if l.name in new_l_names:\r\n idx = new_l_names.index(l.name)\r\n io1 = StringIO()\r\n io2 = StringIO()\r\n l.print_summary(io1)\r\n new_l[idx].print_summary(io2)\r\n if io1.read() == io2.read():\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t[idx] += p[1][p[0].index(l)]\r\n if hasattr(l, 'merge_profile'):\r\n assert len(p[6][p[0].index(l)]) == \\\r\n len(new_sub_profile[idx])\r\n new_sub_profile[idx] = l.merge_profile(\r\n new_sub_profile[idx], p[6][p[0].index(l)])\r\n else:\r\n new_sub_profile[idx] = None\r\n continue\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t.append(p[1][p[0].index(l)])\r\n idx = p[0].index(l)\r\n new_l.append(l)\r\n new_sub_profile.append(p[6][idx])\r\n\r\n new_opt = SeqOptimizer(*new_l)\r\n #We need to assert based on the name as we merge also based on\r\n #the name.\r\n assert set([l.name for l in prof1[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert set([l.name for l in prof2[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert len(new_t) == len(new_opt) == len(new_sub_profile)\r\n return (new_opt, new_t, prof1[2] + prof2[2],\r\n prof1[3] + prof2[3],\r\n -1, -1, new_sub_profile, [])", "def test_profiler(self):\n cmdline = [\n \"starfish\",\n \"--profile\",\n \"noop\",\n ]\n if cmdline[0] == 'starfish':\n coverage_cmdline = [\n \"coverage\", \"run\",\n \"-p\",\n \"--source\", \"starfish\",\n \"-m\", \"starfish.starfish\",\n ]\n coverage_cmdline.extend(cmdline[1:])\n cmdline = coverage_cmdline\n env = os.environ.copy()\n env[PROFILER_NOOP_ENVVAR] = \"\"\n subprocess.check_call(cmdline, env=env)", "def plot_visco_profiles(pointsh5, skip=slice(None,None,1), xscale=1e3, yscale=1e-2, tscale=3.1536e7, adjustRadial=False, benchmark=[], title=None):\n\tplt.figure()\n\n\tcoords,data,number,times = pu.load_h5_visco(pointsh5)\n\n\t#x = 1e3*np.loadtxt(points,usecols=[0]) # output_points2.txt\n\t#y = np.zeros_like(x)\n\tx = coords[:,0]\n\ty = np.zeros_like(x)\n\n\t# NOTE: plot elastic solution by passing dictionary as showelastic\n\t# Plot analytic elastic solution (t=0)\n\t#print(benchmark)\n\tif len(benchmark)>=1:\n\t\tur = zeros_like(x)\n\t\tuz = np.zeros_like(x)\n\t\tfor b in benchmark:\n\t\t\turi,uzi = m.calc_mogi_dp(x,y,**params)\n\t\t\tur += uri\n\t\t\tuz += uzi\n\t\tplt.plot(x*xscale,uz*yscale,'ko',label='benchmark')\n\n\t# Convert units\n\t#ur = np.hypot(data[:,:,0], data[:,:,1]) #assume progiles are along EW profile\n\tur = data[:,:,0]\n\tuz = data[:,:,2]\n\tx = x / xscale\n\tur = ur / yscale #cm\n\tuz = uz / yscale #cm\n\ttimes = times / tscale\n\t#times = times / 8.64e4 #days\n\t#times = times / 31536000 #years\n\n\t#plots = np.arange(0,times.size,skip)\n\t#print(plots.size)\n\t#way to cycle through markers if plotting many lines\n\t#marker = itertools.cycle(['o','^','s','D']) #plot(marker=marker.next() iterates list)\n\t#way to use gradually changing colors from a colormap\n\t#color = plt.cm.jet(1.0*i/plots.size)\n\tindplots = np.arange(times.size-1)\n\tprint(indplots)\n\tindplots = indplots[skip]\n\tprint(indplots)\n\tfor i in indplots:\n\t\tline, = plt.plot(x, uz[i], color=plt.cm.jet(1.0*i/indplots[-1]), label='{:.1f}'.format(times[i]))\n\t\tplt.plot(x, ur[i], ls='dashed', color=line.get_color())\n\t#print 
uz[i]\n\t#print uz[i-1]\n\n\tif title:\n\t\tplt.title(title)\n\telse:\n\t\tplt.title(pointsh5)\n\n\tplt.axhline(color='k',linestyle='dashed')\n\tplt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\tplt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.show()\n\tplt.legend(title='{}'.format(get_unit(tscale)))\n\tplt.grid()", "def testAlignedProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n with self.subTest(mode=action.text()):\n # Trigger tool button for mode\n action.trigger()\n # Without image\n self.mouseMove(widget, pos=pos1)\n self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n # with image\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(widget, pos=pos2)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n self.mouseMove(widget)\n self.mouseClick(widget, qt.Qt.LeftButton)\n\n manager = self.toolBar.getProfileManager()\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break", "def test_remove_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n spawn_not_in_list = SpawningProfile(\"spawn3\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn_not_in_list)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 1\n\n i.remove_spawning_profile(default_spawn)\n\n assert len(i.get_spawning_profile_list()) == 0\n assert not i.get_spawning_profile_list()", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()", "def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n 
right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")", "def test_add_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n assert not i.get_spawning_profile_list()\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n\n assert i.get_spawning_profile_list()\n assert len(i.get_spawning_profile_list()) == 1\n\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2", "def test_remove_spawning_profile_from_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) != 0:\n assert True\n\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n assert True\n break\n\n tester.delete_spawning_profile_from_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) == 0:\n assert True", "def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n\n try:\n __import__(\"gobject\")\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gtk\n import pydot\n except ImportError as ex:\n errMsg = \"profiling requires third-party libraries ('%s') \" % getSafeExString(ex)\n errMsg += \"(Hint: 'sudo apt-get install python-pydot python-pyparsing python-profiler graphviz')\"\n logger.error(errMsg)\n\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.dot\")\n\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.png\")\n\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n\n infoMsg = \"profiling the execution into file '%s'\" % profileOutputFile\n logger.info(infoMsg)\n\n # Start sqlmap main function and generate a raw profile file\n cProfile.run(\"start()\", profileOutputFile)\n\n infoMsg = 
\"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n\n # Create dot file by using extra/gprof2dot/gprof2dot.py\n # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n\n # Create graph image (png) by using pydot (python-pydot)\n # http://code.google.com/p/pydot/\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n\n # Reference: http://stackoverflow.com/questions/38176472/graph-write-pdfiris-pdf-attributeerror-list-object-has-no-attribute-writ\n if isinstance(pydotGraph, list):\n pydotGraph = pydotGraph[0]\n\n try:\n pydotGraph.write_png(imageOutputFile)\n except OSError:\n errMsg = \"profiling requires graphviz installed \"\n errMsg += \"(Hint: 'sudo apt-get install graphviz')\"\n logger.error(errMsg)\n else:\n infoMsg = \"displaying interactive graph with xdot library\"\n logger.info(infoMsg)\n\n # Display interactive Graphviz dot file by using extra/xdot/xdot.py\n # http://code.google.com/p/jrfonseca/wiki/XDot\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter(\"dot\")\n win.open_file(dotOutputFile)\n gtk.main()", "def paintAvatar(self):\n self.paintBody()\n self.paintShoes()\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n self.paintShirt()\n self.paintTrousers()\n else:\n self.paintSkirt()\n self.paintHead()\n self.paintHair()\n self.paintMask()", "def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def profile(args):\n\n if not args.first_batches_to_skip < args.max_batch_num:\n raise ValueError(\"arg 'first_batches_to_skip' must be smaller than \"\n \"'max_batch_num'.\")\n if not args.first_batches_to_skip >= 0:\n raise ValueError(\n \"arg 'first_batches_to_skip' must not be smaller than 0.\")\n\n _, avg_cost, accuracy = stacked_lstmp_model(\n frame_dim=args.frame_dim,\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=args.class_num,\n parallel=args.parallel)\n\n optimizer = fluid.optimizer.Adam(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=args.learning_rate,\n decay_steps=1879,\n decay_rate=1 / 1.2,\n staircase=True))\n optimizer.minimize(avg_cost)\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n 
trans_splice.TransSplice(5, 5), trans_delay.TransDelay(5)\n ]\n\n data_reader = reader.AsyncDataReader(\n args.feature_lst, args.label_lst, -1, split_sentence_threshold=1024)\n data_reader.set_transformers(ltrans)\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n sorted_key = None if args.sorted_key is 'None' else args.sorted_key\n with profiler.profiler(args.device, sorted_key) as prof:\n frames_seen, start_time = 0, 0.0\n for batch_id, batch_data in enumerate(\n data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n if batch_id >= args.max_batch_num:\n break\n if args.first_batches_to_skip == batch_id:\n profiler.reset_profiler()\n start_time = time.time()\n frames_seen = 0\n # load_data\n (features, labels, lod, _) = batch_data\n features = np.reshape(features, (-1, 11, 3, args.frame_dim))\n features = np.transpose(features, (0, 2, 1, 3))\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n frames_seen += lod[-1]\n\n outs = exe.run(fluid.default_main_program(),\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[avg_cost, accuracy]\n if args.print_train_acc else [],\n return_numpy=False)\n\n if args.print_train_acc:\n print(\"Batch %d acc: %f\" %\n (batch_id, lodtensor_to_ndarray(outs[1])[0]))\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n time_consumed = time.time() - start_time\n frames_per_sec = frames_seen / time_consumed\n print(\"\\nTime consumed: %f s, performance: %f frames/s.\" %\n (time_consumed, frames_per_sec))", "def _enable_profiling():\n import cProfile\n import atexit\n global _profiler\n _profiler = cProfile.Profile()\n _profiler.enable()\n atexit.register(_profile_atexit)", "def testProfile2D(self):\n self.plot = StackView()\n self.plot.show()\n self.qWaitForWindowExposed(self.plot)\n\n self.plot.setStack(numpy.array([[[0, 1], [2, 3]],\n [[4, 5], [6, 7]]]))\n\n toolBar = self.plot.getProfileToolbar()\n\n manager = toolBar.getProfileManager()\n roiManager = manager.getRoiManager()\n\n roi = rois.ProfileImageStackHorizontalLineROI()\n roi.setPosition(0.5)\n roi.setProfileType(\"2D\")\n roiManager.addRoi(roi)\n roiManager.setCurrentRoi(roi)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot2D)\n\n roi.setProfileType(\"1D\")\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot1D)", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def test_add_spawning_profile_to_intersection():\n tester = TestClass()\n intersections = 
tester.add_spawning_profile_to_intersection()\n\n attached = False\n\n for i in intersections:\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n attached = True\n break;\n\n assert attached", "def trace_base(opt_model, pupil, fld, wvl, apply_vignetting=True, **kwargs):\n vig_pupil = fld.apply_vignetting(pupil) if apply_vignetting else pupil\n osp = opt_model.optical_spec\n fod = opt_model['analysis_results']['parax_data'].fod\n eprad = fod.enp_radius\n aim_pt = np.array([0., 0.])\n if hasattr(fld, 'aim_pt') and fld.aim_pt is not None:\n aim_pt = fld.aim_pt\n pt1 = np.array([eprad*vig_pupil[0]+aim_pt[0], eprad*vig_pupil[1]+aim_pt[1],\n fod.obj_dist+fod.enp_dist])\n pt0 = osp.obj_coords(fld)\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n sm = opt_model.seq_model\n # To handle virtual object distances, always propagate from \n # the object in a positive Z direction.\n if dir0[2] * sm.z_dir[0] < 0:\n dir0 = -dir0\n return rt.trace(sm, pt0, dir0, wvl, **kwargs)", "def apply_double_profile(plotDict, args=None):\n\tif not 'prof' in plotDict['tree_draw_options'] or 'profs' in plotDict['tree_draw_options']:\n\t\tif isinstance(plotDict['tree_draw_options'], basestring):\n\t\t\tplotDict['tree_draw_options'] = [plotDict['tree_draw_options']]\n\t\tplotDict['tree_draw_options'].append('prof')\n\t# Parameter List Expansion\n\t# the x vs x profile must be an exakt match of y vs x\n\t# we thus must replicate all settings for their position to match\n\t# settings we need to replicate in a controlled fashion\n\tinput_root_opts = ['nicks', 'x_expressions', 'y_expressions', 'z_expressions', 'x_bins', 'y_bins', 'z_bins', 'scale_factors', 'files', 'directories', 'folders', 'weights', 'friend_trees', 'tree_draw_options']\n\t\n\tif not plotDict.get('files'):\n\t\tplotDict['files'] = get_input_files(args)[0]\n\t# make sure all n-length (non-0,1) objects have the same size\n\topt_n_length_max = max(len(plotDict.get(opt_name, ())) for opt_name in input_root_opts if not isinstance(plotDict.get(opt_name), str))\n\tassert opt_n_length_max > 0, 'Cannot expand empty plot definition'\n\tfor opt_name in input_root_opts:\n\t\tif opt_name not in plotDict or isinstance(plotDict[opt_name], str):\n\t\t\tcontinue\n\t\tassert len(plotDict[opt_name]) <= 1 or len(plotDict[opt_name]) == opt_n_length_max, \"Replication requires all input_root options to be either of 0, 1 or same max length ('%s' is %d/%d)\" % (opt_name, len(plotDict[opt_name]), opt_n_length_max)\n\t\t# TODO: dunno if checking for None is required, saw this in HP - MF@20151130\n\t\tif not plotDict[opt_name] or plotDict[opt_name][0] is None:\n\t\t\tcontinue\n\t\tif len(plotDict[opt_name]) == 1:\n\t\t\tplotDict[opt_name] = plotDict[opt_name] * opt_n_length_max\n\t\t# never modify inplace - input may be mutable and used elsewhere/recursively\n\t\tplotDict[opt_name] = plotDict[opt_name][:] * 2\n\tif not plotDict.get('nicks') or plotDict['nicks'][0] is None:\n\t\tplotDict['nicks'] = [\"nick%d\" % nick for nick in xrange(len(plotDict['y_expressions']))]\n\t# X-Y Profile matching\n\t# explicitly create new x profiles\n\tplotDict['y_expressions'] = plotDict['y_expressions'][:opt_n_length_max] + plotDict['x_expressions'][opt_n_length_max:]\n\tplotDict['nicks'] = plotDict['nicks'][opt_n_length_max:] + ['%s_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# create new y vs <x> graphs\n\tplotDict['analysis_modules'] = plotDict.get('analysis_modules', 
[])[:]\n\tplotDict['analysis_modules'].insert(0, 'TGraphFromHistograms')\n\tplotDict['tgraph_strip_empty'] = 'any'\n\tplotDict['tgraph_y_nicks'] = plotDict['nicks'][:opt_n_length_max]\n\tplotDict['tgraph_x_nicks'] = plotDict['nicks'][opt_n_length_max:]\n\tplotDict['tgraph_result_nicks'] = ['%s_vs_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# disable source plots\n\tplotDict['nicks_blacklist'] = [r'^%s$' % nick for nick in plotDict['nicks']]\n\treturn plotDict", "def testMethodProfile2D(self):\n\n toolBar = self.plot.getProfileToolbar()\n\n toolBar.vLineAction.trigger()\n plot2D = self.plot.getPlotWidget().getWidgetHandle()\n pos1 = plot2D.width() * 0.5, plot2D.height() * 0.5\n self.mouseClick(plot2D, qt.Qt.LeftButton, pos=pos1)\n\n manager = toolBar.getProfileManager()\n roi = manager.getCurrentRoi()\n roi.setProfileMethod(\"mean\")\n roi.setProfileType(\"2D\")\n roi.setProfileLineWidth(3)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n # check 2D 'mean' profile\n profilePlot = roi.getProfileWindow().getCurrentPlotWidget()\n data = profilePlot.getAllImages()[0].getData()\n expected = numpy.array([[1, 4], [7, 10], [13, 16]])\n numpy.testing.assert_almost_equal(data, expected)", "def plot_profile(outdir, xval='x', xscale=1, yscale=1, comp2los=False, adjustRadial=False,\n fig=True):\n #Load data\n path = os.path.join(outdir,'points.h5')\n x,y,z,ux,uy,uz = pu.extract_points(path)\n\n Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur = np.hypot(ux,uy)\n Y1 = ur / yscale\n if adjustRadial: #fix sign from hypot square root\n ur = pu.radial2negative(Y1)\n\n if fig:\n plt.figure()\n # otherwise profile added to active plot\n\n #plt.plot(X,uy/yscale,'r.-',label='Uy') #should be zero along EW axis\n de = 90e3 / xscale #eastern data extent\n if comp2los != False:\n data_extents = (X<=de)\n if comp2los == 'west': #switch sign of radial profile\n #ux = -ux #move to comp2los function\n X = -X\n Y1 = -Y1\n de = -de\n data_extents = (X>=de)\n\n los = pu.comp2los(x,ux,uy,uz,track=comp2los)\n plt.plot(X, los/yscale, 'k-', lw=2, label='Ulos_' + comp2los)\n plt.fill_between(X,los/yscale, where=data_extents, color='gray',alpha=0.5)\n\n plt.plot(X, Y, 'b-', lw=2, label='Uz')\n plt.plot(X, Y1, 'b--',lw=2, mfc='None',label='U{0}'.format(xval))\n\n # Annotate\n plt.title(outdir)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Uz [{}]'.format(get_unit(yscale)))\n plt.axhline(color='k')\n plt.axvline(de,color='k', linestyle='dashed', label='EW data extent') #EW extent of InSAR coverage\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "def set_profile(self, profile='default'):\n\n # parameters used by various subclasses\n # each set is indexed by a name, called a profile\n # Note that each parameter must also be listed in set_params method in order to get set\n self.profile = profile\n self.params = {\n 'default' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [44.,88.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center (vla)\n 'lat': 34.07875 # latitude of the array center 
(vla)\n },\n 'vlacrab' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [29.,58.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center\n 'lat': 34.07875 # latitude of the array center\n },\n 'psa' : {\n 'chans': n.array(range(140,150)), # channels to read\n 'dmarr' : [0.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 21.411, # longitude of the array center\n 'lat': -30.721 # latitude of the array center\n },\n 'pocob0329' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [0, 13.4, 26.8, 40.2, 53.5], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.005, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -121.470, # longitude of the array center\n 'lat': 40.817 # latitude of the array center\n },\n 'mwa' : {\n 'chans': n.array(n.arange(128)), # channels to read\n 'dmarr' : [0, 50.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 116.671, # longitude of the array center\n 'lat': -26.703 # latitude of the array center\n }\n }\n\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def pre_draw(p5_instance, draw_func):\n global _CTX_MIDDLE, _DEFAULT_FILL, _DEFAULT_LEADMULT, _DEFAULT_STROKE, _DEFAULT_TEXT_FILL\n\n global ADD, ALT, ARROW, AUTO, AUDIO, AXES, BACKSPACE, BASELINE, BEVEL, BEZIER, BLEND, BLUR, BOLD, BOLDITALIC\n global BOTTOM, BURN, CENTER, CHORD, CLAMP, CLOSE, CONTROL, CORNER, CORNERS, CROSS, CURVE, DARKEST\n global DEG_TO_RAD, DEGREES, DELETE, DIFFERENCE, DILATE, DODGE, DOWN_ARROW, ENTER, ERODE, ESCAPE, EXCLUSION\n global FILL, GRAY, GRID, HALF_PI, HAND, HARD_LIGHT, HSB, HSL, IMAGE, IMMEDIATE, INVERT, ITALIC, LANDSCAPE\n global LEFT, LEFT_ARROW, LIGHTEST, LINE_LOOP, LINE_STRIP, LINEAR, LINES, MIRROR, MITER, MOVE, MULTIPLY, NEAREST\n global NORMAL, OPAQUE, OPEN, OPTION, OVERLAY, P2D, PI, PIE, POINTS, PORTRAIT, POSTERIZE, PROJECT, QUAD_STRIP, QUADRATIC\n global QUADS, QUARTER_PI, RAD_TO_DEG, RADIANS, RADIUS, 
REPEAT, REPLACE, RETURN, RGB, RIGHT, RIGHT_ARROW\n global ROUND, SCREEN, SHIFT, SOFT_LIGHT, SQUARE, STROKE, SUBTRACT, TAB, TAU, TEXT, TEXTURE, THRESHOLD, TOP\n global TRIANGLE_FAN, TRIANGLE_STRIP, TRIANGLES, TWO_PI, UP_ARROW, VIDEO, WAIT, WEBGL\n\n global frameCount, focused, displayWidth, displayHeight, windowWidth, windowHeight, width, height\n global disableFriendlyErrors, deviceOrientation, accelerationX, accelerationY, accelerationZ\n global pAccelerationX, pAccelerationY, pAccelerationZ, rotationX, rotationY, rotationZ\n global pRotationX, pRotationY, pRotationZ, turnAxis, keyIsPressed, key, keyCode, mouseX, mouseY, pmouseX, pmouseY\n global winMouseX, winMouseY, pwinMouseX, pwinMouseY, mouseButton, mouseIsPressed, touches, pixels\n\n _CTX_MIDDLE = p5_instance._CTX_MIDDLE\n _DEFAULT_FILL = p5_instance._DEFAULT_FILL\n _DEFAULT_LEADMULT = p5_instance._DEFAULT_LEADMULT\n _DEFAULT_STROKE = p5_instance._DEFAULT_STROKE\n _DEFAULT_TEXT_FILL = p5_instance._DEFAULT_TEXT_FILL\n\n ADD = p5_instance.ADD\n ALT = p5_instance.ALT\n ARROW = p5_instance.ARROW\n AUDIO = p5_instance.AUDIO\n AUTO = p5_instance.AUTO\n AXES = p5_instance.AXES\n BACKSPACE = p5_instance.BACKSPACE\n BASELINE = p5_instance.BASELINE\n BEVEL = p5_instance.BEVEL\n BEZIER = p5_instance.BEZIER\n BLEND = p5_instance.BLEND\n BLUR = p5_instance.BLUR\n BOLD = p5_instance.BOLD\n BOLDITALIC = p5_instance.BOLDITALIC\n BOTTOM = p5_instance.BOTTOM\n BURN = p5_instance.BURN\n CENTER = p5_instance.CENTER\n CHORD = p5_instance.CHORD\n CLAMP = p5_instance.CLAMP\n CLOSE = p5_instance.CLOSE\n CONTROL = p5_instance.CONTROL\n CORNER = p5_instance.CORNER\n CORNERS = p5_instance.CORNERS\n CROSS = p5_instance.CROSS\n CURVE = p5_instance.CURVE\n DARKEST = p5_instance.DARKEST\n DEG_TO_RAD = p5_instance.DEG_TO_RAD\n DEGREES = p5_instance.DEGREES\n DELETE = p5_instance.DELETE\n DIFFERENCE = p5_instance.DIFFERENCE\n DILATE = p5_instance.DILATE\n DODGE = p5_instance.DODGE\n DOWN_ARROW = p5_instance.DOWN_ARROW\n ENTER = p5_instance.ENTER\n ERODE = p5_instance.ERODE\n ESCAPE = p5_instance.ESCAPE\n EXCLUSION = p5_instance.EXCLUSION\n FILL = p5_instance.FILL\n GRAY = p5_instance.GRAY\n GRID = p5_instance.GRID\n HALF_PI = p5_instance.HALF_PI\n HAND = p5_instance.HAND\n HARD_LIGHT = p5_instance.HARD_LIGHT\n HSB = p5_instance.HSB\n HSL = p5_instance.HSL\n IMAGE = p5_instance.IMAGE\n IMMEDIATE = p5_instance.IMMEDIATE\n INVERT = p5_instance.INVERT\n ITALIC = p5_instance.ITALIC\n LANDSCAPE = p5_instance.LANDSCAPE\n LEFT = p5_instance.LEFT\n LEFT_ARROW = p5_instance.LEFT_ARROW\n LIGHTEST = p5_instance.LIGHTEST\n LINE_LOOP = p5_instance.LINE_LOOP\n LINE_STRIP = p5_instance.LINE_STRIP\n LINEAR = p5_instance.LINEAR\n LINES = p5_instance.LINES\n MIRROR = p5_instance.MIRROR\n MITER = p5_instance.MITER\n MOVE = p5_instance.MOVE\n MULTIPLY = p5_instance.MULTIPLY\n NEAREST = p5_instance.NEAREST\n NORMAL = p5_instance.NORMAL\n OPAQUE = p5_instance.OPAQUE\n OPEN = p5_instance.OPEN\n OPTION = p5_instance.OPTION\n OVERLAY = p5_instance.OVERLAY\n P2D = p5_instance.P2D\n P3D = p5_instance.WEBGL\n PI = p5_instance.PI\n PIE = p5_instance.PIE\n POINTS = p5_instance.POINTS\n PORTRAIT = p5_instance.PORTRAIT\n POSTERIZE = p5_instance.POSTERIZE\n PROJECT = p5_instance.PROJECT\n QUAD_STRIP = p5_instance.QUAD_STRIP\n QUADRATIC = p5_instance.QUADRATIC\n QUADS = p5_instance.QUADS\n QUARTER_PI = p5_instance.QUARTER_PI\n RAD_TO_DEG = p5_instance.RAD_TO_DEG\n RADIANS = p5_instance.RADIANS\n RADIUS = p5_instance.RADIUS\n REPEAT = p5_instance.REPEAT\n REPLACE = p5_instance.REPLACE\n RETURN = 
p5_instance.RETURN\n RGB = p5_instance.RGB\n RIGHT = p5_instance.RIGHT\n RIGHT_ARROW = p5_instance.RIGHT_ARROW\n ROUND = p5_instance.ROUND\n SCREEN = p5_instance.SCREEN\n SHIFT = p5_instance.SHIFT\n SOFT_LIGHT = p5_instance.SOFT_LIGHT\n SQUARE = p5_instance.SQUARE\n STROKE = p5_instance.STROKE\n SUBTRACT = p5_instance.SUBTRACT\n TAB = p5_instance.TAB\n TAU = p5_instance.TAU\n TEXT = p5_instance.TEXT\n TEXTURE = p5_instance.TEXTURE\n THRESHOLD = p5_instance.THRESHOLD\n TOP = p5_instance.TOP\n TRIANGLE_FAN = p5_instance.TRIANGLE_FAN\n TRIANGLE_STRIP = p5_instance.TRIANGLE_STRIP\n TRIANGLES = p5_instance.TRIANGLES\n TWO_PI = p5_instance.TWO_PI\n UP_ARROW = p5_instance.UP_ARROW\n VIDEO = p5_instance.VIDEO\n WAIT = p5_instance.WAIT\n WEBGL = p5_instance.WEBGL\n\n frameCount = p5_instance.frameCount\n focused = p5_instance.focused\n displayWidth = p5_instance.displayWidth\n displayHeight = p5_instance.displayHeight\n windowWidth = p5_instance.windowWidth\n windowHeight = p5_instance.windowHeight\n width = p5_instance.width\n height = p5_instance.height\n disableFriendlyErrors = p5_instance.disableFriendlyErrors\n deviceOrientation = p5_instance.deviceOrientation\n accelerationX = p5_instance.accelerationX\n accelerationY = p5_instance.accelerationY\n accelerationZ = p5_instance.accelerationZ\n pAccelerationX = p5_instance.pAccelerationX\n pAccelerationY = p5_instance.pAccelerationY\n pAccelerationZ = p5_instance.pAccelerationZ\n rotationX = p5_instance.rotationX\n rotationY = p5_instance.rotationY\n rotationZ = p5_instance.rotationZ\n pRotationX = p5_instance.pRotationX\n pRotationY = p5_instance.pRotationY\n pRotationZ = p5_instance.pRotationZ\n turnAxis = p5_instance.turnAxis\n keyIsPressed = p5_instance.keyIsPressed\n key = p5_instance.key\n keyCode = p5_instance.keyCode\n mouseX = p5_instance.mouseX\n mouseY = p5_instance.mouseY\n pmouseX = p5_instance.pmouseX\n pmouseY = p5_instance.pmouseY\n winMouseX = p5_instance.winMouseX\n winMouseY = p5_instance.winMouseY\n pwinMouseX = p5_instance.pwinMouseX\n pwinMouseY = p5_instance.pwinMouseY\n mouseButton = p5_instance.mouseButton\n mouseIsPressed = p5_instance.mouseIsPressed\n touches = p5_instance.touches\n pixels = p5_instance.pixels\n\n return draw_func()", "def _should_profile(self) -> bool:\n if \"profile\" in self._allowed_plugins:\n if not self._one_shot:\n raise ValueError(\n \"Profile plugin currently only supported for one shot.\"\n )\n logger.info(\"Profile plugin is enalbed.\")\n return True\n return False", "def filterprofile(profile, settings):\n \n if settings.exclude is True and len(profile.description) == 0:\n print(\"EMPTY BIO\")\n return False\n\n if profile.description is None:\n return False\n\n if len(settings.include_keywords) > 1 and not any(kw in profile.description for kw in settings.include_keywords.splitlines()):\n print(\"NO KEYWORDS\")\n return False\n\n if profile.followers_count is None:\n return False\n \n if profile.followers_count < settings.followers:\n print(\"NUM FOLLOWERS\")\n return False\n\n if any(loc in profile.location for loc in settings.fromcountries.splitlines()):\n print(\"LOCATION\")\n return False\n\n if profile.statuses_count < settings.tweets:\n print(\"NUM TWEETS\")\n return False\n\n created = datetime.datetime.strptime(profile.created_at, \"%a %b %d %H:%M:%S %z %Y\")\n months = relativedelta(datetime.datetime.now(datetime.timezone.utc), created).years * 12\n if months == 0:\n if (profile.statuses_count / 12) > settings.tweetsperyear:\n print(\"TWEETS PER YEAR\")\n return False\n else:\n if 
(profile.statuses_count / months / 12) > settings.tweetsperyear:\n print(\"TWEETS PER YEAR\")\n return False\n\n if profile.status is not None:\n lasttweetdate = datetime.datetime.strptime(profile.status.created_at, \"%a %b %d %H:%M:%S %z %Y\").replace(tzinfo=None)\n \n lasttweetmonths = relativedelta(datetime.datetime.now(), lasttweetdate).years * 12\n if lasttweetmonths > settings.notweetsfor:\n print(\"LAST TWEET\")\n return False\n else:\n return False\n # else\n return True", "def addProfile(self, profile, color=None, close=False):\n if close:\n e1 = profile[0] # should always be a point\n if e1[0] != 0.0:\n profile = [(0.0, e1[1])] + profile\n e2 = profile[-1]\n if e2[0] != 0.0:\n if len(e2) == 2:\n profile.append((0.0, e2[1]))\n else:\n # profile ends in an arc\n profile.append((0.0, e2[0][1]))\n # previous line start x/y, for line -> arc\n px1 = py1 = None\n for e1, e2 in windowItr(profile, 2, 1):\n if e2 is None:\n break\n le1 = len(e1)\n le2 = len(e2)\n # line or start -> line\n if le1 == 2 and le2 == 2:\n x1, y1 = e1\n x2, y2 = e2\n self.blendTangent(False)\n patch = Patch.fromRevLineSeg(x1, y1, x2, y2, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = x1\n py1 = y1\n # line or start -> arc\n elif le1 == 2 and le2 == 3:\n x1, y1 = e1\n (x2, y2), (cx, cy), d = e2\n if px1 is not None:\n self.blendTangent(self._isLineTanToArc(px1, py1, x1, y1,\n cx, cy, d))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx, cy, d, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n # arc -> line\n elif le1 == 3 and le2 == 2:\n (aex, aey), (cx, cy), d = e1\n lex, ley = e2\n self.blendTangent(self._isLineTanToArc(lex, ley, aex, aey, cx,\n cy, d))\n patch = Patch.fromRevLineSeg(aex, aey, lex, ley, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = aex\n py1 = aey\n # arc -> arc\n else:\n (x1, y1), (cx1, cy1), d1 = e1\n (x2, y2), (cx2, cy2), d2 = e2\n self.blendTangent(self._isArcTangentToArc(x1, y1, cx1, cy1,\n cx2, cy2))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx2, cy2, d2,\n self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n self._bbox = BBox.fromVertices(self._sharedVertices)" ]
[ "0.6533734", "0.51599497", "0.47828445", "0.47232333", "0.45972005", "0.45731962", "0.45694324", "0.45572576", "0.45465982", "0.45340365", "0.44940332", "0.44605252", "0.44413173", "0.44082165", "0.4281743", "0.4263862", "0.42636248", "0.42611814", "0.42450166", "0.42342076", "0.4228299", "0.42116234", "0.4206318", "0.42003477", "0.41977915", "0.41658568", "0.41562256", "0.41488603", "0.4140673", "0.41311204" ]
0.67846763
0
Draw the profile on the postage stamp image. This is a slightly modified version of `stamp.DrawBasic()` which allows drawing of chromatic objects. Parameters: prof: the profile to draw; image: the image onto which to draw the profile (which may be None); method: the method to use in drawImage; offset: the offset to apply when drawing; config: the configuration dict for the stamp field; base: the base configuration dict; logger: if given, a logger object to log progress. Returns: the resulting image.
def draw(self, prof, image, method, offset, config, base, logger, **kwargs):
    # ... draw prof onto the given image (making a new Image if necessary)
    if prof is None:
        return image
    else:
        logger = galsim.config.LoggerWrapper(logger)
        # Setup the kwargs to pass to drawImage
        # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.)
        kwargs['image'] = image
        kwargs['offset'] = offset
        kwargs['method'] = method
        if 'wmult' in config and 'wmult' not in kwargs:  # pragma: no cover
            kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0]
        if 'wcs' not in kwargs and 'scale' not in kwargs:
            kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos'])
        if method == 'phot' and 'rng' not in kwargs:
            kwargs['rng'] = galsim.config.GetRNG(config, base, logger, "method='phot'")

        # Check validity of extra phot options:
        max_extra_noise = None
        if 'n_photons' in config and 'n_photons' not in kwargs:
            if method != 'phot':
                raise AttributeError('n_photons is invalid with method != phot')
            if 'max_extra_noise' in config:
                logger.warning(
                    "Both 'max_extra_noise' and 'n_photons' are set in config dict, "+
                    "ignoring 'max_extra_noise'.")
            kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0]
        elif 'max_extra_noise' in config:
            max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0]
            if method != 'phot' and max_extra_noise is not None:
                raise AttributeError('max_extra_noise is invalid with method != phot')

        if 'poisson_flux' in config and 'poisson_flux' not in kwargs:
            if method != 'phot':
                raise AttributeError('poisson_flux is invalid with method != phot')
            kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0]

        if max_extra_noise is not None and 'max_extra_noise' not in kwargs:
            if max_extra_noise < 0.:
                raise ValueError("image.max_extra_noise cannot be negative")
            if 'image' in base and 'noise' in base['image']:
                noise_var = galsim.config.CalculateNoiseVariance(base)
            else:
                raise AttributeError("Need to specify noise level when using max_extra_noise")
            if noise_var < 0.:
                raise ValueError("noise_var calculated to be < 0.")
            max_extra_noise *= noise_var
            kwargs['max_extra_noise'] = max_extra_noise

        if logger.isEnabledFor(logging.DEBUG):
            # Don't output the full image array. Use str(image) for that kwarg.
            alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k]) for k in kwargs])
            logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs)
            logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof)
        try:
            # NOTE: Old version:
            # image = prof.drawImage(**kwargs)
            if isinstance(prof, galsim.GSObject):
                image = prof.drawImage(**kwargs)
            elif isinstance(prof, galsim.ChromaticObject):
                bp = {}
                for key in (self._req_bp_fields+self._opt_bp_fields):
                    try:
                        bp[key] = config['bandpass'][key]
                    except KeyError:
                        bp[key] = None
                bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'],
                                           red_limit=bp['red_limit'],
                                           wave_type=bp['wave_type'],
                                           throughput=bp['throughput'],
                                           zeropoint=bp['zeropoint'])
                image = prof.drawImage(bandpass=bandpass, **kwargs)
        except Exception as e:  # pragma: no cover
            logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof)
            raise

        return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSkip(self, prof, image, method, offset, config, base, logger):\n\n # NOTE: There are currently unresolved issues with the image size checking of chromatic\n # objects. For now, we ignore any possible speed increases and skip the check.\n # if isinstance(prof, galsim.ChromaticObject):\n # return False\n\n if prof is not None and base.get('current_image',None) is not None:\n if image is None:\n prof = base['wcs'].toImage(prof, image_pos=base['image_pos'])\n # NOTE: Old version:\n # N = prof.getGoodImageSize(1.)\n if isinstance(prof, galsim.GSObject):\n N = prof.getGoodImageSize(1.)\n elif isinstance(prof, galsim.ChromaticObject):\n # TODO: Finish implementation\n # return False\n pudb.set_trace()\n # Find the suggested image size for each object given the choice of scale, and use the\n # maximum just to be safe.\n print '\\nprof.original = {}'.format(prof.original)\n print '\\nprof.original.obj_list = {}'.format(prof.original.obj_list)\n # print '\\nprof.objlist = {}'.format(prof.original.obj_list)\n obj_list = prof.original.obj_list\n possible_im_sizes = []\n for obj in obj_list:\n print '\\n obj : {}'.format(obj)\n possible_im_sizes.append([ ob.getGoodImageSize(1.) for ob in obj])\n print 'possible_im_sizes : {}'.format(possible_im_sizes)\n N = np.max(possible_im_sizes)\n N += 2 + int(np.abs(offset.x) + np.abs(offset.y))\n bounds = galsim._BoundsI(1,N,1,N)\n else:\n bounds = image.bounds\n\n # Set the origin appropriately\n stamp_center = base['stamp_center']\n if stamp_center:\n bounds = bounds.shift(stamp_center - bounds.center)\n else:\n bounds = bounds.shift(base.get('image_origin',galsim.PositionI(1,1)) -\n galsim.PositionI(bounds.xmin, bounds.ymin))\n\n overlap = bounds & base['current_image'].bounds\n if not overlap.isDefined():\n logger.info('obj %d: skip drawing object because its image will be entirely off '\n 'the main image.', base['obj_num'])\n return True\n\n return False", "def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n\n try:\n __import__(\"gobject\")\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gtk\n import pydot\n except ImportError as ex:\n errMsg = \"profiling requires third-party libraries ('%s') \" % getSafeExString(ex)\n errMsg += \"(Hint: 'sudo apt-get install python-pydot python-pyparsing python-profiler graphviz')\"\n logger.error(errMsg)\n\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.dot\")\n\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.png\")\n\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n\n infoMsg = \"profiling the execution into file '%s'\" % profileOutputFile\n logger.info(infoMsg)\n\n # Start sqlmap main function and generate a raw profile file\n cProfile.run(\"start()\", profileOutputFile)\n\n infoMsg = \"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n\n # Create dot file by using extra/gprof2dot/gprof2dot.py\n # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n 
profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n\n # Create graph image (png) by using pydot (python-pydot)\n # http://code.google.com/p/pydot/\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n\n # Reference: http://stackoverflow.com/questions/38176472/graph-write-pdfiris-pdf-attributeerror-list-object-has-no-attribute-writ\n if isinstance(pydotGraph, list):\n pydotGraph = pydotGraph[0]\n\n try:\n pydotGraph.write_png(imageOutputFile)\n except OSError:\n errMsg = \"profiling requires graphviz installed \"\n errMsg += \"(Hint: 'sudo apt-get install graphviz')\"\n logger.error(errMsg)\n else:\n infoMsg = \"displaying interactive graph with xdot library\"\n logger.info(infoMsg)\n\n # Display interactive Graphviz dot file by using extra/xdot/xdot.py\n # http://code.google.com/p/jrfonseca/wiki/XDot\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter(\"dot\")\n win.open_file(dotOutputFile)\n gtk.main()", "def addProfile(self, profile, color=None, close=False):\n if close:\n e1 = profile[0] # should always be a point\n if e1[0] != 0.0:\n profile = [(0.0, e1[1])] + profile\n e2 = profile[-1]\n if e2[0] != 0.0:\n if len(e2) == 2:\n profile.append((0.0, e2[1]))\n else:\n # profile ends in an arc\n profile.append((0.0, e2[0][1]))\n # previous line start x/y, for line -> arc\n px1 = py1 = None\n for e1, e2 in windowItr(profile, 2, 1):\n if e2 is None:\n break\n le1 = len(e1)\n le2 = len(e2)\n # line or start -> line\n if le1 == 2 and le2 == 2:\n x1, y1 = e1\n x2, y2 = e2\n self.blendTangent(False)\n patch = Patch.fromRevLineSeg(x1, y1, x2, y2, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = x1\n py1 = y1\n # line or start -> arc\n elif le1 == 2 and le2 == 3:\n x1, y1 = e1\n (x2, y2), (cx, cy), d = e2\n if px1 is not None:\n self.blendTangent(self._isLineTanToArc(px1, py1, x1, y1,\n cx, cy, d))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx, cy, d, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n # arc -> line\n elif le1 == 3 and le2 == 2:\n (aex, aey), (cx, cy), d = e1\n lex, ley = e2\n self.blendTangent(self._isLineTanToArc(lex, ley, aex, aey, cx,\n cy, d))\n patch = Patch.fromRevLineSeg(aex, aey, lex, ley, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = aex\n py1 = aey\n # arc -> arc\n else:\n (x1, y1), (cx1, cy1), d1 = e1\n (x2, y2), (cx2, cy2), d2 = e2\n self.blendTangent(self._isArcTangentToArc(x1, y1, cx1, cy1,\n cx2, cy2))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx2, cy2, d2,\n self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n self._bbox = BBox.fromVertices(self._sharedVertices)", "def testDiagonalProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n # Trigger tool button for diagonal profile mode\n self.toolBar.lineAction.trigger()\n\n # draw profile line\n widget.setFocus(qt.Qt.OtherFocusReason)\n self.mouseMove(widget, pos=pos1)\n self.qWait(100)\n self.mousePress(widget, 
qt.Qt.LeftButton, pos=pos1)\n self.qWait(100)\n self.mouseMove(widget, pos=pos2)\n self.qWait(100)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n self.qWait(100)\n\n manager = self.toolBar.getProfileManager()\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n roi = manager.getCurrentRoi()\n self.assertIsNotNone(roi)\n roi.setProfileLineWidth(3)\n roi.setProfileMethod(method)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n curveItem = roi.getProfileWindow().getCurrentPlotWidget().getAllCurves()[0]\n if method == 'sum':\n self.assertTrue(curveItem.getData()[1].max() > 10000)\n elif method == 'mean':\n self.assertTrue(curveItem.getData()[1].max() < 10000)\n\n # Remove the ROI so the profile window is also removed\n roiManager = manager.getRoiManager()\n roiManager.removeRoi(roi)\n self.qWait(100)", "def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")", "def set_profile(self, profile='default'):\n\n # parameters used by various subclasses\n # each set is indexed by a name, called a profile\n # Note that each parameter must also be listed in set_params method in order to get set\n self.profile = profile\n self.params = {\n 'default' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [44.,88.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array 
center (vla)\n 'lat': 34.07875 # latitude of the array center (vla)\n },\n 'vlacrab' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [29.,58.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center\n 'lat': 34.07875 # latitude of the array center\n },\n 'psa' : {\n 'chans': n.array(range(140,150)), # channels to read\n 'dmarr' : [0.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 21.411, # longitude of the array center\n 'lat': -30.721 # latitude of the array center\n },\n 'pocob0329' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [0, 13.4, 26.8, 40.2, 53.5], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.005, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -121.470, # longitude of the array center\n 'lat': 40.817 # latitude of the array center\n },\n 'mwa' : {\n 'chans': n.array(n.arange(128)), # channels to read\n 'dmarr' : [0, 50.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 116.671, # longitude of the array center\n 'lat': -26.703 # latitude of the array center\n }\n }\n\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def tool_draw_point(self,img,point,color=[0,0,0]):\n def s(pos):\n return int((pos + 1) / 2 * 128)\n if point is None:\n print(\"Warn: tool_draw_point Fail => point is None\")\n return img\n x, y = s(point[0]), s(point[1])\n img = cv2.rectangle(img, (x, y), (x, y), color, 5)\n return img", "def plot_profile(outdir, xval='x', xscale=1, yscale=1, comp2los=False, adjustRadial=False,\n fig=True):\n #Load data\n path = os.path.join(outdir,'points.h5')\n x,y,z,ux,uy,uz = pu.extract_points(path)\n\n Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur = np.hypot(ux,uy)\n Y1 = ur / yscale\n if adjustRadial: #fix sign from hypot square root\n ur = pu.radial2negative(Y1)\n\n if fig:\n plt.figure()\n # otherwise profile 
added to active plot\n\n #plt.plot(X,uy/yscale,'r.-',label='Uy') #should be zero along EW axis\n de = 90e3 / xscale #eastern data extent\n if comp2los != False:\n data_extents = (X<=de)\n if comp2los == 'west': #switch sign of radial profile\n #ux = -ux #move to comp2los function\n X = -X\n Y1 = -Y1\n de = -de\n data_extents = (X>=de)\n\n los = pu.comp2los(x,ux,uy,uz,track=comp2los)\n plt.plot(X, los/yscale, 'k-', lw=2, label='Ulos_' + comp2los)\n plt.fill_between(X,los/yscale, where=data_extents, color='gray',alpha=0.5)\n\n plt.plot(X, Y, 'b-', lw=2, label='Uz')\n plt.plot(X, Y1, 'b--',lw=2, mfc='None',label='U{0}'.format(xval))\n\n # Annotate\n plt.title(outdir)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Uz [{}]'.format(get_unit(yscale)))\n plt.axhline(color='k')\n plt.axvline(de,color='k', linestyle='dashed', label='EW data extent') #EW extent of InSAR coverage\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "def profile(self, profile):\n\n self.width = profile['width']\n self.height = profile['height']\n self.crs = profile['crs']\n self.interleave = profile['interleave']\n self.resampling = profile['resampling']", "def writeProfile(fname,prof):\n t = np.linspace(0,1,prof.shape[0],endpoint=False)\n fh = open(fname,'w')\n for x in range(prof.shape[0]):\n fh.write('%.7e %.7e\\n' % (t[x],prof[x]))\n fh.close()", "def paintAvatar(self):\n self.paintBody()\n self.paintShoes()\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n self.paintShirt()\n self.paintTrousers()\n else:\n self.paintSkirt()\n self.paintHead()\n self.paintHair()\n self.paintMask()", "def CreateProfileLikelihoodPlot(model, data, poi):\n\n nll = model.createNLL(data);\n profile = nll.createProfile(ROOT.RooArgSet(poi)); \n\n frame = poi.frame();\n ROOT.RooStats.HistFactory.FormatFrameForLikelihood(frame)\n\n nll.plotOn(frame, ROOT.RooCmdArg(\"ShiftToZero\",True), \n ROOT.RooCmdArg(\"LineColor\",ROOT.kRed), \n ROOT.RooCmdArg(\"LineStyle\",ROOT.kDashed) );\n profile.plotOn(frame);\n frame.SetMinimum(0);\n frame.SetMaximum(2.);\n canvas = ROOT.TCanvas( \"Profile Likelihood\", \"\", 800,600);\n frame.Draw(\"goff\");\n png_string = CanvasToPngString(canvas)\n return png_string", "def PlotProfile():\n (metadata, data) = Parse('/tmp/sdcard-scalability.txt')\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp('set pointsize 2')\n gp.clear()\n gp.xlabel('writer process')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n\n dataset = data[0]\n x = numpy.array(dataset.time, dtype='int_')\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='linespoints')\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def _atexit_print_fn():\n if config.profile:\n to_sum = []\n\n if config.profiling__destination == \"stderr\":\n destination_file = \"<stderr>\"\n elif config.profiling__destination == \"stdout\":\n destination_file = \"<stdout>\"\n else:\n destination_file = config.profiling__destination\n\n with extended_open(destination_file, mode=\"w\"):\n # Reverse sort in the order of compile+exec time\n for ps in sorted(\n _atexit_print_list, key=lambda a: a.compile_time + a.fct_call_time\n )[::-1]:\n if (\n ps.fct_callcount >= 1\n or ps.compile_time > 1\n or getattr(ps, \"callcount\", 0) > 1\n ):\n ps.summary(\n file=destination_file,\n n_ops_to_print=config.profiling__n_ops,\n n_apply_to_print=config.profiling__n_apply,\n )\n\n if ps.show_sum:\n 
to_sum.append(ps)\n else:\n # TODO print the name if there is one!\n print(\"Skipping empty Profile\")\n if len(to_sum) > 1:\n # Make a global profile\n cum = copy.copy(to_sum[0])\n msg = f\"Sum of all({len(to_sum)}) printed profiles at exit.\"\n cum.message = msg\n for ps in to_sum[1:]:\n for attr in [\n \"compile_time\",\n \"fct_call_time\",\n \"fct_callcount\",\n \"vm_call_time\",\n \"rewriter_time\",\n \"linker_time\",\n \"validate_time\",\n \"import_time\",\n \"linker_node_make_thunks\",\n ]:\n setattr(cum, attr, getattr(cum, attr) + getattr(ps, attr))\n\n # merge dictionary\n for attr in [\n \"apply_time\",\n \"apply_callcount\",\n \"apply_cimpl\",\n \"variable_shape\",\n \"variable_strides\",\n \"variable_offset\",\n \"linker_make_thunk_time\",\n ]:\n cum_attr = getattr(cum, attr)\n for key, val in getattr(ps, attr.items()):\n assert key not in cum_attr, (key, cum_attr)\n cum_attr[key] = val\n\n if cum.rewriter_profile and ps.rewriter_profile:\n try:\n merge = cum.rewriter_profile[0].merge_profile(\n cum.rewriter_profile[1], ps.rewriter_profile[1]\n )\n assert len(merge) == len(cum.rewriter_profile[1])\n cum.rewriter_profile = (cum.rewriter_profile[0], merge)\n except Exception as e:\n print(e)\n cum.rewriter_profile = None\n else:\n cum.rewriter_profile = None\n\n cum.summary(\n file=destination_file,\n n_ops_to_print=config.profiling__n_ops,\n n_apply_to_print=config.profiling__n_apply,\n )\n\n if config.print_global_stats:\n print_global_stats()", "def mark_person(snap, annot, switch_format=True):\n frame = cv2.imread(snap)\n height, width, _ = frame.shape\n\n iTL = 0\n iBR = 2\n TL = (int(annot.bounding_poly.normalized_vertices[iTL].x * width),\n int(annot.bounding_poly.normalized_vertices[iTL].y * height))\n BR = (int(annot.bounding_poly.normalized_vertices[iBR].x * width),\n int(annot.bounding_poly.normalized_vertices[iBR].y * height))\n \n print(f\"Drawing from {TL} to {BR}\")\n\n color = (0, 0, 255)\n thickness = 2\n frame = cv2.rectangle(frame, TL, BR, color, thickness)\n if switch_format:\n snap = snap.replace(\"png\", \"jpeg\")\n cv2.imwrite(snap, frame)\n return snap", "def testAlignedProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n with self.subTest(mode=action.text()):\n # Trigger tool button for mode\n action.trigger()\n # Without image\n self.mouseMove(widget, pos=pos1)\n self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n # with image\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(widget, pos=pos2)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n self.mouseMove(widget)\n self.mouseClick(widget, qt.Qt.LeftButton)\n\n manager = self.toolBar.getProfileManager()\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break", "def draw(self, base, level):\n\n a = base.a\n b = base.b\n\n if level > 0:\n delta = base.b - base.a\n px = a.x + delta.x / 3\n py = a.y + delta.y / 3\n rx = a.x + 2 * delta.x / 3\n ry = a.y + 2 * delta.y / 3\n p = Point(px, py)\n r = Point(rx, ry)\n q = Point(rx, ry)\n q.rotate_deg(60, p)\n self.draw(Line(a,p), level-1)\n self.draw(Line(p,q), level-1)\n self.draw(Line(q,r), 
level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile", "def extract_profile(tif, line_file, ds):\r\n\r\n import numpy as np\r\n import gdal\r\n import fiona\r\n from scipy.interpolate import interp1d\r\n# from scipy.interpolate import interp2d\r\n from scipy.ndimage import map_coordinates\r\n \r\n #%% Create evenly spaced points\r\n # Read coordinates of the profile line from shapefile\r\n fiona_obj = fiona.open(line_file)\r\n# line = fiona_obj.next()\r\n line = iter(fiona_obj).next() # this line is proper syntax for fiona v2. Corrected on Mar 12, 2021 by TCB\r\n coords = np.array( line['geometry']['coordinates'] ) # m the easting and northing coordinates of the vertices along the shapefile\r\n \r\n sqrd_deltas = np.diff(coords, axis=0)**2 # squared differences between x and y coordinates\r\n deltas = np.sum(sqrd_deltas, axis=1)**0.5 # m straight-line path length between adjacent points in the shapefile\r\n dist = np.cumsum( np.append(0, deltas) ) # m running distance along the shapefile from one end.\r\n \r\n disti = np.arange(dist[0], dist[-1], ds) # m vector of evenly spaced distances along the shapefile,\r\n # equivalent to an evenly spaced version of dist\r\n xi = interp1d(dist, coords[:,0])(disti) # m the easting coordinates of disti points, at which profile will be extracted\r\n yi = interp1d(dist, coords[:,1])(disti) # m the northing coordinates of disti points, at which profile will be extracted\r\n\r\n #%% Manipulate the raster and extract its data\r\n # ---- dimensions of geotiff\r\n gtif = gdal.Open(tif)\r\n xmin,xres,xskew,ymax,yskew,yres = gtif.GetGeoTransform()\r\n\r\n\r\n # convert the profile coordinates into pixel coordinates\r\n px = (xi - xmin) / xres\r\n py = (yi - ymax) / yres\r\n# px = np.round(col).astype(int)\r\n# py = np.round(row).astype(int)\r\n \r\n \r\n # pull out the array of raster data. Data are assumed to be in band 1.\r\n gtif_data = gtif.GetRasterBand(1).ReadAsArray()\r\n# gtif_data = band.ReadAsArray()px,py, 1, 1)\r\n \r\n # Two early versions of extacting the data:\r\n # profile = map_coordinates(gtif_data,[px,py],order=0,cval=np.nan)\r\n # profile = interp2d(np.arange(gtif_data.shape[1]), np.arange(gtif_data.shape[0]), \r\n # gtif_data)(px, py)\r\n\r\n # Interpolate within gtif_data at given pixel coordinates to identify values from the geotiff \r\n # Uses a 1st order spline interpolant to extract estimated values of\r\n # gtif_data at the (non-integer) pixel values px and py.\r\n # Function returns `cval' at undefined values of gtif_data.\r\n profile = map_coordinates(gtif_data, np.vstack((py, px)),\r\n order=1, cval=np.nan)\r\n \r\n# profile = np.array(profile,dtype=float)\r\n if type(profile[0]) == float:\r\n profile[np.abs(profile) == 9999] = np.nan\r\n \r\n return disti, profile", "def draw(self, prev_draw):\n # Std deviations for each parameter, the mean is the current location\n # strike = .375\n # length = 4.e3\n # width = 3.e3\n # depth = .1875\n # slip = .01\n # rake = .25\n # dip = .0875\n # longitude = .025\n # latitude = .01875\n strike_std = 5. 
# strike_std = 1.\n length_std = 5.e3 # length_std = 2.e4\n width_std = 2.e3 # width_std = 1.e4\n depth_std = 1.e3 # depth_std = 2.e3\n slip_std = 0.5 # slip_std = 0.5\n rake_std = 0.5 # rake_std = 0.5\n dip_std = 0.1 # dip_std = 0.1\n longitude_std = 0.15 # longitude_std = .025\n latitude_std = 0.15 # latitude_std = .025\n mean = np.zeros(9)\n # square for std => cov\n cov = np.diag(np.square([strike_std, length_std, width_std, depth_std, slip_std, rake_std,\n dip_std, longitude_std, latitude_std]))\n\n cov *= 0.25;\n\n # random draw from normal distribution\n e = stats.multivariate_normal(mean, cov).rvs()\n\n # does sample update normally\n print(\"Random walk difference:\", e)\n print(\"New draw:\", prev_draw + e)\n new_draw = prev_draw + e\n\n \"\"\"\n Here we make some fixed changes to the dip and depth according \n to a simple rule documented elsewhere. This fix will likely\n depreciate upon finishing proof of concept paper and work on 1852\n event.\n \"\"\"\n # doctor dip to 20 degrees as discussed\n new_draw[6] = 20\n # doctor depth according to adhoc fix\n new_draw[3] = self.doctored_depth_1852_adhoc(new_draw[7], new_draw[8], new_draw[6])\n\n # return appropriately doctored draw\n return new_draw", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def merge_profile(prof1, prof2):\r\n new_t = []\r\n new_l = []\r\n new_sub_profile = []\r\n #merge common(same object) opt\r\n for l in set(prof1[0]).intersection(set(prof2[0])):\r\n idx1 = prof1[0].index(l)\r\n idx2 = prof2[0].index(l)\r\n new_t.append(prof1[1][idx1] +\r\n prof2[1][idx2])\r\n new_l.append(l)\r\n if hasattr(l, 'merge_profile'):\r\n assert len(prof1[6][idx1]) == len(prof2[6][idx2])\r\n new_sub_profile.append(l.merge_profile(prof1[6][idx1],\r\n prof2[6][idx2]))\r\n else:\r\n new_sub_profile.append(None)\r\n\r\n # merge not common opt\r\n from theano.compat.six import StringIO\r\n for l in set(prof1[0]).symmetric_difference(set(prof2[0])):\r\n #The set trick above only work for the same object optimization\r\n #It don't work for equivalent optimization.\r\n #So we try to merge equivalent optimization here.\r\n new_l_names = [o.name for o in new_l]\r\n if l.name in new_l_names:\r\n idx = new_l_names.index(l.name)\r\n io1 = StringIO()\r\n io2 = StringIO()\r\n l.print_summary(io1)\r\n new_l[idx].print_summary(io2)\r\n if io1.read() == io2.read():\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t[idx] += p[1][p[0].index(l)]\r\n if hasattr(l, 'merge_profile'):\r\n assert len(p[6][p[0].index(l)]) == \\\r\n len(new_sub_profile[idx])\r\n new_sub_profile[idx] = l.merge_profile(\r\n new_sub_profile[idx], p[6][p[0].index(l)])\r\n else:\r\n new_sub_profile[idx] = None\r\n continue\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t.append(p[1][p[0].index(l)])\r\n idx = p[0].index(l)\r\n new_l.append(l)\r\n new_sub_profile.append(p[6][idx])\r\n\r\n new_opt = SeqOptimizer(*new_l)\r\n #We need to 
assert based on the name as we merge also based on\r\n #the name.\r\n assert set([l.name for l in prof1[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert set([l.name for l in prof2[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert len(new_t) == len(new_opt) == len(new_sub_profile)\r\n return (new_opt, new_t, prof1[2] + prof2[2],\r\n prof1[3] + prof2[3],\r\n -1, -1, new_sub_profile, [])", "def genFrameImages((widthPixels, heightPixels), flashColourGen, flashColourGenPipTrain, numFrames, FPS, superSamplingScale=8, BG_COLOUR=(0,0,0), TEXT_COLOUR=(255,255,255), GFX_COLOUR=(255,255,255), title=\"\", TITLE_COLOUR=(255,255,255), FRAMES_AS_FIELDS=False, frameSkipChecker=None, segments=[]):\n\n # we're going to draw a larger (super sampled) image and then scale it down\n # to get smoothing (compensating for the lack of anti-aliased drawing functions\n # in PIL)\n\n width = widthPixels * superSamplingScale\n height = heightPixels * superSamplingScale\n\n flashCols = list(flashColourGen)[0:numFrames]\n flashColsPipTrain = list(flashColourGenPipTrain)[0:numFrames]\n\n # we'll pretend we're working within a rectangle (0,0) - (160,90)\n # and use a scaling function to map to out actual dimensions\n scaler = AspectPreservingCoordinateScaler((160,90),(width,height))\n\n # load a font for text\n font = loadFont(sizePt = scaler.s(4))\n smallfont = loadFont(sizePt = scaler.s(4))\n \n # work out the segment description text, then check its size and adjust the fontsize to ensure it fits within bounding area\n if segments:\n segment_description_text = \"\\n\".join(map(lambda seg : seg[\"description\"], segments))\n tmpimg = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n tmpdraw = ImageDraw.Draw(tmpimg)\n w,h = tmpdraw.multiline_textsize(segment_description_text, font=smallfont)\n max_w, max_h = scaler.xy((140,13))\n \n shrink_factor = min(float(max_w) / w, float(max_h) / h, 1)\n smallfont = loadFont(sizePt = scaler.s(4*shrink_factor))\n \n poy = 0 # pie Y offset\n dfy = 65 # duration and FPS labels Y offset\n if segments:\n poy = -10\n dfy = 19\n\n\n\n WHITE=(255,255,255)\n BLACK=(0,0,0)\n\n if FRAMES_AS_FIELDS:\n imageName = \"field\"\n labelFps = FPS / 2\n else:\n imageName = \"frame\"\n labelFps = FPS\n\n\n for frameNum in range(0,numFrames):\n if frameSkipChecker is not None:\n shouldSkip=frameSkipChecker(frameNum)\n if shouldSkip:\n yield None\n continue\n\n timecode = frameNumToTimecode(frameNum, FPS, framesAreFields=FRAMES_AS_FIELDS)\n timeSecs = float(frameNum) / FPS\n nextTimeSecs = float(frameNum+1) / FPS # time of next frame after this\n durationTimecode = frameNumToTimecode(numFrames, FPS)\n\n # create black image and an object to let us draw on it\n img = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n draw = ImageDraw.Draw(img)\n\n # draw a flashing rectangular box on the left side\n flashColour = flashCols[frameNum]\n topLeft = scaler.xy((10, 30))\n bottomRight = scaler.xy((40, 60))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=GFX_COLOUR)\n topLeft = scaler.xy((11, 31))\n bottomRight = scaler.xy((39, 59))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=flashColour)\n\n # draw text label explaining to attach light sensor to the flashing box\n topLeft = scaler.xy((41, 37))\n draw.text(topLeft, \"Use light detector\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 41))\n draw.text(topLeft, \"on centre of\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 45))\n draw.text(topLeft, \"this box\", font=font, 
fill=TEXT_COLOUR)\n\n # draw text labels giving frame number, timecode and seconds covered by this frame\n topLeft = scaler.xy((10, 4))\n draw.text(topLeft, timecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 9))\n draw.text(topLeft, \"%06d of %d %ss\" % (frameNum, numFrames, imageName), font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 14))\n draw.text(topLeft, u\"%08.3f \\u2264 t < %08.3f secs\" % (timeSecs, nextTimeSecs), font=font, fill=TEXT_COLOUR)\n\n topLeft = scaler.xy((10,dfy))\n draw.text(topLeft, \"Duration: \" + durationTimecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10,dfy+5))\n draw.text(topLeft, \"%d fps\" % labelFps, font=font, fill=TEXT_COLOUR)\n\n # and more text labels, but this time right justified\n text = title\n w,h = font.getsize(text)\n topLeft = scaler.xy((150,4))\n topLeft = topLeft[0] - w, topLeft[1]\n draw.text(topLeft, text, font=font, fill=TITLE_COLOUR)\n\n # draw an outer ring segment indicating the time period covered by the current frame\n topLeft = scaler.xy((105, 20+poy))\n bottomRight = scaler.xy((155, 70+poy))\n angle1 = 360 * (frameNum % FPS) / FPS\n angle2 = 360 * ((frameNum % FPS) + 1) / FPS\n draw.pieslice(topLeft + bottomRight, start=270+angle1, end=270+angle2, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((108, 23+poy))\n bottomRight = scaler.xy((152, 67+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n\n\n # draw frame num ring\n topLeft = scaler.xy((110, 25+poy))\n bottomRight = scaler.xy((150, 65+poy))\n angle = 360 * (frameNum % FPS) / FPS\n if (frameNum / FPS) % 2 == 0: # if this is an even second (0-0.9, 2-2.9, 4-4.9 etc)\n draw.pieslice(topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n else:\n draw.pieslice(topLeft + bottomRight, start=270+angle, end=270+360, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((113, 28+poy))\n bottomRight = scaler.xy((147, 62+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n \n # draw outer for segments\n if segments:\n topLeft = scaler.xy((115-0.25, 30+poy-0.25))\n bottomRight = scaler.xy((145+0.25, 60+poy+0.25))\n draw.ellipse(topLeft + bottomRight, fill=WHITE, outline=None)\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n draw.ellipse(topLeft + bottomRight, fill=BLACK, outline=None)\n\n # draw progress pie\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n angle = 360.0*frameNum/numFrames\n precise_filled_pieslice(draw, topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n\n # draw segments over the pieslice\n if segments:\n for i in range(0, len(segments)):\n angle = math.radians(270 + 360.0*segments[i][\"startSecs\"]/numFrames*FPS)\n centre = scaler.xy((130,45+poy))\n armEnd = scaler.xy((130 + 15*math.cos(angle), 45+poy + 15*math.sin(angle)))\n draw.line([centre, armEnd], fill=WHITE, width=int(scaler.s(0.25)))\n \n segStartFrame = segments[i][\"startSecs\"] * FPS\n nextStartFrame = segments[(i+1) % len(segments)][\"startSecs\"] * FPS\n if nextStartFrame <= segStartFrame:\n nextStartFrame += numFrames\n midAngle = math.radians(270 + 360.0* (segStartFrame+nextStartFrame)/2/numFrames)\n w,h = font.getsize(segments[i][\"label\"])\n centre = scaler.xy((130 + 15*math.cos(midAngle)*0.7, 45+poy + 15*math.sin(midAngle)*0.7))\n topLeft = centre[0] - w/2, centre[1] - h/2\n draw.text(topLeft, 
segments[i][\"label\"], fill=WHITE, font=font)\n\n # draw segment long labels\n topLeft = scaler.xy((10,61))\n draw.multiline_text(topLeft, segment_description_text, fill=WHITE, font=smallfont)\n \n # draw pulse train at the bottom\n LIM=FPS\n NUM_BLOBS = 2*LIM + 1\n blobSpacing = 150.0/NUM_BLOBS\n\n for offset in range(-LIM, +LIM+1):\n left = 80+blobSpacing*(offset-0.5)\n right = 80+blobSpacing*(offset+0.5)\n\n topLeft = scaler.xy(( left, 80 ))\n bottomRight = scaler.xy(( right, 85 ))\n\n seqIndex = offset + frameNum\n if seqIndex >= 0 and seqIndex < numFrames:\n colour = flashColsPipTrain[seqIndex]\n draw.rectangle(topLeft + bottomRight, outline=None, fill = colour)\n\n if offset == 0:\n # draw blob above\n topLeft = scaler.xy(( left, 75 ))\n bottomRight = scaler.xy(( right, 80 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # and below\n topLeft = scaler.xy(( left, 85 ))\n bottomRight = scaler.xy(( right, 90 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # shrink the image using high quality downsampling\n try:\n scalingMode = Image.LANCZOS\n except AttributeError:\n scalingMode = Image.BICUBIC\n\n rescaledImage = img.resize((widthPixels,heightPixels), scalingMode)\n\n yield rescaledImage", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def stm_profile_plot(flat_file, points, scan_dir=0, cmap=None, vmin=None, vmax=None, xy_ticks=4, z_ticks=4):\n nm = 10 ** -9 # Define the nanometer to meter conversion.\n\n fig, ax = plt.subplots() # Create an instance of a pyplot figure and axis.\n\n # Set the minimum of the scan data to zero.\n figure_data = (flat_file[scan_dir].data - np.amin(flat_file[scan_dir].data)) / nm\n\n if cmap is None: # If no color scheme is given use hot as default.\n cmap = 'hot'\n\n if vmin is None: # If no z-axis minimum is given use minimum of the image data.\n vmin = np.amin(figure_data)\n if vmax is None: # If no z-axis maxmimum is given use 125% of the maximum in the image data.\n vmax = 1.25 * np.amax(figure_data)\n\n # Add image plot to the axis and define it so that the color map can be generated.\n cax = ax.imshow(figure_data, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n\n # Convert nanometer values into pixel numbers.\n for point in range(len(points)):\n points[point][0] = nm2pnt(points[point][0], flat_file, axis='x')\n points[point][1] = nm2pnt(points[point][1], flat_file, axis='y')\n\n # Plot the line profile points on the axis.\n ax.plot(points[:, 0], points[:, 1], 'bo-')\n\n xy_units = flat_file[scan_dir].info['unitxy'] # Get xy units.\n\n x_res = flat_file[scan_dir].info['xres'] # Get number of x-axis pixels.\n y_res = flat_file[scan_dir].info['yres'] # Get number of y-axis pixels.\n\n x_max = flat_file[scan_dir].info['xreal'] # Get x-axis image size.\n y_max = flat_file[scan_dir].info['yreal'] # get y-axis image size.\n\n # Set the x-axis ticks from number given.\n ax.set_xticks([x for x in np.arange(0, x_res + 1, x_res / xy_ticks)])\n # Set the x-axis tick labels from image size.\n ax.set_xticklabels([str(np.round(x, 1)) for x in np.arange(0, x_max + 1, x_max / xy_ticks)])\n\n # Set the y-axis ticks from number given\n ax.set_yticks([y for y in np.arange(0, y_res + 1, y_res / xy_ticks)])\n # Set the y-axis tick labels from image size.\n 
ax.set_yticklabels([str(np.round(y, 1)) for y in np.arange(0, y_max + 1, y_max / xy_ticks)])\n\n # Set the x- and y-axis labels.\n ax.set_xlabel(xy_units, size=16, weight='bold')\n ax.set_ylabel(xy_units, size=16, weight='bold')\n\n # Define the limits of the plot.\n ax.set_xlim([0, x_res])\n ax.set_ylim([0, y_res])\n\n # St the plot title with the image setpoint parameters.\n ax.set_title('Set-Points: {voltage} V, {current} pA'.format(voltage=flat_file[scan_dir].info['vgap'],\n current=np.round(\n flat_file[scan_dir].info['current']*10**12)))\n\n # Define list containing the z-axis ticks from number given.\n cbar_ticks = [z for z in np.arange(vmin, vmax * 1.01, vmax / z_ticks)]\n # Define the z-axis tick labels.\n cbar_ticklabels = [str(np.round(z, 1)) for z in np.arange(vmin, vmax + 1, vmax / z_ticks)]\n # Create color bar.\n cbar = fig.colorbar(cax, ticks=cbar_ticks)\n # Set the color bar tick labels.\n cbar.ax.set_yticklabels(cbar_ticklabels, size=16)\n # Set color bar label.\n cbar.set_label('Height [' + xy_units + ']', size=18, weight='bold')\n\n plt.show()", "def magic_profile(self, parameter_s=''):\n if self.rc.profile:\n printpl('Current IPython profile: $self.rc.profile.')\n else:\n print 'No profile active.'", "def save_current_to_profile(self, profile_name, prof_desc='', prof_path='',\n self_contained=False):\n # Open the already existing profile\n new_profile = profile(profile_name, workdir=os.path.dirname(prof_path))\n\n # shortcut\n w3af_plugins = self._w3af_core.plugins\n\n # Save the enabled plugins\n for plugin_type in w3af_plugins.get_plugin_types():\n enabled_plugins = []\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n enabled_plugins.append(plugin_name)\n new_profile.set_enabled_plugins(plugin_type, enabled_plugins)\n\n # Save the plugin options\n for plugin_type in w3af_plugins.get_plugin_types():\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n plugin_options = w3af_plugins.get_plugin_options(plugin_type,\n plugin_name)\n if plugin_options:\n new_profile.set_plugin_options(plugin_type,\n plugin_name,\n plugin_options,\n self_contained=self_contained)\n\n # Save the profile targets\n targets = cf.cf.get('targets')\n if targets:\n new_profile.set_target(' , '.join(t.url_string for t in targets))\n\n # Save the misc and http settings\n misc_settings = MiscSettings()\n new_profile.set_misc_settings(misc_settings.get_options())\n new_profile.set_http_settings(\n self._w3af_core.uri_opener.settings.get_options())\n\n # Save the profile name and description\n new_profile.set_desc(prof_desc)\n new_profile.set_name(profile_name)\n\n # Save the profile to the file\n new_profile.save(profile_name)\n\n return new_profile", "def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n 
self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()", "def plot_visco_profiles(pointsh5, skip=slice(None,None,1), xscale=1e3, yscale=1e-2, tscale=3.1536e7, adjustRadial=False, benchmark=[], title=None):\n\tplt.figure()\n\n\tcoords,data,number,times = pu.load_h5_visco(pointsh5)\n\n\t#x = 1e3*np.loadtxt(points,usecols=[0]) # output_points2.txt\n\t#y = np.zeros_like(x)\n\tx = coords[:,0]\n\ty = np.zeros_like(x)\n\n\t# NOTE: plot elastic solution by passing dictionary as showelastic\n\t# Plot analytic elastic solution (t=0)\n\t#print(benchmark)\n\tif len(benchmark)>=1:\n\t\tur = zeros_like(x)\n\t\tuz = np.zeros_like(x)\n\t\tfor b in benchmark:\n\t\t\turi,uzi = m.calc_mogi_dp(x,y,**params)\n\t\t\tur += uri\n\t\t\tuz += uzi\n\t\tplt.plot(x*xscale,uz*yscale,'ko',label='benchmark')\n\n\t# Convert units\n\t#ur = np.hypot(data[:,:,0], data[:,:,1]) #assume progiles are along EW profile\n\tur = data[:,:,0]\n\tuz = data[:,:,2]\n\tx = x / xscale\n\tur = ur / yscale #cm\n\tuz = uz / yscale #cm\n\ttimes = times / tscale\n\t#times = times / 8.64e4 #days\n\t#times = times / 31536000 #years\n\n\t#plots = np.arange(0,times.size,skip)\n\t#print(plots.size)\n\t#way to cycle through markers if plotting many lines\n\t#marker = itertools.cycle(['o','^','s','D']) #plot(marker=marker.next() iterates list)\n\t#way to use gradually changing colors from a colormap\n\t#color = plt.cm.jet(1.0*i/plots.size)\n\tindplots = np.arange(times.size-1)\n\tprint(indplots)\n\tindplots = indplots[skip]\n\tprint(indplots)\n\tfor i in indplots:\n\t\tline, = plt.plot(x, uz[i], color=plt.cm.jet(1.0*i/indplots[-1]), label='{:.1f}'.format(times[i]))\n\t\tplt.plot(x, ur[i], ls='dashed', color=line.get_color())\n\t#print uz[i]\n\t#print uz[i-1]\n\n\tif title:\n\t\tplt.title(title)\n\telse:\n\t\tplt.title(pointsh5)\n\n\tplt.axhline(color='k',linestyle='dashed')\n\tplt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\tplt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.show()\n\tplt.legend(title='{}'.format(get_unit(tscale)))\n\tplt.grid()" ]
[ "0.56249416", "0.49046072", "0.4773243", "0.4728293", "0.47255272", "0.46967715", "0.460233", "0.45648476", "0.45566934", "0.45022318", "0.44824857", "0.44786713", "0.44598132", "0.4454975", "0.44341892", "0.4383001", "0.43778774", "0.43481213", "0.43244997", "0.43213403", "0.43109924", "0.43092555", "0.42924997", "0.4275022", "0.42717794", "0.42637676", "0.42579624", "0.42568153", "0.4250528", "0.42297515" ]
0.70576626
0
Take a draft_dict that was already validated by draft_dict_validator, then further sanitize, validate, and transform it. Ultimately return this "further validated" draft dict. It will have a slightly different set of keys, the values of which can be used to directly create a Draft object.
def further_validated_draft_dict(
    draft_dict: Dict[str, Any], user_profile: UserProfile
) -> Dict[str, Any]:

    content = normalize_body(draft_dict["content"])

    timestamp = draft_dict.get("timestamp", time.time())
    timestamp = round(timestamp, 6)
    if timestamp < 0:
        # While it's not exactly an invalid timestamp, it's not something
        # we want to allow either.
        raise JsonableError(_("Timestamp must not be negative."))
    last_edit_time = timestamp_to_datetime(timestamp)

    topic = ""
    recipient_id = None
    to = draft_dict["to"]
    if draft_dict["type"] == "stream":
        topic = truncate_topic(draft_dict["topic"])
        if "\0" in topic:
            raise JsonableError(_("Topic must not contain null bytes"))
        if len(to) != 1:
            raise JsonableError(_("Must specify exactly 1 stream ID for stream messages"))
        stream, sub = access_stream_by_id(user_profile, to[0])
        recipient_id = stream.recipient_id
    elif draft_dict["type"] == "private" and len(to) != 0:
        to_users = get_user_profiles_by_ids(set(to), user_profile.realm)
        try:
            recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id
        except ValidationError as e:  # nocoverage
            raise JsonableError(e.messages[0])

    return {
        "recipient_id": recipient_id,
        "topic": topic,
        "content": content,
        "last_edit_time": last_edit_time,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n for path, file in data['staged_draft']['files'].items()\n },\n links={\n name: DraftLinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n modified=link[\"modified\"],\n )\n for name, link in data['staged_draft']['links'].items()\n }\n )", "def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_object.content = valid_draft_dict[\"content\"]\n draft_object.topic = valid_draft_dict[\"topic\"]\n draft_object.recipient_id = valid_draft_dict[\"recipient_id\"]\n draft_object.last_edit_time = valid_draft_dict[\"last_edit_time\"]\n draft_object.save()\n\n event = {\"type\": \"drafts\", \"op\": \"update\", \"draft\": draft_object.to_dict()}\n send_event(user_profile.realm, event, [user_profile.id])", "def create_draft(self):\n return Draft(self)", "def _convert_states_v28_dict_to_v29_dict(cls, draft_change_list):\n return draft_change_list", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def validate_input(self, deposition, draft_id=None):\n v = APIValidator()\n draft_id = draft_id or deposition.get_default_draft_id()\n metadata_schema = deposition.type.api_metadata_schema(draft_id)\n\n if metadata_schema:\n schema = self.input_schema.copy()\n schema['metadata'] = metadata_schema\n else:\n schema = self.input_schema\n\n # Either conform to dictionary schema or dictionary is empty\n if not v.validate(request.json, schema) and \\\n request.json:\n abort(\n 400,\n message=\"Bad request\",\n status=400,\n errors=filter_validation_errors(v.errors),\n )", "def _get_draft(self):\n review_request = self.create_review_request(publish=True)\n return ReviewRequestDraft.create(review_request)", "def _convert_states_v27_dict_to_v28_dict(cls, draft_change_list):\n for i, change in enumerate(draft_change_list):\n if (change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY and\n change.property_name ==\n exp_domain.STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED): # pylint: disable=line-too-long\n draft_change_list[i] = exp_domain.ExplorationChange({\n 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,\n 'property_name': (\n exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS),\n 'state_name': change.state_name,\n 'new_value': {\n 'voiceovers_mapping': change.new_value\n }\n })\n\n return draft_change_list", "def create_dict_deep_distortion_old(defect_dict: dict, \r\n fancy_defects: dict,\r\n ):\r\n dict_deep_distortion = {}\r\n defect_dict_copy = defect_dict.copy()\r\n for defect_type in fancy_defects.keys(): # for each defect type (vac, as , int)\r\n \r\n dict_deep_distortion[defect_type] = import_deep_distortion_by_type(defect_dict_copy[defect_type],\r\n fancy_defects[defect_type]) #defects for which we'll try the deep distortion found for one of the charge states \r\n return dict_deep_distortion", "def _convert_states_v29_dict_to_v30_dict(cls, draft_change_list):\n for i, change in 
enumerate(draft_change_list):\n if (change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY and\n change.property_name ==\n exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS):\n draft_change_list[i] = exp_domain.ExplorationChange({\n 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,\n 'property_name': (\n exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS),\n 'state_name': change.state_name,\n 'new_value': {\n 'rule_specs': change.new_value['rule_specs'],\n 'outcome': change.new_value['outcome'],\n 'training_data': change.new_value['training_data'],\n 'tagged_skill_misconception_id': None\n }\n })\n return draft_change_list", "def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))", "def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]:\n draft_objects = []\n for draft_dict in draft_dicts:\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_objects.append(\n Draft(\n user_profile=user_profile,\n recipient_id=valid_draft_dict[\"recipient_id\"],\n topic=valid_draft_dict[\"topic\"],\n content=valid_draft_dict[\"content\"],\n last_edit_time=valid_draft_dict[\"last_edit_time\"],\n )\n )\n\n created_draft_objects = Draft.objects.bulk_create(draft_objects)\n\n event = {\n \"type\": \"drafts\",\n \"op\": \"add\",\n \"drafts\": [draft.to_dict() for draft in created_draft_objects],\n }\n send_event(user_profile.realm, event, [user_profile.id])\n\n return created_draft_objects", "def clean_form_dict(self, dict_):\n clean_dict = {}\n first_pdb_type, first_pdb_id, first_pdb_file = '', '', ''\n second_pdb_type, second_pdb_id, second_pdb_file = '', '', ''\n x1, y1, z1, x2, y2, z2 = '0', '0', '0', '0', '0', '0'\n degXY_1, degYZ_1, degXY_2, degYZ_2 = '0', '0', '0', '0'\n\n num_of_proteins = dict_.get('num_of_proteins')\n user_rand = dict_.get('user_rand')\n first_pdb_type = dict_.get('first_pdb_type')\n if first_pdb_type == 'by_id':\n first_pdb_id = dict_.get('first_pdb_id')\n first_pdb_file = ''\n elif first_pdb_type == 'by_file':\n first_pdb_id = ''\n first_pdb_file = dict_.get('first_pdb_file')\n\n if num_of_proteins == '2':\n second_pdb_type = dict_.get('second_pdb_type')\n if second_pdb_type == 'by_id':\n second_pdb_id = dict_.get('second_pdb_id')\n second_pdb_file = ''\n elif first_pdb_type == 'by_file':\n second_pdb_id = ''\n second_pdb_file = dict_.get('second_pdb_file')\n x2, y2, z2 = dict_.get('x2', 0), dict_.get('y2', 0), dict_.get('z2', 0)\n degXY_2, degYZ_2 = dict_.get('degXY_2', 0), dict_.get('degYZ_2', 0)\n\n x1, y1, z1 = dict_.get('x1', 0), dict_.get('y1', 0), dict_.get('z1', 0)\n degXY_1, degYZ_1 = dict_.get('degXY_1', 0), dict_.get('degYZ_1', 0)\n\n temperature_scale = dict_.get('temperature_scale', '')\n temperature = dict_.get('temperature', '')\n time_step_number = dict_.get('time_step_number', '')\n\n clean_dict['user_rand'] = user_rand\n clean_dict['num_of_proteins'] = num_of_proteins\n clean_dict['first_pdb_type'] = first_pdb_type\n clean_dict['first_pdb_id'] = first_pdb_id\n clean_dict['first_pdb_file'] = first_pdb_file\n clean_dict['second_pdb_type'] = second_pdb_type\n clean_dict['second_pdb_id'] = second_pdb_id\n clean_dict['second_pdb_file'] = second_pdb_file\n clean_dict['x1'] = x1\n clean_dict['y1'] = y1\n clean_dict['z1'] = z1\n clean_dict['x2'] = x2\n clean_dict['y2'] = y2\n clean_dict['z2'] = z2\n clean_dict['degXY_1'] = degXY_1\n clean_dict['degYZ_1'] = degYZ_1\n clean_dict['degXY_2'] = degXY_2\n 
clean_dict['degYZ_2'] = degYZ_2\n clean_dict['temperature_scale'] = temperature_scale\n clean_dict['temperature'] = temperature\n clean_dict['time_step_number'] = time_step_number\n\n return clean_dict", "def translate_dict(entity_dict, config):\n\n dump_accepted_entity_dict = OrderedDict()\n\n for key in entity_dict:\n if key in config[\"ent_keys_dump\"]:\n dump_accepted_entity_dict[config[\n \"ent_keys_dump\"][key]] = entity_dict[key]\n\n else:\n dump_accepted_entity_dict[key] = entity_dict[key]\n\n return dump_accepted_entity_dict", "def convert_to_draft(self, source_location):\r\n if source_location.category in DIRECT_ONLY_CATEGORIES:\r\n raise InvalidVersionError(source_location)\r\n original = self.collection.find_one({'_id': source_location.to_deprecated_son()})\r\n if not original:\r\n raise ItemNotFoundError(source_location)\r\n draft_location = as_draft(source_location)\r\n original['_id'] = draft_location.to_deprecated_son()\r\n try:\r\n self.collection.insert(original)\r\n except pymongo.errors.DuplicateKeyError:\r\n raise DuplicateItemError(original['_id'])\r\n\r\n self.refresh_cached_metadata_inheritance_tree(draft_location.course_key)\r\n\r\n return wrap_draft(self._load_items(source_location.course_key, [original])[0])", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def validate_to_python(self, value):\n super(DictField, self).validate(value)\n if value == None:\n return {}\n if not isinstance(value, dict):\n raise ValidationError('Must be a dict, got {0}'.format(type(value).__name__))\n form = self.Form(value)\n if form.is_valid():\n return form.cleaned_data\n else:\n errors = form.errors.as_text()\n raise ValidationError(errors)", "def _from_dict_transform(cls: Type[TVerifiedElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'verified' in data:\n data['is_verified'] = data.pop('verified')\n\n if 'verification_code' in data:\n del data['verification_code']\n\n return data", "def _normalize(self, dictionnary):\r\n copy_dict = OrderedDict()\r\n for k,v in dictionnary.items():\r\n if isinstance(v, OrderedDict):\r\n copy_dict[k.replace('#','').replace('@','')] = self._normalize(v)\r\n else:\r\n copy_dict[k.replace('#','').replace('@','')] = v\r\n return copy_dict", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def validate_update(cls, document: dict) -> dict:\n if document is None:\n return {\"\": [\"No data provided.\"]}\n\n if not isinstance(document, dict):\n return {\"\": [\"Must be a dictionary.\"]}\n\n new_document = copy.deepcopy(document)\n\n errors = {}\n\n updated_field_names = [\n field.name for field in cls.__fields__ if field.name in new_document\n ]\n unknown_fields = [\n field_name\n for field_name in new_document\n if field_name not in updated_field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n 
unknown_field, new_document[unknown_field]\n )\n if known_field:\n new_document.setdefault(known_field.name, {}).update(field_value)\n elif not cls._skip_unknown_fields:\n errors.update({unknown_field: [\"Unknown field\"]})\n\n # Also ensure that primary keys will contain a valid value\n updated_fields = [\n field\n for field in cls.__fields__\n if field.name in new_document or field.is_primary_key\n ]\n for field in updated_fields:\n errors.update(field.validate_update(new_document))\n\n return errors", "def process_dict(self, dictionary):\n return self._flatten(dictionary)", "def get_draft(draft_uuid):\n assert isinstance(draft_uuid, UUID)\n try:\n data = api_request('get', api_url('drafts', str(draft_uuid)))\n except NotFound:\n raise DraftNotFound(f\"Draft does not exist: {draft_uuid}\") # lint-amnesty, pylint: disable=raise-missing-from\n return _draft_from_response(data)", "def validate(self, parameters_dict):\n return DiffParameters.schema(parameters_dict)", "def validate_insert(cls, document: dict) -> dict:\n if document is None:\n return {\"\": [\"No data provided.\"]}\n\n if not isinstance(document, dict):\n return {\"\": [\"Must be a dictionary.\"]}\n\n new_document = copy.deepcopy(document)\n\n errors = {}\n\n field_names = [field.name for field in cls.__fields__]\n unknown_fields = [\n field_name for field_name in new_document if field_name not in field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n unknown_field, new_document[unknown_field]\n )\n if known_field:\n new_document.setdefault(known_field.name, {}).update(field_value)\n elif not cls._skip_unknown_fields:\n errors.update({unknown_field: [\"Unknown field\"]})\n\n for field in cls.__fields__:\n errors.update(field.validate_insert(new_document))\n\n return errors", "def load_transform_state_dict(self, state_dict):\n assert isinstance(self.transform_keys, list)\n assert isinstance(self.rename_transform_keys, dict)\n\n remaining = { utils.key_to_value(self.rename_transform_keys, k, False):v for k,v in state_dict.items() if k.split('.')[0] \\\n in self.transform_keys or k in self.transform_keys }\n self.load_state_dict(remaining, strict=False)\n\n return self", "def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data", "def clean_dict(d):\n\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_dict(v) for v in d) if v]\n return OrderedDict([(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v])", "def _sanitize(data_dict):\n return data_dict", "def _from_dict_transform(cls: Type[TPrimaryElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'primary' in data:\n data['is_primary'] = data.pop('primary')\n\n return data" ]
[ "0.56550765", "0.56206477", "0.5450413", "0.52529806", "0.52475804", "0.5194902", "0.51102144", "0.51091665", "0.50821674", "0.5033902", "0.5002", "0.49568045", "0.4842201", "0.48251075", "0.48187992", "0.48105076", "0.48069793", "0.47712836", "0.47657195", "0.47256124", "0.47020632", "0.46927628", "0.46636012", "0.465873", "0.46474317", "0.4644899", "0.46396995", "0.46204132", "0.46197772", "0.46022105" ]
0.7376656
0
Create drafts in bulk for a given user based on the draft dicts. Since currently, the only place this method is being used (apart from tests) is from the create_draft view, we assume that the drafts_dicts are syntactically valid (i.e. they satisfy the draft_dict_validator).
def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]: draft_objects = [] for draft_dict in draft_dicts: valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile) draft_objects.append( Draft( user_profile=user_profile, recipient_id=valid_draft_dict["recipient_id"], topic=valid_draft_dict["topic"], content=valid_draft_dict["content"], last_edit_time=valid_draft_dict["last_edit_time"], ) ) created_draft_objects = Draft.objects.bulk_create(draft_objects) event = { "type": "drafts", "op": "add", "drafts": [draft.to_dict() for draft in created_draft_objects], } send_event(user_profile.realm, event, [user_profile.id]) return created_draft_objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None):\r\n data = {}\r\n data['Subject'] = subject\r\n data['Body'] = {}\r\n data['Body']['ContentType'] = 'HTML'\r\n data['Body']['Content'] = body\r\n data['ToRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in addresses]\r\n data['ccRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in cc_addresses]\r\n if attachments_list is not None:\r\n data['Attachments'] = attachments_list\r\n\r\n params = json.dumps(data).encode('utf8')\r\n\r\n url = \"{api_url}/{user_id}/messages\".format(api_url=API_URL, user_id=user_id)\r\n\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'Authorization': 'Bearer {}'.format(auth.access_token)\r\n }\r\n req = urllib.request.Request(url, params, headers)\r\n try:\r\n resp = urllib.request.urlopen(req)\r\n resp_data = json.load(resp)\r\n\r\n logging.getLogger(__name__).info(\"Draft created\")\r\n\r\n return resp_data['id']\r\n except urllib.error.HTTPError as err:\r\n raise AzureError(err)", "def test_create_draft_with_multiple_requests(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n draft_1 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_1)\r\n\r\n # Now check that when a user sends request to create a draft when there is already a draft version then\r\n # user gets that already created draft instead of getting 'DuplicateItemError' exception.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n draft_2 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_2)\r\n self.assertEqual(draft_1, draft_2)", "def further_validated_draft_dict(\n draft_dict: Dict[str, Any], user_profile: UserProfile\n) -> Dict[str, Any]:\n\n content = normalize_body(draft_dict[\"content\"])\n\n timestamp = draft_dict.get(\"timestamp\", time.time())\n timestamp = round(timestamp, 6)\n if timestamp < 0:\n # While it's not exactly an invalid timestamp, it's not something\n # we want to allow either.\n raise JsonableError(_(\"Timestamp must not be negative.\"))\n last_edit_time = timestamp_to_datetime(timestamp)\n\n topic = \"\"\n recipient_id = None\n to = draft_dict[\"to\"]\n if draft_dict[\"type\"] == \"stream\":\n topic = truncate_topic(draft_dict[\"topic\"])\n if \"\\0\" in topic:\n raise JsonableError(_(\"Topic must not contain null bytes\"))\n if len(to) != 1:\n raise JsonableError(_(\"Must specify exactly 1 stream ID for stream messages\"))\n stream, sub = access_stream_by_id(user_profile, to[0])\n recipient_id = stream.recipient_id\n elif draft_dict[\"type\"] == \"private\" and len(to) != 0:\n to_users = get_user_profiles_by_ids(set(to), user_profile.realm)\n try:\n recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id\n except ValidationError as e: # nocoverage\n raise JsonableError(e.messages[0])\n\n return {\n \"recipient_id\": recipient_id,\n \"topic\": topic,\n \"content\": content,\n \"last_edit_time\": last_edit_time,\n }", "def drafts(self):\n if 
self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def _initialize_drafts(self):\n drafts = memcache.get('user_drafts:' + self.email)\n if drafts is not None:\n self._drafts = drafts\n ##logging.info('HIT: %s -> %s', self.email, self._drafts)\n return False\n # We're looking for the Issue key id. The ancestry of comments goes:\n # Issue -> PatchSet -> Patch -> Comment.\n issue_ids = set(comment.key().parent().parent().parent().id()\n for comment in gql(Comment,\n 'WHERE author = :1 AND draft = TRUE',\n self.user))\n self._drafts = list(issue_ids)\n ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)\n return True", "def create_draft(convo_ID, template_ID):\n # Get response template through helper function.\n # Make an API request to reply to a conversation with the content in that template\n response_template = get_canned_response(template_ID)\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/drafts\"\n payload = {\n \"body\": response_template[\"body\"],\n \"subject\": response_template[\"subject\"],\n \"author_id\": \"tea_188ud\", # [needs to change later on]\n \"channel_id\": \"cha_14tfp\", # [also will need to be changed for team based settings]\n }\n files = []\n headers = {\"Authorization\": BEARER_TOKEN}\n requests.request(\"POST\", url, headers=headers, json=payload, files=files)", "def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_object.content = valid_draft_dict[\"content\"]\n draft_object.topic = valid_draft_dict[\"topic\"]\n draft_object.recipient_id = valid_draft_dict[\"recipient_id\"]\n draft_object.last_edit_time = valid_draft_dict[\"last_edit_time\"]\n draft_object.save()\n\n event = {\"type\": \"drafts\", \"op\": \"update\", \"draft\": draft_object.to_dict()}\n send_event(user_profile.realm, event, [user_profile.id])", "def create(self, dictionaries):\n \n return self.ep.post(self.endpoint, params=dictionaries)", "def test_bulk_create(self):\n urls = [reverse('api:user-list')]\n data = [\n {\n \"username\": \"newuser1\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n {\n \"username\": \"newuser2\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n ]\n access = {\n \"forbidden\": [self.admin_client, self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, len(resp.data['drafts']))\n 
keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])", "def test_create_with_new_draft(self):\n user1 = User.objects.create(username='reviewer1')\n user2 = User.objects.create(username='reviewer2')\n\n group1 = self.create_review_group(name='group1')\n group2 = self.create_review_group(name='group2')\n\n dep_review_request_1 = self.create_review_request(publish=True)\n dep_review_request_2 = self.create_review_request(publish=True)\n\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n depends_on=[dep_review_request_1, dep_review_request_2],\n rich_text=True,\n target_groups=[group1, group2],\n target_people=[user1, user2],\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n active_file_attachment_1 = self.create_file_attachment(review_request)\n active_file_attachment_2 = self.create_file_attachment(review_request)\n inactive_file_attachment = self.create_file_attachment(review_request,\n active=False)\n\n active_screenshot_1 = self.create_screenshot(review_request)\n active_screenshot_2 = self.create_screenshot(review_request)\n inactive_screenshot = self.create_screenshot(review_request,\n active=False)\n\n # Create the draft.\n draft = ReviewRequestDraft.create(review_request)\n\n # Make sure all the fields are the same.\n self.assertEqual(draft.branch, review_request.branch)\n self.assertEqual(draft.bugs_closed, review_request.bugs_closed)\n self.assertEqual(draft.commit_id, review_request.commit_id)\n self.assertEqual(draft.description, review_request.description)\n self.assertEqual(draft.description_rich_text,\n review_request.description_rich_text)\n self.assertEqual(draft.extra_data, review_request.extra_data)\n self.assertEqual(draft.rich_text, review_request.rich_text)\n self.assertEqual(draft.summary, review_request.summary)\n self.assertEqual(draft.testing_done, review_request.testing_done)\n self.assertEqual(draft.testing_done_rich_text,\n review_request.testing_done_rich_text)\n\n self.assertEqual(list(draft.depends_on.order_by('pk')),\n [dep_review_request_1, dep_review_request_2])\n self.assertEqual(list(draft.target_groups.all()),\n [group1, group2])\n self.assertEqual(list(draft.target_people.all()),\n [user1, user2])\n self.assertEqual(list(draft.file_attachments.all()),\n [active_file_attachment_1, active_file_attachment_2])\n self.assertEqual(list(draft.inactive_file_attachments.all()),\n [inactive_file_attachment])\n self.assertEqual(list(draft.screenshots.all()),\n [active_screenshot_1, active_screenshot_2])\n self.assertEqual(list(draft.inactive_screenshots.all()),\n [inactive_screenshot])\n\n self.assertIsNotNone(draft.changedesc)", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n 
data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)", "def bulk_create(cls, cb, approvals):\n url = cls.urlobject.format(cb.credentials.org_key) + \"/_bulk\"\n resp = cb.post_object(url, body=approvals)\n result = resp.json()\n item_list = result.get(\"results\", [])\n return [cls(cb, item[\"id\"], item) for item in item_list]", "def bulk_create():\n logger.info(\"Creating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.create, data): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will create shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def fromDict(cls, userDBDict : dict, **kwargs) -> bbUserDB:\n # Instance the new bbUserDB\n newDB = bbUserDB()\n # iterate over all user IDs to spawn\n for id in userDBDict.keys():\n # Construct new bbUsers for each ID in the database\n # JSON stores properties as strings, so ids must be converted to int first.\n newDB.addUserObj(bbUser.bbUser.fromDict(userDBDict[id], id=int(id)))\n return newDB", "def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):\n create_default_groups()\n create_default_settings()\n\n data_created = {'users': 0, 'categories': 0, 'forums': 0,\n 'topics': 0, 'posts': 0}\n\n # create 5 users\n for u in range(1, users + 1):\n username = \"test%s\" % u\n email = \"test%[email protected]\" % u\n user = User(username=username, password=\"test\", email=email)\n user.primary_group_id = u\n user.activated = True\n user.save()\n data_created['users'] += 1\n\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n\n # lets send them a few private messages\n for i in range(1, 3):\n # TODO\n pass\n\n # create 2 categories\n for i in range(1, categories + 1):\n category_title = \"Test Category %s\" % i\n category = Category(title=category_title,\n description=\"Test Description\")\n category.save()\n data_created['categories'] += 1\n\n # create 2 forums in each category\n for j in range(1, forums + 1):\n if i == 2:\n j += 2\n\n forum_title = \"Test Forum %s %s\" % (j, i)\n forum = Forum(title=forum_title, description=\"Test Description\",\n category_id=i)\n forum.save()\n data_created['forums'] += 1\n\n for t in range(1, topics + 1):\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % j\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n data_created['topics'] += 1\n\n for p in range(1, posts + 1):\n # create a second post in the forum\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n data_created['posts'] += 1\n\n return data_created", "def create_users(self, users_dict, groups, orgs, locations):\n users = {}\n tokens = {}\n for (user_name, user_data) in users_dict.items():\n if user_data == \"staff\":\n users[user_name] = AppUser.objects.create_superuser(\n username=user_name,\n email='{}@test.com'.format(user_name),\n password='abcd1234@')\n else:\n users[user_name] = AppUser.objects.create_user(\n username=user_name,\n email='{}@test.com'.format(user_name),\n password='abcd1234@',\n organization=orgs.get(user_data.get('organization')))\n if 'authorized_locations' in user_data:\n for location_name in 
user_data.get('authorized_locations'):\n location = locations.get(location_name)\n users[user_name].authorized_locations.add(location)\n groups[user_data['group']].user_set.add(\n users[user_name])\n groups[user_data['group']].save()\n\n users[user_name].save()\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(users[user_name])\n tokens[user_name] = JWT_ENCODE_HANDLER(payload)\n else:\n tokens[user_name] = Token.objects.create(user=users[user_name])\n tokens[user_name].save()\n return users, tokens", "def create_attendees(event, attendees_dict):\n attendees_list = []\n for record in attendees_dict:\n attendee = Attendee()\n attendee.event = event\n attendee.email = record.get('email', '')\n # Converting camelCase to snake_case\n attendee.response = ''.join(\n i if i.islower() else f'_{i.lower()}' for i\n in record['responseStatus']\n )\n if record.get('self') and record.get('responseStatus') == ACCEPTED:\n event.is_attendee = True\n else:\n attendees_list.append(attendee)\n Attendee.objects.bulk_create(attendees_list)\n event.save()", "def post(self):\n try:\n draft_project_dto = DraftProjectDTO(request.get_json())\n draft_project_dto.user_id = token_auth.current_user()\n draft_project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": \"Unable to create project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n draft_project_id = ProjectAdminService.create_draft_project(\n draft_project_dto\n )\n return {\"projectId\": draft_project_id}, 201\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n except (InvalidGeoJson, InvalidData) as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def test_get_list_published_user_drafts(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n language=\"en\", author=self.user)\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertIn(story1.story_id, story_ids)\n self.assertIn(story2.story_id, story_ids)", "def test_create_from_dict_no_schema(session): # pylint:disable=unused-argument\n user = User(username='CP1234567',\n keycloak_guid='1b20db59-19a0-4727-affe-c6f64309fd04')\n\n session.add(user)\n session.commit()\n\n result_invitation = InvitationModel.create_from_dict(None, user.id, 'STANDARD')\n\n assert result_invitation is None", "def collect_draft_pick(team_dict):\n team_dict['draft_pick'] = []\n pick_list = DraftPick.objects.filter(owner=team_dict['team'].franchise.id,\n year=team_dict['team'].year)\n for p in pick_list:\n number = ((p.round - 1) * 16) + p.order\n dft_pick = {\n 'pick': p,\n 'number': number,\n }\n team_dict['draft_pick'].append(dft_pick)\n return team_dict", "def get_drafts(self, **kwargs):\n default_kwargs = { \"order\": \"updated_at desc\" }\n default_kwargs.update(kwargs)\n return self.get_messages(statuses=[\"draft\"], **default_kwargs)", "def populate_with_uuids(self, uuids):\n if not self.isAllowedToEdit():\n raise Unauthorized(_(\"You 
are not allowed to add content to this tile\"))\n self.set_limit()\n data_mgr = ITileDataManager(self)\n\n old_data = data_mgr.get()\n if old_data[\"uuids\"] is None:\n # If there is no content yet, just assign an empty dict\n old_data[\"uuids\"] = dict()\n\n uuids_dict = old_data.get(\"uuids\")\n if not isinstance(uuids_dict, dict):\n # Make sure this is a dict\n uuids_dict = old_data[\"uuids\"] = dict()\n\n if uuids_dict and len(uuids_dict) > self.limit:\n # Do not allow adding more objects than the defined limit\n return\n\n order_list = [int(val.get(\"order\", 0)) for val in uuids_dict.values()]\n if len(order_list) == 0:\n # First entry\n order = 0\n else:\n # Get last order position and increment 1\n order_list.sort()\n order = order_list.pop() + 1\n\n for uuid in uuids:\n if uuid not in uuids_dict:\n entry = dict()\n entry[u\"order\"] = six.text_type(order)\n uuids_dict[uuid] = entry\n order += 1\n\n old_data[\"uuids\"] = uuids_dict\n data_mgr.set(old_data)", "async def fill_user_dict(user_dict: dict):\n # TODO: be more careful about name duplication\n raw_db_data = await userdb.get_all_users_with_any(user_dict.keys())\n for row in raw_db_data:\n necrouser = _get_user_from_db_row(row) # type: NecroUser\n\n # Don't insert users that aren't members of the current server\n if necrouser.member is None:\n continue\n\n if necrouser.twitch_name is not None and necrouser.twitch_name.lower() in user_dict:\n user_dict[necrouser.twitch_name.lower()].append(necrouser)\n\n return user_dict", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def user_import_process(request, setting_dict, preview=True, id=''):\n key_list = setting_dict['key'].split(',')\n # key(s)- user field(s) or profile fields(s)? 
that is import to identify\n key_user_list = [key for key in key_list if key in user_field_names]\n key_profile_list = [key for key in key_list if key in profile_field_names]\n\n setting_dict['total'] = request.session[id].get('total', 0)\n setting_dict['count_insert'] = 0\n setting_dict['count_update'] = 0\n setting_dict['count_invalid'] = 0\n\n data_dict_list = request.session[id].get('data_dict_list', [])\n data_dict_list_len = len(data_dict_list)\n\n user_obj_list = []\n invalid_list = []\n\n start = 0\n if not preview:\n finish = start + ROWS_TO_PROCESS\n if finish > data_dict_list_len:\n finish = data_dict_list_len\n else:\n finish = data_dict_list_len\n\n for r in xrange(start, finish):\n user_object_dict = {}\n if not preview:\n user_import_dict = {}\n identity_user_dict = {} # used to look up the User\n identity_profile_dict = {} # used to look up the Profile\n missing_keys = []\n\n data_dict = data_dict_list[r]\n\n missing_keys = [key for key in data_dict.keys()\n if key in key_list\n and data_dict[key] == '']\n\n for key in data_dict.keys():\n user_object_dict[key] = data_dict[key]\n\n if key in key_list and data_dict[key] != '':\n if key in key_user_list:\n identity_user_dict[key] = data_dict[key]\n if key in key_profile_list:\n identity_profile_dict[key] = data_dict[key]\n\n user_object_dict['ROW_NUM'] = data_dict['ROW_NUM']\n\n if missing_keys:\n user_object_dict['ERROR'] = 'Missing key: %s.' % (\n ', '.join(missing_keys))\n user_object_dict['IS_VALID'] = False\n setting_dict['count_invalid'] += 1\n if not preview:\n invalid_list.append({'ROW_NUM': user_object_dict['ROW_NUM'],\n 'ERROR': user_object_dict['ERROR']})\n else:\n user_object_dict['IS_VALID'] = True\n\n # the keys could be the fields in both User and Profile tables\n user = get_user_by_key(identity_user_dict, identity_profile_dict)\n if user:\n if preview:\n user_object_dict['ACTION'] = 'update'\n else:\n user_import_dict['ACTION'] = 'update'\n setting_dict['count_update'] += 1\n\n if preview:\n populate_user_dict(user, user_object_dict, setting_dict)\n else:\n #user = None\n if preview:\n user_object_dict['ACTION'] = 'insert'\n else:\n user_import_dict['ACTION'] = 'insert'\n setting_dict['count_insert'] += 1\n\n if not preview:\n user = do_user_import(request, user,\n user_object_dict,\n setting_dict)\n user_import_dict['user'] = user\n user_import_dict['ROW_NUM'] = user_object_dict['ROW_NUM']\n user_obj_list.append(user_import_dict)\n\n if preview:\n user_obj_list.append(user_object_dict)\n\n if not preview:\n if finish < data_dict_list_len:\n # not finished yet, store some data in the session\n count_insert = request.session[id].get('count_insert', 0) + \\\n setting_dict['count_insert']\n count_update = request.session[id].get('count_update', 0) + \\\n setting_dict['count_update']\n\n setting_dict['is_completed'] = False\n\n for r in xrange(start, finish):\n # remove those already processed rows\n data_dict_list.remove(data_dict_list[0])\n\n d = request.session[id]\n d.update({'is_completed': False,\n 'count_insert': count_insert,\n 'count_update': count_update,\n 'data_dict_list': data_dict_list})\n request.session[id] = d\n else:\n setting_dict['is_completed'] = True\n setting_dict['count_insert'] += request.session[id].get(\n 'count_insert', 0)\n setting_dict['count_update'] += request.session[id].get(\n 'count_update', 0)\n d = request.session[id]\n d.update({'is_completed': True})\n request.session[id] = d\n\n return user_obj_list, invalid_list", "def populate_with_uuids(self, uuids):\n if not 
self.isAllowedToEdit():\n raise Unauthorized(\n _('You are not allowed to add content to this tile'))\n data_mgr = ITileDataManager(self)\n\n old_data = data_mgr.get()\n if old_data['uuids'] is None:\n # If there is no content yet, just assign an empty dict\n old_data['uuids'] = dict()\n\n uuids_dict = old_data.get('uuids')\n if not isinstance(uuids_dict, dict):\n # Make sure this is a dict\n uuids_dict = old_data['uuids'] = dict()\n\n # if uuids_dict and len(uuids_dict) > self.limit:\n # # Do not allow adding more objects than the defined limit\n # return\n\n order_list = [int(val.get('order', 0))\n for key, val in uuids_dict.items()]\n if len(order_list) == 0:\n # First entry\n order = 0\n else:\n # Get last order position and increment 1\n order_list.sort()\n order = order_list.pop() + 1\n\n for uuid in uuids:\n if uuid not in uuids_dict.keys():\n entry = dict()\n entry[u'order'] = unicode(order)\n uuids_dict[uuid] = entry\n order += 1\n\n old_data['uuids'] = uuids_dict\n data_mgr.set(old_data)", "def copy_from_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamusercopy in TeamUserCopy.objects.all():\n if TeamUser.objects.filter(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id).count() == 0:\n TeamUser.objects.create(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id)\n print('Created %s %s' % (teamusercopy.team_id, teamusercopy.user_id))\n else:\n print('Already exists... skipping')" ]
[ "0.56473714", "0.55276287", "0.5469983", "0.5152256", "0.5134393", "0.5032918", "0.4999463", "0.49490035", "0.49176887", "0.49063164", "0.48992178", "0.48911917", "0.48814285", "0.48810473", "0.48643064", "0.48627475", "0.48573655", "0.48487023", "0.48398086", "0.48395732", "0.47892275", "0.47678792", "0.47663376", "0.47654828", "0.47591096", "0.47496137", "0.47379512", "0.4734019", "0.47167554", "0.470971" ]
0.7956114
0
Edit/update a single draft for a given user. Since the only place this method is being used from (apart from tests) is the edit_draft view, we assume that the drafts_dict is syntactically valid (i.e. it satisfies the draft_dict_validator).
def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None: try: draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile) except Draft.DoesNotExist: raise ResourceNotFoundError(_("Draft does not exist")) valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile) draft_object.content = valid_draft_dict["content"] draft_object.topic = valid_draft_dict["topic"] draft_object.recipient_id = valid_draft_dict["recipient_id"] draft_object.last_edit_time = valid_draft_dict["last_edit_time"] draft_object.save() event = {"type": "drafts", "op": "update", "draft": draft_object.to_dict()} send_event(user_profile.realm, event, [user_profile.id])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing-draft-alert').present,\r\n 'Wait for draft mode to be activated'\r\n ).fulfill()", "def edit_user(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n return render_template(\"edit_user.html\",\n headline=f\"Edit Blogly {db_user.get_full_name()}\",\n user=db_user)", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def put(self, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n self.validate_input(d, draft_id)\n self.process_input(d, draft_id)\n d.save()", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('edit-user.html', user=user)", "def update(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('update',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def further_validated_draft_dict(\n draft_dict: Dict[str, Any], user_profile: UserProfile\n) -> Dict[str, Any]:\n\n content = normalize_body(draft_dict[\"content\"])\n\n timestamp = draft_dict.get(\"timestamp\", time.time())\n timestamp = round(timestamp, 6)\n if timestamp < 0:\n # While it's not exactly an invalid timestamp, it's not something\n # we want to allow either.\n raise JsonableError(_(\"Timestamp must not be negative.\"))\n last_edit_time = timestamp_to_datetime(timestamp)\n\n topic = \"\"\n recipient_id = None\n to = draft_dict[\"to\"]\n if draft_dict[\"type\"] == \"stream\":\n topic = truncate_topic(draft_dict[\"topic\"])\n if \"\\0\" in topic:\n raise JsonableError(_(\"Topic must not contain null bytes\"))\n if len(to) != 1:\n raise JsonableError(_(\"Must specify exactly 1 stream ID for stream messages\"))\n stream, sub = access_stream_by_id(user_profile, to[0])\n recipient_id = stream.recipient_id\n elif draft_dict[\"type\"] == \"private\" and len(to) != 0:\n to_users = get_user_profiles_by_ids(set(to), user_profile.realm)\n try:\n recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id\n except ValidationError as e: # nocoverage\n raise JsonableError(e.messages[0])\n\n return {\n \"recipient_id\": recipient_id,\n \"topic\": topic,\n \"content\": content,\n \"last_edit_time\": last_edit_time,\n }", "def edit_draft(self, message_id):\n return Draft(self, message_id).fetch()", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/edit_user.html\", user=user)", "def show_edit_form(user_id):\n\n user = 
User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def put(self, user_id):\r\n return update_user(request, user_id)", "def test_update_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n data = f.read()\n storage.save_draft(user_id, \"bib\", \"7149593\", data, \"1362044230872\")\n json_data = json.loads(data)\n json_data['@context'] = \"yadda\"\n storage.update_draft(user_id, \"bib\", \"7149593\", json.dumps(json_data), \"1362044230872\")\n assert json.loads(open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\").read())['document']['@context'] == \"yadda\"", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def show_edit_user_form(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('edit_user.html', user=user)", "def update( self, trans, id, payload, **kwd ):\n current_user = trans.user\n user_to_update = self.user_manager.by_id( self.decode_id( id ) )\n\n # only allow updating other users if they're admin\n editing_someone_else = current_user != user_to_update\n is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )\n if editing_someone_else and not is_admin:\n raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )\n\n self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )\n return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )", "def show_edit_user_form(user_id):\r\n user = User.query.get_or_404(user_id)\r\n\r\n return render_template('edit-user.html', user=user)", "def patch(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('patch',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def get_draft_by_id(request, draft_id):\n\n for draft in request.session[\"drafts\"]:\n if draft[\"id\"] == draft_id:\n # Found a valid draft, return it\n return draft\n\n return None # Otherwise return None.", "def put(self, user_id):\n data = request.json\n return update_user(data, user_id)", "def edit_post(post_id):\n\n post_data = {\"id\": post_id}\n db_post = Post.query.get_or_404(post_id)\n post_data[\"title\"] = db_post.title\n post_data[\"content\"] = db_post.content\n post_data[\"user_id\"] = db_post.user_id\n\n return render_template(\"edit_post.html\", headline=\"Add New Blogly User\", post=post_data)", "def can_edit(self, user, user_is_admin=False):\r\n if user is None or isinstance(user, FakeAccount):\r\n return False\r\n elif user_is_admin or self.author_id == user._id:\r\n return True\r\n elif Subreddit._by_name('discussion').is_editor(user):\r\n return True\r\n else:\r\n return False", "def edit_user_process(user_id):\n\n # extract form data, edit, commit, then redirect to /users\n first_name = request.form[\"first-name\"].strip()\n last_name = request.form[\"last-name\"].strip()\n image_url = request.form[\"image-url\"].strip()\n\n msg = db_edit_user(user_id, first_name, last_name, image_url)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(f\"/users/{user_id}\")", "def update(self, user: U) -> None:\n ...", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def user_edit(request):\n DEBUG = False\n\n if not has_permission('editUser', request.context, request):\n #print \"NOT has_permission !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n request.message = \"You do not have permissions to edit this user!\"\n raise HTTPForbidden\n\n # if no 
user_id in URL and not logged in, tell user to login\n\n try:\n user_id = request.matchdict['user_id']\n except KeyError, ke:\n #print ke\n return HTTPFound(location=request.route_url('not_found'))\n\n user = User.get_by_user_id(user_id)\n\n if user is None:\n msg = \"User was not founf in database.\"\n return HTTPFound(location=request.route_url('not_found'))\n\n form = Form(request, schema=UserSettingsSchema, obj=user)\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('Please check the form below for errors!')\n if DEBUG: # pragma: no cover\n print \"submitted but not validated!\"\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for changing database entries!\n request.session.flash('form validated!')\n if DEBUG: # pragma: no cover\n print \"the form was submitted and validated.\"\n\n if form.data['surname'] != user.surname:\n if DEBUG: # pragma: no cover\n request.session.flash('surname was not same --> changing')\n print \"changing surname\"\n user.surname = form.data['surname']\n if form.data['lastname'] != user.lastname:\n if DEBUG: # pragma: no cover\n request.session.flash('lastname was not same --> changing')\n print \"changing lastname\"\n user.lastname = form.data['lastname']\n if form.data['email'] != user.email:\n request.session.flash('email was not same --> changing')\n user.email = form.data['email']\n if form.data['phone'] != user.phone:\n request.session.flash('phone was not same --> changing')\n user.phone = form.data['phone']\n if form.data['fax'] != user.fax:\n request.session.flash('fax was not same --> changing')\n user.fax = form.data['fax']\n if form.data['street'] != user.street:\n request.session.flash('street was not same --> changing')\n user.street = form.data['street']\n if form.data['number'] != user.number:\n request.session.flash('number was not same --> changing')\n user.number = form.data['number']\n if form.data['city'] != user.city:\n request.session.flash('city was not same --> changing')\n user.city = form.data['city']\n if form.data['postcode'] != user.postcode:\n request.session.flash('postcode was not same --> changing')\n user.postcode = form.data['postcode']\n if form.data['country'] != user.country:\n request.session.flash('country was not same --> changing')\n user.country = form.data['country']\n\n if DEBUG: # pragma: no cover\n print \"returning the form\"\n return {\n 'the_user_id': user_id,\n 'the_username': user.username,\n 'form': FormRenderer(form),\n }", "def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" 
updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)" ]
[ "0.63595945", "0.6280873", "0.623346", "0.62005955", "0.618698", "0.61712956", "0.616757", "0.6136594", "0.6136562", "0.61018544", "0.60939956", "0.60843796", "0.5883826", "0.58486557", "0.5807267", "0.57433337", "0.5733624", "0.5710894", "0.56908655", "0.56681085", "0.5636532", "0.5623547", "0.5588345", "0.5549195", "0.55424416", "0.5525518", "0.55219877", "0.5499191", "0.5493994", "0.54774994" ]
0.7761259
0